Dataset columns (observed ranges / class counts):

  file_path           string    length 21-202
  content             string    length 19-1.02M
  size                int64     19-1.02M
  lang                string    8 classes
  avg_line_length     float64   5.88-100
  max_line_length     int64     12-993
  alphanum_fraction   float64   0.27-0.93
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/utils.py
import asyncio
import urllib.parse
from typing import TYPE_CHECKING, Optional, Tuple

if TYPE_CHECKING:
    from asgiref.typing import WWWScope


def get_remote_addr(transport: asyncio.Transport) -> Optional[Tuple[str, int]]:
    socket_info = transport.get_extra_info("socket")
    if socket_info is not None:
        try:
            info = socket_info.getpeername()
            return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None
        except OSError:  # pragma: no cover
            # This case appears to inconsistently occur with uvloop
            # bound to a unix domain socket.
            return None

    info = transport.get_extra_info("peername")
    if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
        return (str(info[0]), int(info[1]))
    return None


def get_local_addr(transport: asyncio.Transport) -> Optional[Tuple[str, int]]:
    socket_info = transport.get_extra_info("socket")
    if socket_info is not None:
        info = socket_info.getsockname()
        return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None

    info = transport.get_extra_info("sockname")
    if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
        return (str(info[0]), int(info[1]))
    return None


def is_ssl(transport: asyncio.Transport) -> bool:
    return bool(transport.get_extra_info("sslcontext"))


def get_client_addr(scope: "WWWScope") -> str:
    client = scope.get("client")
    if not client:
        return ""
    return "%s:%d" % client


def get_path_with_query_string(scope: "WWWScope") -> str:
    path_with_query_string = urllib.parse.quote(scope["path"])
    if scope["query_string"]:
        path_with_query_string = "{}?{}".format(
            path_with_query_string, scope["query_string"].decode("ascii")
        )
    return path_with_query_string
size: 1,876 | lang: Python | avg_line_length: 32.517857 | max_line_length: 84 | alphanum_fraction: 0.642857
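For orientation, here is a small usage sketch (mine, not part of the bundled file): a hand-built ASGI-style scope dict fed through the two scope helpers defined above. The scope values are hypothetical.

# Illustrative scope for get_client_addr / get_path_with_query_string.
scope = {
    "client": ("127.0.0.1", 52431),
    "path": "/items/a b",
    "query_string": b"page=2",
}

print(get_client_addr(scope))             # -> "127.0.0.1:52431"
print(get_path_with_query_string(scope))  # -> "/items/a%20b?page=2"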
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/websockets/wsproto_impl.py
import asyncio import logging import sys import typing from urllib.parse import unquote import wsproto from wsproto import ConnectionType, events from wsproto.connection import ConnectionState from wsproto.extensions import Extension, PerMessageDeflate from wsproto.utilities import RemoteProtocolError from uvicorn.config import Config from uvicorn.logging import TRACE_LOG_LEVEL from uvicorn.protocols.utils import ( get_local_addr, get_path_with_query_string, get_remote_addr, is_ssl, ) from uvicorn.server import ServerState if typing.TYPE_CHECKING: from asgiref.typing import ( ASGISendEvent, WebSocketAcceptEvent, WebSocketCloseEvent, WebSocketConnectEvent, WebSocketDisconnectEvent, WebSocketReceiveEvent, WebSocketScope, WebSocketSendEvent, ) WebSocketEvent = typing.Union[ "WebSocketReceiveEvent", "WebSocketDisconnectEvent", "WebSocketConnectEvent", ] if sys.version_info < (3, 8): # pragma: py-gte-38 from typing_extensions import Literal else: # pragma: py-lt-38 from typing import Literal class WSProtocol(asyncio.Protocol): def __init__( self, config: Config, server_state: ServerState, app_state: typing.Dict[str, typing.Any], _loop: typing.Optional[asyncio.AbstractEventLoop] = None, ) -> None: if not config.loaded: config.load() self.config = config self.app = config.loaded_app self.loop = _loop or asyncio.get_event_loop() self.logger = logging.getLogger("uvicorn.error") self.root_path = config.root_path self.app_state = app_state # Shared server state self.connections = server_state.connections self.tasks = server_state.tasks self.default_headers = server_state.default_headers # Connection state self.transport: asyncio.Transport = None # type: ignore[assignment] self.server: typing.Optional[typing.Tuple[str, int]] = None self.client: typing.Optional[typing.Tuple[str, int]] = None self.scheme: Literal["wss", "ws"] = None # type: ignore[assignment] # WebSocket state self.queue: asyncio.Queue["WebSocketEvent"] = asyncio.Queue() self.handshake_complete = False self.close_sent = False self.conn = wsproto.WSConnection(connection_type=ConnectionType.SERVER) self.read_paused = False self.writable = asyncio.Event() self.writable.set() # Buffers self.bytes = b"" self.text = "" # Protocol interface def connection_made( # type: ignore[override] self, transport: asyncio.Transport ) -> None: self.connections.add(self) self.transport = transport self.server = get_local_addr(transport) self.client = get_remote_addr(transport) self.scheme = "wss" if is_ssl(transport) else "ws" if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection made", prefix) def connection_lost(self, exc: typing.Optional[Exception]) -> None: code = 1005 if self.handshake_complete else 1006 self.queue.put_nowait({"type": "websocket.disconnect", "code": code}) self.connections.remove(self) if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection lost", prefix) self.handshake_complete = True if exc is None: self.transport.close() def eof_received(self) -> None: pass def data_received(self, data: bytes) -> None: try: self.conn.receive_data(data) except RemoteProtocolError as err: # TODO: Remove `type: ignore` when wsproto fixes the type annotation. 
self.transport.write(self.conn.send(err.event_hint)) # type: ignore[arg-type] # noqa: E501 self.transport.close() else: self.handle_events() def handle_events(self) -> None: for event in self.conn.events(): if isinstance(event, events.Request): self.handle_connect(event) elif isinstance(event, events.TextMessage): self.handle_text(event) elif isinstance(event, events.BytesMessage): self.handle_bytes(event) elif isinstance(event, events.CloseConnection): self.handle_close(event) elif isinstance(event, events.Ping): self.handle_ping(event) def pause_writing(self) -> None: """ Called by the transport when the write buffer exceeds the high water mark. """ self.writable.clear() def resume_writing(self) -> None: """ Called by the transport when the write buffer drops below the low water mark. """ self.writable.set() def shutdown(self) -> None: if self.handshake_complete: self.queue.put_nowait({"type": "websocket.disconnect", "code": 1012}) output = self.conn.send(wsproto.events.CloseConnection(code=1012)) self.transport.write(output) else: self.send_500_response() self.transport.close() def on_task_complete(self, task: asyncio.Task) -> None: self.tasks.discard(task) # Event handlers def handle_connect(self, event: events.Request) -> None: headers = [(b"host", event.host.encode())] headers += [(key.lower(), value) for key, value in event.extra_headers] raw_path, _, query_string = event.target.partition("?") self.scope: "WebSocketScope" = { # type: ignore[typeddict-item] "type": "websocket", "asgi": {"version": self.config.asgi_version, "spec_version": "2.3"}, "http_version": "1.1", "scheme": self.scheme, "server": self.server, "client": self.client, "root_path": self.root_path, "path": unquote(raw_path), "raw_path": raw_path.encode("ascii"), "query_string": query_string.encode("ascii"), "headers": headers, "subprotocols": event.subprotocols, "extensions": None, "state": self.app_state.copy(), } self.queue.put_nowait({"type": "websocket.connect"}) task = self.loop.create_task(self.run_asgi()) task.add_done_callback(self.on_task_complete) self.tasks.add(task) def handle_text(self, event: events.TextMessage) -> None: self.text += event.data if event.message_finished: msg: "WebSocketReceiveEvent" = { # type: ignore[typeddict-item] "type": "websocket.receive", "text": self.text, } self.queue.put_nowait(msg) self.text = "" if not self.read_paused: self.read_paused = True self.transport.pause_reading() def handle_bytes(self, event: events.BytesMessage) -> None: self.bytes += event.data # todo: we may want to guard the size of self.bytes and self.text if event.message_finished: msg: "WebSocketReceiveEvent" = { # type: ignore[typeddict-item] "type": "websocket.receive", "bytes": self.bytes, } self.queue.put_nowait(msg) self.bytes = b"" if not self.read_paused: self.read_paused = True self.transport.pause_reading() def handle_close(self, event: events.CloseConnection) -> None: if self.conn.state == ConnectionState.REMOTE_CLOSING: self.transport.write(self.conn.send(event.response())) self.queue.put_nowait({"type": "websocket.disconnect", "code": event.code}) self.transport.close() def handle_ping(self, event: events.Ping) -> None: self.transport.write(self.conn.send(event.response())) def send_500_response(self) -> None: headers = [ (b"content-type", b"text/plain; charset=utf-8"), (b"connection", b"close"), ] output = self.conn.send( wsproto.events.RejectConnection( status_code=500, headers=headers, has_body=True ) ) output += self.conn.send( wsproto.events.RejectData(data=b"Internal Server Error") ) 
self.transport.write(output) async def run_asgi(self) -> None: try: result = await self.app(self.scope, self.receive, self.send) except BaseException: self.logger.exception("Exception in ASGI application\n") if not self.handshake_complete: self.send_500_response() self.transport.close() else: if not self.handshake_complete: msg = "ASGI callable returned without completing handshake." self.logger.error(msg) self.send_500_response() self.transport.close() elif result is not None: msg = "ASGI callable should return None, but returned '%s'." self.logger.error(msg, result) self.transport.close() async def send(self, message: "ASGISendEvent") -> None: await self.writable.wait() message_type = message["type"] if not self.handshake_complete: if message_type == "websocket.accept": message = typing.cast("WebSocketAcceptEvent", message) self.logger.info( '%s - "WebSocket %s" [accepted]', self.scope["client"], get_path_with_query_string(self.scope), ) subprotocol = message.get("subprotocol") extra_headers = self.default_headers + list(message.get("headers", [])) extensions: typing.List[Extension] = [] if self.config.ws_per_message_deflate: extensions.append(PerMessageDeflate()) if not self.transport.is_closing(): self.handshake_complete = True output = self.conn.send( wsproto.events.AcceptConnection( subprotocol=subprotocol, extensions=extensions, extra_headers=extra_headers, ) ) self.transport.write(output) elif message_type == "websocket.close": self.queue.put_nowait({"type": "websocket.disconnect", "code": 1006}) self.logger.info( '%s - "WebSocket %s" 403', self.scope["client"], get_path_with_query_string(self.scope), ) self.handshake_complete = True self.close_sent = True event = events.RejectConnection(status_code=403, headers=[]) output = self.conn.send(event) self.transport.write(output) self.transport.close() else: msg = ( "Expected ASGI message 'websocket.accept' or 'websocket.close', " "but got '%s'." ) raise RuntimeError(msg % message_type) elif not self.close_sent: if message_type == "websocket.send": message = typing.cast("WebSocketSendEvent", message) bytes_data = message.get("bytes") text_data = message.get("text") data = text_data if bytes_data is None else bytes_data output = self.conn.send( wsproto.events.Message(data=data) # type: ignore[type-var] ) if not self.transport.is_closing(): self.transport.write(output) elif message_type == "websocket.close": message = typing.cast("WebSocketCloseEvent", message) self.close_sent = True code = message.get("code", 1000) reason = message.get("reason", "") or "" self.queue.put_nowait({"type": "websocket.disconnect", "code": code}) output = self.conn.send( wsproto.events.CloseConnection(code=code, reason=reason) ) if not self.transport.is_closing(): self.transport.write(output) self.transport.close() else: msg = ( "Expected ASGI message 'websocket.send' or 'websocket.close'," " but got '%s'." ) raise RuntimeError(msg % message_type) else: msg = "Unexpected ASGI message '%s', after sending 'websocket.close'." raise RuntimeError(msg % message_type) async def receive(self) -> "WebSocketEvent": message = await self.queue.get() if self.read_paused and self.queue.empty(): self.read_paused = False self.transport.resume_reading() return message
size: 13,486 | lang: Python | avg_line_length: 36.673184 | max_line_length: 104 | alphanum_fraction: 0.565846
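WSProtocol drives a standard ASGI websocket application. As a point of reference, here is a minimal echo app (a sketch grounded in the ASGI websocket spec, not taken from this dataset) that exercises the connect/receive/disconnect events the protocol queues:

# Minimal ASGI websocket echo app (illustrative sketch).
async def app(scope, receive, send):
    assert scope["type"] == "websocket"
    while True:
        event = await receive()
        if event["type"] == "websocket.connect":
            await send({"type": "websocket.accept"})
        elif event["type"] == "websocket.receive":
            # Echo text or bytes back to the client.
            if event.get("text") is not None:
                await send({"type": "websocket.send", "text": event["text"]})
            else:
                await send({"type": "websocket.send", "bytes": event["bytes"]})
        elif event["type"] == "websocket.disconnect":
            break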
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/websockets/auto.py
import asyncio
import typing

AutoWebSocketsProtocol: typing.Optional[typing.Callable[..., asyncio.Protocol]]
try:
    import websockets  # noqa
except ImportError:  # pragma: no cover
    try:
        import wsproto  # noqa
    except ImportError:
        AutoWebSocketsProtocol = None
    else:
        from uvicorn.protocols.websockets.wsproto_impl import WSProtocol

        AutoWebSocketsProtocol = WSProtocol
else:
    from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol

    AutoWebSocketsProtocol = WebSocketProtocol
size: 548 | lang: Python | avg_line_length: 26.449999 | max_line_length: 79 | alphanum_fraction: 0.74635
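The fallback order above is: websockets if importable, else wsproto, else no websocket support. Rather than relying on this auto-detection, the implementation can also be pinned explicitly; a hedged sketch, assuming the standard uvicorn.run() ws option:

import uvicorn

async def app(scope, receive, send):
    ...  # placeholder ASGI app

if __name__ == "__main__":
    # ws accepts "auto" (default), "websockets", "wsproto", or "none".
    uvicorn.run(app, ws="wsproto")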
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/websockets/websockets_impl.py
import asyncio import http import logging import sys from typing import ( TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union, cast, ) from urllib.parse import unquote import websockets from websockets.datastructures import Headers from websockets.exceptions import ConnectionClosed from websockets.extensions.permessage_deflate import ServerPerMessageDeflateFactory from websockets.legacy.server import HTTPResponse from websockets.server import WebSocketServerProtocol from websockets.typing import Subprotocol from uvicorn.config import Config from uvicorn.logging import TRACE_LOG_LEVEL from uvicorn.protocols.utils import ( get_local_addr, get_path_with_query_string, get_remote_addr, is_ssl, ) from uvicorn.server import ServerState if sys.version_info < (3, 8): # pragma: py-gte-38 from typing_extensions import Literal else: # pragma: py-lt-38 from typing import Literal if TYPE_CHECKING: from asgiref.typing import ( ASGISendEvent, WebSocketAcceptEvent, WebSocketCloseEvent, WebSocketConnectEvent, WebSocketDisconnectEvent, WebSocketReceiveEvent, WebSocketScope, WebSocketSendEvent, ) class Server: closing = False def register(self, ws: WebSocketServerProtocol) -> None: pass def unregister(self, ws: WebSocketServerProtocol) -> None: pass def is_serving(self) -> bool: return not self.closing class WebSocketProtocol(WebSocketServerProtocol): extra_headers: List[Tuple[str, str]] def __init__( self, config: Config, server_state: ServerState, app_state: Dict[str, Any], _loop: Optional[asyncio.AbstractEventLoop] = None, ): if not config.loaded: config.load() self.config = config self.app = config.loaded_app self.loop = _loop or asyncio.get_event_loop() self.root_path = config.root_path self.app_state = app_state # Shared server state self.connections = server_state.connections self.tasks = server_state.tasks # Connection state self.transport: asyncio.Transport = None # type: ignore[assignment] self.server: Optional[Tuple[str, int]] = None self.client: Optional[Tuple[str, int]] = None self.scheme: Literal["wss", "ws"] = None # type: ignore[assignment] # Connection events self.scope: WebSocketScope = None # type: ignore[assignment] self.handshake_started_event = asyncio.Event() self.handshake_completed_event = asyncio.Event() self.closed_event = asyncio.Event() self.initial_response: Optional[HTTPResponse] = None self.connect_sent = False self.lost_connection_before_handshake = False self.accepted_subprotocol: Optional[Subprotocol] = None self.ws_server: Server = Server() # type: ignore[assignment] extensions = [] if self.config.ws_per_message_deflate: extensions.append(ServerPerMessageDeflateFactory()) super().__init__( ws_handler=self.ws_handler, ws_server=self.ws_server, # type: ignore[arg-type] max_size=self.config.ws_max_size, ping_interval=self.config.ws_ping_interval, ping_timeout=self.config.ws_ping_timeout, extensions=extensions, logger=logging.getLogger("uvicorn.error"), ) self.server_header = None self.extra_headers = [ (name.decode("latin-1"), value.decode("latin-1")) for name, value in server_state.default_headers ] def connection_made( # type: ignore[override] self, transport: asyncio.Transport ) -> None: self.connections.add(self) self.transport = transport self.server = get_local_addr(transport) self.client = get_remote_addr(transport) self.scheme = "wss" if is_ssl(transport) else "ws" if self.logger.isEnabledFor(TRACE_LOG_LEVEL): prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection made", prefix) 
super().connection_made(transport) def connection_lost(self, exc: Optional[Exception]) -> None: self.connections.remove(self) if self.logger.isEnabledFor(TRACE_LOG_LEVEL): prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection lost", prefix) self.lost_connection_before_handshake = ( not self.handshake_completed_event.is_set() ) self.handshake_completed_event.set() super().connection_lost(exc) if exc is None: self.transport.close() def shutdown(self) -> None: self.ws_server.closing = True if self.handshake_completed_event.is_set(): self.fail_connection(1012) else: self.send_500_response() self.transport.close() def on_task_complete(self, task: asyncio.Task) -> None: self.tasks.discard(task) async def process_request( self, path: str, headers: Headers ) -> Optional[HTTPResponse]: """ This hook is called to determine if the websocket should return an HTTP response and close. Our behavior here is to start the ASGI application, and then wait for either `accept` or `close` in order to determine if we should close the connection. """ path_portion, _, query_string = path.partition("?") websockets.legacy.handshake.check_request(headers) subprotocols = [] for header in headers.get_all("Sec-WebSocket-Protocol"): subprotocols.extend([token.strip() for token in header.split(",")]) asgi_headers = [ (name.encode("ascii"), value.encode("ascii", errors="surrogateescape")) for name, value in headers.raw_items() ] self.scope = { # type: ignore[typeddict-item] "type": "websocket", "asgi": {"version": self.config.asgi_version, "spec_version": "2.3"}, "http_version": "1.1", "scheme": self.scheme, "server": self.server, "client": self.client, "root_path": self.root_path, "path": unquote(path_portion), "raw_path": path_portion.encode("ascii"), "query_string": query_string.encode("ascii"), "headers": asgi_headers, "subprotocols": subprotocols, "state": self.app_state.copy(), } task = self.loop.create_task(self.run_asgi()) task.add_done_callback(self.on_task_complete) self.tasks.add(task) await self.handshake_started_event.wait() return self.initial_response def process_subprotocol( self, headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]] ) -> Optional[Subprotocol]: """ We override the standard 'process_subprotocol' behavior here so that we return whatever subprotocol is sent in the 'accept' message. """ return self.accepted_subprotocol def send_500_response(self) -> None: msg = b"Internal Server Error" content = [ b"HTTP/1.1 500 Internal Server Error\r\n" b"content-type: text/plain; charset=utf-8\r\n", b"content-length: " + str(len(msg)).encode("ascii") + b"\r\n", b"connection: close\r\n", b"\r\n", msg, ] self.transport.write(b"".join(content)) # Allow handler task to terminate cleanly, as websockets doesn't cancel it by # itself (see https://github.com/encode/uvicorn/issues/920) self.handshake_started_event.set() async def ws_handler( # type: ignore[override] self, protocol: WebSocketServerProtocol, path: str ) -> Any: """ This is the main handler function for the 'websockets' implementation to call into. We just wait for close then return, and instead allow 'send' and 'receive' events to drive the flow. """ self.handshake_completed_event.set() await self.closed_event.wait() async def run_asgi(self) -> None: """ Wrapper around the ASGI callable, handling exceptions and unexpected termination states. 
""" try: result = await self.app(self.scope, self.asgi_receive, self.asgi_send) except BaseException as exc: self.closed_event.set() msg = "Exception in ASGI application\n" self.logger.error(msg, exc_info=exc) if not self.handshake_started_event.is_set(): self.send_500_response() else: await self.handshake_completed_event.wait() self.transport.close() else: self.closed_event.set() if not self.handshake_started_event.is_set(): msg = "ASGI callable returned without sending handshake." self.logger.error(msg) self.send_500_response() self.transport.close() elif result is not None: msg = "ASGI callable should return None, but returned '%s'." self.logger.error(msg, result) await self.handshake_completed_event.wait() self.transport.close() async def asgi_send(self, message: "ASGISendEvent") -> None: message_type = message["type"] if not self.handshake_started_event.is_set(): if message_type == "websocket.accept": message = cast("WebSocketAcceptEvent", message) self.logger.info( '%s - "WebSocket %s" [accepted]', self.scope["client"], get_path_with_query_string(self.scope), ) self.initial_response = None self.accepted_subprotocol = cast( Optional[Subprotocol], message.get("subprotocol") ) if "headers" in message: self.extra_headers.extend( # ASGI spec requires bytes # But for compatibility we need to convert it to strings (name.decode("latin-1"), value.decode("latin-1")) for name, value in message["headers"] ) self.handshake_started_event.set() elif message_type == "websocket.close": message = cast("WebSocketCloseEvent", message) self.logger.info( '%s - "WebSocket %s" 403', self.scope["client"], get_path_with_query_string(self.scope), ) self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"") self.handshake_started_event.set() self.closed_event.set() else: msg = ( "Expected ASGI message 'websocket.accept' or 'websocket.close', " "but got '%s'." ) raise RuntimeError(msg % message_type) elif not self.closed_event.is_set(): await self.handshake_completed_event.wait() if message_type == "websocket.send": message = cast("WebSocketSendEvent", message) bytes_data = message.get("bytes") text_data = message.get("text") data = text_data if bytes_data is None else bytes_data await self.send(data) # type: ignore[arg-type] elif message_type == "websocket.close": message = cast("WebSocketCloseEvent", message) code = message.get("code", 1000) reason = message.get("reason", "") or "" await self.close(code, reason) self.closed_event.set() else: msg = ( "Expected ASGI message 'websocket.send' or 'websocket.close'," " but got '%s'." ) raise RuntimeError(msg % message_type) else: msg = "Unexpected ASGI message '%s', after sending 'websocket.close'." raise RuntimeError(msg % message_type) async def asgi_receive( self, ) -> Union[ "WebSocketDisconnectEvent", "WebSocketConnectEvent", "WebSocketReceiveEvent" ]: if not self.connect_sent: self.connect_sent = True return {"type": "websocket.connect"} await self.handshake_completed_event.wait() if self.lost_connection_before_handshake: # If the handshake failed or the app closed before handshake completion, # use 1006 Abnormal Closure. 
return {"type": "websocket.disconnect", "code": 1006} if self.closed_event.is_set(): return {"type": "websocket.disconnect", "code": 1005} try: data = await self.recv() except ConnectionClosed as exc: self.closed_event.set() if self.ws_server.closing: return {"type": "websocket.disconnect", "code": 1012} return {"type": "websocket.disconnect", "code": exc.code} msg: WebSocketReceiveEvent = { # type: ignore[typeddict-item] "type": "websocket.receive" } if isinstance(data, str): msg["text"] = data else: msg["bytes"] = data return msg
size: 13,784 | lang: Python | avg_line_length: 34.898437 | max_line_length: 87 | alphanum_fraction: 0.580963
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/http/flow_control.py
import asyncio
import typing

if typing.TYPE_CHECKING:
    from asgiref.typing import (
        ASGIReceiveCallable,
        ASGISendCallable,
        HTTPResponseBodyEvent,
        HTTPResponseStartEvent,
        Scope,
    )

CLOSE_HEADER = (b"connection", b"close")

HIGH_WATER_LIMIT = 65536


class FlowControl:
    def __init__(self, transport: asyncio.Transport) -> None:
        self._transport = transport
        self.read_paused = False
        self.write_paused = False
        self._is_writable_event = asyncio.Event()
        self._is_writable_event.set()

    async def drain(self) -> None:
        await self._is_writable_event.wait()

    def pause_reading(self) -> None:
        if not self.read_paused:
            self.read_paused = True
            self._transport.pause_reading()

    def resume_reading(self) -> None:
        if self.read_paused:
            self.read_paused = False
            self._transport.resume_reading()

    def pause_writing(self) -> None:
        if not self.write_paused:
            self.write_paused = True
            self._is_writable_event.clear()

    def resume_writing(self) -> None:
        if self.write_paused:
            self.write_paused = False
            self._is_writable_event.set()


async def service_unavailable(
    scope: "Scope", receive: "ASGIReceiveCallable", send: "ASGISendCallable"
) -> None:
    response_start: "HTTPResponseStartEvent" = {
        "type": "http.response.start",
        "status": 503,
        "headers": [
            (b"content-type", b"text/plain; charset=utf-8"),
            (b"connection", b"close"),
        ],
    }
    await send(response_start)

    response_body: "HTTPResponseBodyEvent" = {
        "type": "http.response.body",
        "body": b"Service Unavailable",
        "more_body": False,
    }
    await send(response_body)
size: 1,844 | lang: Python | avg_line_length: 25.73913 | max_line_length: 76 | alphanum_fraction: 0.59436
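The intended pattern: the protocol's pause_writing()/resume_writing() transport callbacks toggle the internal event, and anything writing to the transport awaits drain() first, so transport backpressure propagates up to the ASGI sender. A writer honoring that contract might look like this (illustrative sketch; write_chunks is not part of the source):

# Sketch of a writer cooperating with FlowControl backpressure.
async def write_chunks(flow: FlowControl, transport: asyncio.Transport, chunks):
    for chunk in chunks:
        if flow.write_paused:
            # Blocks until the transport buffer drains below the low
            # water mark and resume_writing() sets the event again.
            await flow.drain()
        transport.write(chunk)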
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/http/auto.py
import asyncio
from typing import Type

AutoHTTPProtocol: Type[asyncio.Protocol]
try:
    import httptools  # noqa
except ImportError:  # pragma: no cover
    from uvicorn.protocols.http.h11_impl import H11Protocol

    AutoHTTPProtocol = H11Protocol
else:  # pragma: no cover
    from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol

    AutoHTTPProtocol = HttpToolsProtocol
size: 391 | lang: Python | avg_line_length: 25.133332 | max_line_length: 71 | alphanum_fraction: 0.780051
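Mirroring the websocket case, httptools is preferred when importable, with the pure-Python h11 implementation as the fallback. The choice can likewise be pinned via the http option (again a sketch assuming the standard uvicorn.run() signature):

import uvicorn

async def app(scope, receive, send):
    ...  # placeholder ASGI app

if __name__ == "__main__":
    # http accepts "auto" (default), "httptools", or "h11".
    uvicorn.run(app, http="h11")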
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/http/h11_impl.py
import asyncio import http import logging import sys from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union, cast, ) from urllib.parse import unquote import h11 from h11._connection import DEFAULT_MAX_INCOMPLETE_EVENT_SIZE from uvicorn.config import Config from uvicorn.logging import TRACE_LOG_LEVEL from uvicorn.protocols.http.flow_control import ( CLOSE_HEADER, HIGH_WATER_LIMIT, FlowControl, service_unavailable, ) from uvicorn.protocols.utils import ( get_client_addr, get_local_addr, get_path_with_query_string, get_remote_addr, is_ssl, ) from uvicorn.server import ServerState if sys.version_info < (3, 8): # pragma: py-gte-38 from typing_extensions import Literal else: # pragma: py-lt-38 from typing import Literal if TYPE_CHECKING: from asgiref.typing import ( ASGI3Application, ASGIReceiveEvent, ASGISendEvent, HTTPDisconnectEvent, HTTPRequestEvent, HTTPResponseBodyEvent, HTTPResponseStartEvent, HTTPScope, ) H11Event = Union[ h11.Request, h11.InformationalResponse, h11.Response, h11.Data, h11.EndOfMessage, h11.ConnectionClosed, ] def _get_status_phrase(status_code: int) -> bytes: try: return http.HTTPStatus(status_code).phrase.encode() except ValueError: return b"" STATUS_PHRASES = { status_code: _get_status_phrase(status_code) for status_code in range(100, 600) } class H11Protocol(asyncio.Protocol): def __init__( self, config: Config, server_state: ServerState, app_state: Dict[str, Any], _loop: Optional[asyncio.AbstractEventLoop] = None, ) -> None: if not config.loaded: config.load() self.config = config self.app = config.loaded_app self.loop = _loop or asyncio.get_event_loop() self.logger = logging.getLogger("uvicorn.error") self.access_logger = logging.getLogger("uvicorn.access") self.access_log = self.access_logger.hasHandlers() self.conn = h11.Connection( h11.SERVER, config.h11_max_incomplete_event_size if config.h11_max_incomplete_event_size is not None else DEFAULT_MAX_INCOMPLETE_EVENT_SIZE, ) self.ws_protocol_class = config.ws_protocol_class self.root_path = config.root_path self.limit_concurrency = config.limit_concurrency self.app_state = app_state # Timeouts self.timeout_keep_alive_task: Optional[asyncio.TimerHandle] = None self.timeout_keep_alive = config.timeout_keep_alive # Shared server state self.server_state = server_state self.connections = server_state.connections self.tasks = server_state.tasks # Per-connection state self.transport: asyncio.Transport = None # type: ignore[assignment] self.flow: FlowControl = None # type: ignore[assignment] self.server: Optional[Tuple[str, int]] = None self.client: Optional[Tuple[str, int]] = None self.scheme: Optional[Literal["http", "https"]] = None # Per-request state self.scope: HTTPScope = None # type: ignore[assignment] self.headers: List[Tuple[bytes, bytes]] = None # type: ignore[assignment] self.cycle: RequestResponseCycle = None # type: ignore[assignment] # Protocol interface def connection_made( # type: ignore[override] self, transport: asyncio.Transport ) -> None: self.connections.add(self) self.transport = transport self.flow = FlowControl(transport) self.server = get_local_addr(transport) self.client = get_remote_addr(transport) self.scheme = "https" if is_ssl(transport) else "http" if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sHTTP connection made", prefix) def connection_lost(self, exc: Optional[Exception]) -> None: self.connections.discard(self) if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " 
% self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sHTTP connection lost", prefix) if self.cycle and not self.cycle.response_complete: self.cycle.disconnected = True if self.conn.our_state != h11.ERROR: event = h11.ConnectionClosed() try: self.conn.send(event) except h11.LocalProtocolError: # Premature client disconnect pass if self.cycle is not None: self.cycle.message_event.set() if self.flow is not None: self.flow.resume_writing() if exc is None: self.transport.close() self._unset_keepalive_if_required() def eof_received(self) -> None: pass def _unset_keepalive_if_required(self) -> None: if self.timeout_keep_alive_task is not None: self.timeout_keep_alive_task.cancel() self.timeout_keep_alive_task = None def _get_upgrade(self) -> Optional[bytes]: connection = [] upgrade = None for name, value in self.headers: if name == b"connection": connection = [token.lower().strip() for token in value.split(b",")] if name == b"upgrade": upgrade = value.lower() if b"upgrade" in connection: return upgrade return None def _should_upgrade_to_ws(self) -> bool: if self.ws_protocol_class is None: if self.config.ws == "auto": msg = "Unsupported upgrade request." self.logger.warning(msg) msg = "No supported WebSocket library detected. Please use 'pip install uvicorn[standard]', or install 'websockets' or 'wsproto' manually." # noqa: E501 self.logger.warning(msg) return False return True def data_received(self, data: bytes) -> None: self._unset_keepalive_if_required() self.conn.receive_data(data) self.handle_events() def handle_events(self) -> None: while True: try: event = self.conn.next_event() except h11.RemoteProtocolError: msg = "Invalid HTTP request received." self.logger.warning(msg) self.send_400_response(msg) return event_type = type(event) if event_type is h11.NEED_DATA: break elif event_type is h11.PAUSED: # This case can occur in HTTP pipelining, so we need to # stop reading any more data, and ensure that at the end # of the active request/response cycle we handle any # events that have been buffered up. self.flow.pause_reading() break elif event_type is h11.Request: self.headers = [(key.lower(), value) for key, value in event.headers] raw_path, _, query_string = event.target.partition(b"?") self.scope = { # type: ignore[typeddict-item] "type": "http", "asgi": { "version": self.config.asgi_version, "spec_version": "2.3", }, "http_version": event.http_version.decode("ascii"), "server": self.server, "client": self.client, "scheme": self.scheme, "method": event.method.decode("ascii"), "root_path": self.root_path, "path": unquote(raw_path.decode("ascii")), "raw_path": raw_path, "query_string": query_string, "headers": self.headers, "state": self.app_state.copy(), } upgrade = self._get_upgrade() if upgrade == b"websocket" and self._should_upgrade_to_ws(): self.handle_websocket_upgrade(event) return # Handle 503 responses when 'limit_concurrency' is exceeded. if self.limit_concurrency is not None and ( len(self.connections) >= self.limit_concurrency or len(self.tasks) >= self.limit_concurrency ): app = service_unavailable message = "Exceeded concurrency limit." 
self.logger.warning(message) else: app = self.app self.cycle = RequestResponseCycle( scope=self.scope, conn=self.conn, transport=self.transport, flow=self.flow, logger=self.logger, access_logger=self.access_logger, access_log=self.access_log, default_headers=self.server_state.default_headers, message_event=asyncio.Event(), on_response=self.on_response_complete, ) task = self.loop.create_task(self.cycle.run_asgi(app)) task.add_done_callback(self.tasks.discard) self.tasks.add(task) elif event_type is h11.Data: if self.conn.our_state is h11.DONE: continue self.cycle.body += event.data if len(self.cycle.body) > HIGH_WATER_LIMIT: self.flow.pause_reading() self.cycle.message_event.set() elif event_type is h11.EndOfMessage: if self.conn.our_state is h11.DONE: self.transport.resume_reading() self.conn.start_next_cycle() continue self.cycle.more_body = False self.cycle.message_event.set() def handle_websocket_upgrade(self, event: H11Event) -> None: if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sUpgrading to WebSocket", prefix) self.connections.discard(self) output = [event.method, b" ", event.target, b" HTTP/1.1\r\n"] for name, value in self.headers: output += [name, b": ", value, b"\r\n"] output.append(b"\r\n") protocol = self.ws_protocol_class( # type: ignore[call-arg, misc] config=self.config, server_state=self.server_state, app_state=self.app_state, ) protocol.connection_made(self.transport) protocol.data_received(b"".join(output)) self.transport.set_protocol(protocol) def send_400_response(self, msg: str) -> None: reason = STATUS_PHRASES[400] headers = [ (b"content-type", b"text/plain; charset=utf-8"), (b"connection", b"close"), ] event = h11.Response(status_code=400, headers=headers, reason=reason) output = self.conn.send(event) self.transport.write(output) event = h11.Data(data=msg.encode("ascii")) output = self.conn.send(event) self.transport.write(output) event = h11.EndOfMessage() output = self.conn.send(event) self.transport.write(output) self.transport.close() def on_response_complete(self) -> None: self.server_state.total_requests += 1 if self.transport.is_closing(): return # Set a short Keep-Alive timeout. self._unset_keepalive_if_required() self.timeout_keep_alive_task = self.loop.call_later( self.timeout_keep_alive, self.timeout_keep_alive_handler ) # Unpause data reads if needed. self.flow.resume_reading() # Unblock any pipelined events. if self.conn.our_state is h11.DONE and self.conn.their_state is h11.DONE: self.conn.start_next_cycle() self.handle_events() def shutdown(self) -> None: """ Called by the server to commence a graceful shutdown. """ if self.cycle is None or self.cycle.response_complete: event = h11.ConnectionClosed() self.conn.send(event) self.transport.close() else: self.cycle.keep_alive = False def pause_writing(self) -> None: """ Called by the transport when the write buffer exceeds the high water mark. """ self.flow.pause_writing() def resume_writing(self) -> None: """ Called by the transport when the write buffer drops below the low water mark. """ self.flow.resume_writing() def timeout_keep_alive_handler(self) -> None: """ Called on a keep-alive connection if no new data is received after a short delay. 
""" if not self.transport.is_closing(): event = h11.ConnectionClosed() self.conn.send(event) self.transport.close() class RequestResponseCycle: def __init__( self, scope: "HTTPScope", conn: h11.Connection, transport: asyncio.Transport, flow: FlowControl, logger: logging.Logger, access_logger: logging.Logger, access_log: bool, default_headers: List[Tuple[bytes, bytes]], message_event: asyncio.Event, on_response: Callable[..., None], ) -> None: self.scope = scope self.conn = conn self.transport = transport self.flow = flow self.logger = logger self.access_logger = access_logger self.access_log = access_log self.default_headers = default_headers self.message_event = message_event self.on_response = on_response # Connection state self.disconnected = False self.keep_alive = True self.waiting_for_100_continue = conn.they_are_waiting_for_100_continue # Request state self.body = b"" self.more_body = True # Response state self.response_started = False self.response_complete = False # ASGI exception wrapper async def run_asgi(self, app: "ASGI3Application") -> None: try: result = await app( # type: ignore[func-returns-value] self.scope, self.receive, self.send ) except BaseException as exc: msg = "Exception in ASGI application\n" self.logger.error(msg, exc_info=exc) if not self.response_started: await self.send_500_response() else: self.transport.close() else: if result is not None: msg = "ASGI callable should return None, but returned '%s'." self.logger.error(msg, result) self.transport.close() elif not self.response_started and not self.disconnected: msg = "ASGI callable returned without starting response." self.logger.error(msg) await self.send_500_response() elif not self.response_complete and not self.disconnected: msg = "ASGI callable returned without completing response." self.logger.error(msg) self.transport.close() finally: self.on_response = lambda: None async def send_500_response(self) -> None: response_start_event: "HTTPResponseStartEvent" = { "type": "http.response.start", "status": 500, "headers": [ (b"content-type", b"text/plain; charset=utf-8"), (b"connection", b"close"), ], } await self.send(response_start_event) response_body_event: "HTTPResponseBodyEvent" = { "type": "http.response.body", "body": b"Internal Server Error", "more_body": False, } await self.send(response_body_event) # ASGI interface async def send(self, message: "ASGISendEvent") -> None: message_type = message["type"] if self.flow.write_paused and not self.disconnected: await self.flow.drain() if self.disconnected: return if not self.response_started: # Sending response status line and headers if message_type != "http.response.start": msg = "Expected ASGI message 'http.response.start', but got '%s'." 
raise RuntimeError(msg % message_type) message = cast("HTTPResponseStartEvent", message) self.response_started = True self.waiting_for_100_continue = False status_code = message["status"] headers = self.default_headers + list(message.get("headers", [])) if CLOSE_HEADER in self.scope["headers"] and CLOSE_HEADER not in headers: headers = headers + [CLOSE_HEADER] if self.access_log: self.access_logger.info( '%s - "%s %s HTTP/%s" %d', get_client_addr(self.scope), self.scope["method"], get_path_with_query_string(self.scope), self.scope["http_version"], status_code, ) # Write response status line and headers reason = STATUS_PHRASES[status_code] event = h11.Response( status_code=status_code, headers=headers, reason=reason ) output = self.conn.send(event) self.transport.write(output) elif not self.response_complete: # Sending response body if message_type != "http.response.body": msg = "Expected ASGI message 'http.response.body', but got '%s'." raise RuntimeError(msg % message_type) message = cast("HTTPResponseBodyEvent", message) body = message.get("body", b"") more_body = message.get("more_body", False) # Write response body if self.scope["method"] == "HEAD": event = h11.Data(data=b"") else: event = h11.Data(data=body) output = self.conn.send(event) self.transport.write(output) # Handle response completion if not more_body: self.response_complete = True self.message_event.set() event = h11.EndOfMessage() output = self.conn.send(event) self.transport.write(output) else: # Response already sent msg = "Unexpected ASGI message '%s' sent, after response already completed." raise RuntimeError(msg % message_type) if self.response_complete: if self.conn.our_state is h11.MUST_CLOSE or not self.keep_alive: event = h11.ConnectionClosed() self.conn.send(event) self.transport.close() self.on_response() async def receive(self) -> "ASGIReceiveEvent": if self.waiting_for_100_continue and not self.transport.is_closing(): event = h11.InformationalResponse( status_code=100, headers=[], reason="Continue" ) output = self.conn.send(event) self.transport.write(output) self.waiting_for_100_continue = False if not self.disconnected and not self.response_complete: self.flow.resume_reading() await self.message_event.wait() self.message_event.clear() message: "Union[HTTPDisconnectEvent, HTTPRequestEvent]" if self.disconnected or self.response_complete: message = {"type": "http.disconnect"} else: message = { "type": "http.request", "body": self.body, "more_body": self.more_body, } self.body = b"" return message
size: 20,487 | lang: Python | avg_line_length: 34.324138 | max_line_length: 169 | alphanum_fraction: 0.559574
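RequestResponseCycle implements the server side of the ASGI HTTP contract: exactly one http.response.start event, then one or more http.response.body events. For reference, the minimal app shape it expects (a sketch grounded in the ASGI spec, not code from this file):

# Minimal ASGI HTTP app (illustrative).
async def app(scope, receive, send):
    assert scope["type"] == "http"
    await send({
        "type": "http.response.start",
        "status": 200,
        "headers": [(b"content-type", b"text/plain")],
    })
    await send({"type": "http.response.body", "body": b"Hello, world!"})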
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/http/httptools_impl.py
import asyncio import http import logging import re import sys import urllib from asyncio.events import TimerHandle from collections import deque from typing import ( TYPE_CHECKING, Any, Callable, Deque, Dict, List, Optional, Tuple, Union, cast, ) import httptools from uvicorn.config import Config from uvicorn.logging import TRACE_LOG_LEVEL from uvicorn.protocols.http.flow_control import ( CLOSE_HEADER, HIGH_WATER_LIMIT, FlowControl, service_unavailable, ) from uvicorn.protocols.utils import ( get_client_addr, get_local_addr, get_path_with_query_string, get_remote_addr, is_ssl, ) from uvicorn.server import ServerState if sys.version_info < (3, 8): # pragma: py-gte-38 from typing_extensions import Literal else: # pragma: py-lt-38 from typing import Literal if TYPE_CHECKING: from asgiref.typing import ( ASGI3Application, ASGIReceiveEvent, ASGISendEvent, HTTPDisconnectEvent, HTTPRequestEvent, HTTPResponseBodyEvent, HTTPResponseStartEvent, HTTPScope, ) HEADER_RE = re.compile(b'[\x00-\x1F\x7F()<>@,;:[]={} \t\\"]') HEADER_VALUE_RE = re.compile(b"[\x00-\x1F\x7F]") def _get_status_line(status_code: int) -> bytes: try: phrase = http.HTTPStatus(status_code).phrase.encode() except ValueError: phrase = b"" return b"".join([b"HTTP/1.1 ", str(status_code).encode(), b" ", phrase, b"\r\n"]) STATUS_LINE = { status_code: _get_status_line(status_code) for status_code in range(100, 600) } class HttpToolsProtocol(asyncio.Protocol): def __init__( self, config: Config, server_state: ServerState, app_state: Dict[str, Any], _loop: Optional[asyncio.AbstractEventLoop] = None, ) -> None: if not config.loaded: config.load() self.config = config self.app = config.loaded_app self.loop = _loop or asyncio.get_event_loop() self.logger = logging.getLogger("uvicorn.error") self.access_logger = logging.getLogger("uvicorn.access") self.access_log = self.access_logger.hasHandlers() self.parser = httptools.HttpRequestParser(self) self.ws_protocol_class = config.ws_protocol_class self.root_path = config.root_path self.limit_concurrency = config.limit_concurrency self.app_state = app_state # Timeouts self.timeout_keep_alive_task: Optional[TimerHandle] = None self.timeout_keep_alive = config.timeout_keep_alive # Global state self.server_state = server_state self.connections = server_state.connections self.tasks = server_state.tasks # Per-connection state self.transport: asyncio.Transport = None # type: ignore[assignment] self.flow: FlowControl = None # type: ignore[assignment] self.server: Optional[Tuple[str, int]] = None self.client: Optional[Tuple[str, int]] = None self.scheme: Optional[Literal["http", "https"]] = None self.pipeline: Deque[Tuple[RequestResponseCycle, ASGI3Application]] = deque() # Per-request state self.scope: HTTPScope = None # type: ignore[assignment] self.headers: List[Tuple[bytes, bytes]] = None # type: ignore[assignment] self.expect_100_continue = False self.cycle: RequestResponseCycle = None # type: ignore[assignment] # Protocol interface def connection_made( # type: ignore[override] self, transport: asyncio.Transport ) -> None: self.connections.add(self) self.transport = transport self.flow = FlowControl(transport) self.server = get_local_addr(transport) self.client = get_remote_addr(transport) self.scheme = "https" if is_ssl(transport) else "http" if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sHTTP connection made", prefix) def connection_lost(self, exc: Optional[Exception]) -> None: self.connections.discard(self) if 
self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sHTTP connection lost", prefix) if self.cycle and not self.cycle.response_complete: self.cycle.disconnected = True if self.cycle is not None: self.cycle.message_event.set() if self.flow is not None: self.flow.resume_writing() if exc is None: self.transport.close() self._unset_keepalive_if_required() self.parser = None def eof_received(self) -> None: pass def _unset_keepalive_if_required(self) -> None: if self.timeout_keep_alive_task is not None: self.timeout_keep_alive_task.cancel() self.timeout_keep_alive_task = None def _get_upgrade(self) -> Optional[bytes]: connection = [] upgrade = None for name, value in self.headers: if name == b"connection": connection = [token.lower().strip() for token in value.split(b",")] if name == b"upgrade": upgrade = value.lower() if b"upgrade" in connection: return upgrade return None def _should_upgrade_to_ws(self, upgrade: Optional[bytes]) -> bool: if upgrade == b"websocket" and self.ws_protocol_class is not None: return True if self.config.ws == "auto": msg = "Unsupported upgrade request." self.logger.warning(msg) msg = "No supported WebSocket library detected. Please use 'pip install uvicorn[standard]', or install 'websockets' or 'wsproto' manually." # noqa: E501 self.logger.warning(msg) return False def _should_upgrade(self) -> bool: upgrade = self._get_upgrade() return self._should_upgrade_to_ws(upgrade) def data_received(self, data: bytes) -> None: self._unset_keepalive_if_required() try: self.parser.feed_data(data) except httptools.HttpParserError: msg = "Invalid HTTP request received." self.logger.warning(msg) self.send_400_response(msg) return except httptools.HttpParserUpgrade: upgrade = self._get_upgrade() if self._should_upgrade_to_ws(upgrade): self.handle_websocket_upgrade() def handle_websocket_upgrade(self) -> None: if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sUpgrading to WebSocket", prefix) self.connections.discard(self) method = self.scope["method"].encode() output = [method, b" ", self.url, b" HTTP/1.1\r\n"] for name, value in self.scope["headers"]: output += [name, b": ", value, b"\r\n"] output.append(b"\r\n") protocol = self.ws_protocol_class( # type: ignore[call-arg, misc] config=self.config, server_state=self.server_state, app_state=self.app_state, ) protocol.connection_made(self.transport) protocol.data_received(b"".join(output)) self.transport.set_protocol(protocol) def send_400_response(self, msg: str) -> None: content = [STATUS_LINE[400]] for name, value in self.server_state.default_headers: content.extend([name, b": ", value, b"\r\n"]) content.extend( [ b"content-type: text/plain; charset=utf-8\r\n", b"content-length: " + str(len(msg)).encode("ascii") + b"\r\n", b"connection: close\r\n", b"\r\n", msg.encode("ascii"), ] ) self.transport.write(b"".join(content)) self.transport.close() def on_message_begin(self) -> None: self.url = b"" self.expect_100_continue = False self.headers = [] self.scope = { # type: ignore[typeddict-item] "type": "http", "asgi": {"version": self.config.asgi_version, "spec_version": "2.3"}, "http_version": "1.1", "server": self.server, "client": self.client, "scheme": self.scheme, "root_path": self.root_path, "headers": self.headers, "state": self.app_state.copy(), } # Parser callbacks def on_url(self, url: bytes) -> None: self.url += url def on_header(self, name: bytes, value: bytes) -> None: 
name = name.lower() if name == b"expect" and value.lower() == b"100-continue": self.expect_100_continue = True self.headers.append((name, value)) def on_headers_complete(self) -> None: http_version = self.parser.get_http_version() method = self.parser.get_method() self.scope["method"] = method.decode("ascii") if http_version != "1.1": self.scope["http_version"] = http_version if self.parser.should_upgrade() and self._should_upgrade(): return parsed_url = httptools.parse_url(self.url) raw_path = parsed_url.path path = raw_path.decode("ascii") if "%" in path: path = urllib.parse.unquote(path) self.scope["path"] = path self.scope["raw_path"] = raw_path self.scope["query_string"] = parsed_url.query or b"" # Handle 503 responses when 'limit_concurrency' is exceeded. if self.limit_concurrency is not None and ( len(self.connections) >= self.limit_concurrency or len(self.tasks) >= self.limit_concurrency ): app = service_unavailable message = "Exceeded concurrency limit." self.logger.warning(message) else: app = self.app existing_cycle = self.cycle self.cycle = RequestResponseCycle( scope=self.scope, transport=self.transport, flow=self.flow, logger=self.logger, access_logger=self.access_logger, access_log=self.access_log, default_headers=self.server_state.default_headers, message_event=asyncio.Event(), expect_100_continue=self.expect_100_continue, keep_alive=http_version != "1.0", on_response=self.on_response_complete, ) if existing_cycle is None or existing_cycle.response_complete: # Standard case - start processing the request. task = self.loop.create_task(self.cycle.run_asgi(app)) task.add_done_callback(self.tasks.discard) self.tasks.add(task) else: # Pipelined HTTP requests need to be queued up. self.flow.pause_reading() self.pipeline.appendleft((self.cycle, app)) def on_body(self, body: bytes) -> None: if ( self.parser.should_upgrade() and self._should_upgrade() ) or self.cycle.response_complete: return self.cycle.body += body if len(self.cycle.body) > HIGH_WATER_LIMIT: self.flow.pause_reading() self.cycle.message_event.set() def on_message_complete(self) -> None: if ( self.parser.should_upgrade() and self._should_upgrade() ) or self.cycle.response_complete: return self.cycle.more_body = False self.cycle.message_event.set() def on_response_complete(self) -> None: # Callback for pipelined HTTP requests to be started. self.server_state.total_requests += 1 if self.transport.is_closing(): return # Set a short Keep-Alive timeout. self._unset_keepalive_if_required() self.timeout_keep_alive_task = self.loop.call_later( self.timeout_keep_alive, self.timeout_keep_alive_handler ) # Unpause data reads if needed. self.flow.resume_reading() # Unblock any pipelined events. if self.pipeline: cycle, app = self.pipeline.pop() task = self.loop.create_task(cycle.run_asgi(app)) task.add_done_callback(self.tasks.discard) self.tasks.add(task) def shutdown(self) -> None: """ Called by the server to commence a graceful shutdown. """ if self.cycle is None or self.cycle.response_complete: self.transport.close() else: self.cycle.keep_alive = False def pause_writing(self) -> None: """ Called by the transport when the write buffer exceeds the high water mark. """ self.flow.pause_writing() def resume_writing(self) -> None: """ Called by the transport when the write buffer drops below the low water mark. """ self.flow.resume_writing() def timeout_keep_alive_handler(self) -> None: """ Called on a keep-alive connection if no new data is received after a short delay. 
""" if not self.transport.is_closing(): self.transport.close() class RequestResponseCycle: def __init__( self, scope: "HTTPScope", transport: asyncio.Transport, flow: FlowControl, logger: logging.Logger, access_logger: logging.Logger, access_log: bool, default_headers: List[Tuple[bytes, bytes]], message_event: asyncio.Event, expect_100_continue: bool, keep_alive: bool, on_response: Callable[..., None], ): self.scope = scope self.transport = transport self.flow = flow self.logger = logger self.access_logger = access_logger self.access_log = access_log self.default_headers = default_headers self.message_event = message_event self.on_response = on_response # Connection state self.disconnected = False self.keep_alive = keep_alive self.waiting_for_100_continue = expect_100_continue # Request state self.body = b"" self.more_body = True # Response state self.response_started = False self.response_complete = False self.chunked_encoding: Optional[bool] = None self.expected_content_length = 0 # ASGI exception wrapper async def run_asgi(self, app: "ASGI3Application") -> None: try: result = await app( # type: ignore[func-returns-value] self.scope, self.receive, self.send ) except BaseException as exc: msg = "Exception in ASGI application\n" self.logger.error(msg, exc_info=exc) if not self.response_started: await self.send_500_response() else: self.transport.close() else: if result is not None: msg = "ASGI callable should return None, but returned '%s'." self.logger.error(msg, result) self.transport.close() elif not self.response_started and not self.disconnected: msg = "ASGI callable returned without starting response." self.logger.error(msg) await self.send_500_response() elif not self.response_complete and not self.disconnected: msg = "ASGI callable returned without completing response." self.logger.error(msg) self.transport.close() finally: self.on_response = lambda: None async def send_500_response(self) -> None: response_start_event: "HTTPResponseStartEvent" = { "type": "http.response.start", "status": 500, "headers": [ (b"content-type", b"text/plain; charset=utf-8"), (b"connection", b"close"), ], } await self.send(response_start_event) response_body_event: "HTTPResponseBodyEvent" = { "type": "http.response.body", "body": b"Internal Server Error", "more_body": False, } await self.send(response_body_event) # ASGI interface async def send(self, message: "ASGISendEvent") -> None: message_type = message["type"] if self.flow.write_paused and not self.disconnected: await self.flow.drain() if self.disconnected: return if not self.response_started: # Sending response status line and headers if message_type != "http.response.start": msg = "Expected ASGI message 'http.response.start', but got '%s'." 
raise RuntimeError(msg % message_type) message = cast("HTTPResponseStartEvent", message) self.response_started = True self.waiting_for_100_continue = False status_code = message["status"] headers = self.default_headers + list(message.get("headers", [])) if CLOSE_HEADER in self.scope["headers"] and CLOSE_HEADER not in headers: headers = headers + [CLOSE_HEADER] if self.access_log: self.access_logger.info( '%s - "%s %s HTTP/%s" %d', get_client_addr(self.scope), self.scope["method"], get_path_with_query_string(self.scope), self.scope["http_version"], status_code, ) # Write response status line and headers content = [STATUS_LINE[status_code]] for name, value in headers: if HEADER_RE.search(name): raise RuntimeError("Invalid HTTP header name.") if HEADER_VALUE_RE.search(value): raise RuntimeError("Invalid HTTP header value.") name = name.lower() if name == b"content-length" and self.chunked_encoding is None: self.expected_content_length = int(value.decode()) self.chunked_encoding = False elif name == b"transfer-encoding" and value.lower() == b"chunked": self.expected_content_length = 0 self.chunked_encoding = True elif name == b"connection" and value.lower() == b"close": self.keep_alive = False content.extend([name, b": ", value, b"\r\n"]) if ( self.chunked_encoding is None and self.scope["method"] != "HEAD" and status_code not in (204, 304) ): # Neither content-length nor transfer-encoding specified self.chunked_encoding = True content.append(b"transfer-encoding: chunked\r\n") content.append(b"\r\n") self.transport.write(b"".join(content)) elif not self.response_complete: # Sending response body if message_type != "http.response.body": msg = "Expected ASGI message 'http.response.body', but got '%s'." raise RuntimeError(msg % message_type) body = cast(bytes, message.get("body", b"")) more_body = message.get("more_body", False) # Write response body if self.scope["method"] == "HEAD": self.expected_content_length = 0 elif self.chunked_encoding: if body: content = [b"%x\r\n" % len(body), body, b"\r\n"] else: content = [] if not more_body: content.append(b"0\r\n\r\n") self.transport.write(b"".join(content)) else: num_bytes = len(body) if num_bytes > self.expected_content_length: raise RuntimeError("Response content longer than Content-Length") else: self.expected_content_length -= num_bytes self.transport.write(body) # Handle response completion if not more_body: if self.expected_content_length != 0: raise RuntimeError("Response content shorter than Content-Length") self.response_complete = True self.message_event.set() if not self.keep_alive: self.transport.close() self.on_response() else: # Response already sent msg = "Unexpected ASGI message '%s' sent, after response already completed." raise RuntimeError(msg % message_type) async def receive(self) -> "ASGIReceiveEvent": if self.waiting_for_100_continue and not self.transport.is_closing(): self.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n") self.waiting_for_100_continue = False if not self.disconnected and not self.response_complete: self.flow.resume_reading() await self.message_event.wait() self.message_event.clear() message: "Union[HTTPDisconnectEvent, HTTPRequestEvent]" if self.disconnected or self.response_complete: message = {"type": "http.disconnect"} else: message = { "type": "http.request", "body": self.body, "more_body": self.more_body, } self.body = b"" return message
21,866
Python
34.730392
165
0.570017
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/exceptions.py
class FormParserError(ValueError): """Base error class for our form parser.""" pass class ParseError(FormParserError): """This exception (or a subclass) is raised when there is an error while parsing something. """ #: This is the offset in the input data chunk (*NOT* the overall stream) in #: which the parse error occurred. It will be -1 if not specified. offset = -1 class MultipartParseError(ParseError): """This is a specific error that is raised when the MultipartParser detects an error while parsing. """ pass class QuerystringParseError(ParseError): """This is a specific error that is raised when the QuerystringParser detects an error while parsing. """ pass class DecodeError(ParseError): """This exception is raised when there is a decoding error - for example with the Base64Decoder or QuotedPrintableDecoder. """ pass # On Python 3.3, IOError is the same as OSError, so we don't want to inherit # from both of them. We handle this case below. if IOError is not OSError: # pragma: no cover class FileError(FormParserError, IOError, OSError): """Exception class for problems with the File class.""" pass else: # pragma: no cover class FileError(FormParserError, OSError): """Exception class for problems with the File class.""" pass
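
# Illustrative companion (editor-added, not library source): parse errors in
# this package carry an ``offset`` into the offending input chunk, so callers
# can report where parsing failed. ``describe_parse_error`` is a hypothetical
# helper, not part of the public API.
def describe_parse_error(err: ParseError) -> str:
    # offset is -1 when the parser could not attribute a position.
    if err.offset == -1:
        return "parse error: %s" % err
    return "parse error at chunk offset %d: %s" % (err.offset, err)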
1,410
Python
29.021276
79
0.678723
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/multipart.py
from .decoders import *
from .exceptions import *

import os
import re
import sys
import shutil
import logging
import tempfile
from io import BytesIO
from numbers import Number

# Unique missing object.
_missing = object()

# States for the querystring parser.
STATE_BEFORE_FIELD = 0
STATE_FIELD_NAME = 1
STATE_FIELD_DATA = 2

# States for the multipart parser
STATE_START = 0
STATE_START_BOUNDARY = 1
STATE_HEADER_FIELD_START = 2
STATE_HEADER_FIELD = 3
STATE_HEADER_VALUE_START = 4
STATE_HEADER_VALUE = 5
STATE_HEADER_VALUE_ALMOST_DONE = 6
STATE_HEADERS_ALMOST_DONE = 7
STATE_PART_DATA_START = 8
STATE_PART_DATA = 9
STATE_PART_DATA_END = 10
STATE_END = 11

STATES = [
    "START",
    "START_BOUNDARY",
    "HEADER_FIELD_START",
    "HEADER_FIELD",
    "HEADER_VALUE_START",
    "HEADER_VALUE",
    "HEADER_VALUE_ALMOST_DONE",
    "HEADERS_ALMOST_DONE",
    "PART_DATA_START",
    "PART_DATA",
    "PART_DATA_END",
    "END",
]

# Flags for the multipart parser.
FLAG_PART_BOUNDARY = 1
FLAG_LAST_BOUNDARY = 2

# Get constants.  Since iterating over a str on Python 2 gives you a 1-length
# string, but iterating over a bytes object on Python 3 gives you an integer,
# we need to save these constants.
CR = b'\r'[0]
LF = b'\n'[0]
COLON = b':'[0]
SPACE = b' '[0]
HYPHEN = b'-'[0]
AMPERSAND = b'&'[0]
SEMICOLON = b';'[0]
LOWER_A = b'a'[0]
LOWER_Z = b'z'[0]
NULL = b'\x00'[0]

# Lower-casing a character is different, because of the difference between
# str on Py2, and bytes on Py3.  Same with getting the ordinal value of a byte,
# and joining a list of bytes together.  These functions abstract that.
lower_char = lambda c: c | 0x20
ord_char = lambda c: c
join_bytes = lambda b: bytes(list(b))

# These are regexes for parsing header values.
SPECIAL_CHARS = re.escape(b'()<>@,;:\\"/[]?={} \t')
QUOTED_STR = br'"(?:\\.|[^"])*"'
VALUE_STR = br'(?:[^' + SPECIAL_CHARS + br']+|' + QUOTED_STR + br')'
OPTION_RE_STR = (
    br'(?:;|^)\s*([^' + SPECIAL_CHARS + br']+)\s*=\s*(' + VALUE_STR + br')'
)
OPTION_RE = re.compile(OPTION_RE_STR)
QUOTE = b'"'[0]


def parse_options_header(value):
    """
    Parses a Content-Type header into a value in the following format:
        (content_type, {parameters})
    """
    if not value:
        return (b'', {})

    # If we are passed a string, we assume that it conforms to WSGI and does
    # not contain any code point that's not in latin-1.
    if isinstance(value, str):  # pragma: no cover
        value = value.encode('latin-1')

    # If we have no options, return the string as-is.
    if b';' not in value:
        return (value.lower().strip(), {})

    # Split at the first semicolon, to get our value and then options.
    ctype, rest = value.split(b';', 1)
    options = {}

    # Parse the options.
    for match in OPTION_RE.finditer(rest):
        key = match.group(1).lower()
        value = match.group(2)
        if value[0] == QUOTE and value[-1] == QUOTE:
            # Unquote the value.
            value = value[1:-1]
            value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')

        # If the value is a filename, we need to fix a bug on IE6 that sends
        # the full file path instead of the filename.
        if key == b'filename':
            if value[1:3] == b':\\' or value[:2] == b'\\\\':
                value = value.split(b'\\')[-1]

        options[key] = value

    return ctype, options


class Field:
    """A Field object represents a (parsed) form field.  It represents a single
    field with a corresponding name and value.
The name that a :class:`Field` will be instantiated with is the same name that would be found in the following HTML:: <input name="name_goes_here" type="text"/> This class defines two methods, :meth:`on_data` and :meth:`on_end`, that will be called when data is written to the Field, and when the Field is finalized, respectively. :param name: the name of the form field """ def __init__(self, name): self._name = name self._value = [] # We cache the joined version of _value for speed. self._cache = _missing @classmethod def from_value(klass, name, value): """Create an instance of a :class:`Field`, and set the corresponding value - either None or an actual value. This method will also finalize the Field itself. :param name: the name of the form field :param value: the value of the form field - either a bytestring or None """ f = klass(name) if value is None: f.set_none() else: f.write(value) f.finalize() return f def write(self, data): """Write some data into the form field. :param data: a bytestring """ return self.on_data(data) def on_data(self, data): """This method is a callback that will be called whenever data is written to the Field. :param data: a bytestring """ self._value.append(data) self._cache = _missing return len(data) def on_end(self): """This method is called whenever the Field is finalized. """ if self._cache is _missing: self._cache = b''.join(self._value) def finalize(self): """Finalize the form field. """ self.on_end() def close(self): """Close the Field object. This will free any underlying cache. """ # Free our value array. if self._cache is _missing: self._cache = b''.join(self._value) del self._value def set_none(self): """Some fields in a querystring can possibly have a value of None - for example, the string "foo&bar=&baz=asdf" will have a field with the name "foo" and value None, one with name "bar" and value "", and one with name "baz" and value "asdf". Since the write() interface doesn't support writing None, this function will set the field value to None. """ self._cache = None @property def field_name(self): """This property returns the name of the field.""" return self._name @property def value(self): """This property returns the value of the form field.""" if self._cache is _missing: self._cache = b''.join(self._value) return self._cache def __eq__(self, other): if isinstance(other, Field): return ( self.field_name == other.field_name and self.value == other.value ) else: return NotImplemented def __repr__(self): if len(self.value) > 97: # We get the repr, and then insert three dots before the final # quote. v = repr(self.value[:97])[:-1] + "...'" else: v = repr(self.value) return "{}(field_name={!r}, value={})".format( self.__class__.__name__, self.field_name, v ) class File: """This class represents an uploaded file. It handles writing file data to either an in-memory file or a temporary file on-disk, if the optional threshold is passed. There are some options that can be passed to the File to change behavior of the class. Valid options are as follows: .. list-table:: :widths: 15 5 5 30 :header-rows: 1 * - Name - Type - Default - Description * - UPLOAD_DIR - `str` - None - The directory to store uploaded files in. If this is None, a temporary file will be created in the system's standard location. * - UPLOAD_DELETE_TMP - `bool` - True - Delete automatically created TMP file * - UPLOAD_KEEP_FILENAME - `bool` - False - Whether or not to keep the filename of the uploaded file. If True, then the filename will be converted to a safe representation (e.g. 
by removing any invalid path segments), and then saved with the same name). Otherwise, a temporary name will be used. * - UPLOAD_KEEP_EXTENSIONS - `bool` - False - Whether or not to keep the uploaded file's extension. If False, the file will be saved with the default temporary extension (usually ".tmp"). Otherwise, the file's extension will be maintained. Note that this will properly combine with the UPLOAD_KEEP_FILENAME setting. * - MAX_MEMORY_FILE_SIZE - `int` - 1 MiB - The maximum number of bytes of a File to keep in memory. By default, the contents of a File are kept into memory until a certain limit is reached, after which the contents of the File are written to a temporary file. This behavior can be disabled by setting this value to an appropriately large value (or, for example, infinity, such as `float('inf')`. :param file_name: The name of the file that this :class:`File` represents :param field_name: The field name that uploaded this file. Note that this can be None, if, for example, the file was uploaded with Content-Type application/octet-stream :param config: The configuration for this File. See above for valid configuration keys and their corresponding values. """ def __init__(self, file_name, field_name=None, config={}): # Save configuration, set other variables default. self.logger = logging.getLogger(__name__) self._config = config self._in_memory = True self._bytes_written = 0 self._fileobj = BytesIO() # Save the provided field/file name. self._field_name = field_name self._file_name = file_name # Our actual file name is None by default, since, depending on our # config, we may not actually use the provided name. self._actual_file_name = None # Split the extension from the filename. if file_name is not None: base, ext = os.path.splitext(file_name) self._file_base = base self._ext = ext @property def field_name(self): """The form field associated with this file. May be None if there isn't one, for example when we have an application/octet-stream upload. """ return self._field_name @property def file_name(self): """The file name given in the upload request. """ return self._file_name @property def actual_file_name(self): """The file name that this file is saved as. Will be None if it's not currently saved on disk. """ return self._actual_file_name @property def file_object(self): """The file object that we're currently writing to. Note that this will either be an instance of a :class:`io.BytesIO`, or a regular file object. """ return self._fileobj @property def size(self): """The total size of this file, counted as the number of bytes that currently have been written to the file. """ return self._bytes_written @property def in_memory(self): """A boolean representing whether or not this file object is currently stored in-memory or on-disk. """ return self._in_memory def flush_to_disk(self): """If the file is already on-disk, do nothing. Otherwise, copy from the in-memory buffer to a disk file, and then reassign our internal file object to this new disk file. Note that if you attempt to flush a file that is already on-disk, a warning will be logged to this module's logger. """ if not self._in_memory: self.logger.warning( "Trying to flush to disk when we're not in memory" ) return # Go back to the start of our file. self._fileobj.seek(0) # Open a new file. new_file = self._get_disk_file() # Copy the file objects. shutil.copyfileobj(self._fileobj, new_file) # Seek to the new position in our new file. new_file.seek(self._bytes_written) # Reassign the fileobject. 
old_fileobj = self._fileobj self._fileobj = new_file # We're no longer in memory. self._in_memory = False # Close the old file object. old_fileobj.close() def _get_disk_file(self): """This function is responsible for getting a file object on-disk for us. """ self.logger.info("Opening a file on disk") file_dir = self._config.get('UPLOAD_DIR') keep_filename = self._config.get('UPLOAD_KEEP_FILENAME', False) keep_extensions = self._config.get('UPLOAD_KEEP_EXTENSIONS', False) delete_tmp = self._config.get('UPLOAD_DELETE_TMP', True) # If we have a directory and are to keep the filename... if file_dir is not None and keep_filename: self.logger.info("Saving with filename in: %r", file_dir) # Build our filename. # TODO: what happens if we don't have a filename? fname = self._file_base if keep_extensions: fname = fname + self._ext path = os.path.join(file_dir, fname) try: self.logger.info("Opening file: %r", path) tmp_file = open(path, 'w+b') except OSError as e: tmp_file = None self.logger.exception("Error opening temporary file") raise FileError("Error opening temporary file: %r" % path) else: # Build options array. # Note that on Python 3, tempfile doesn't support byte names. We # encode our paths using the default filesystem encoding. options = {} if keep_extensions: ext = self._ext if isinstance(ext, bytes): ext = ext.decode(sys.getfilesystemencoding()) options['suffix'] = ext if file_dir is not None: d = file_dir if isinstance(d, bytes): d = d.decode(sys.getfilesystemencoding()) options['dir'] = d options['delete'] = delete_tmp # Create a temporary (named) file with the appropriate settings. self.logger.info("Creating a temporary file with options: %r", options) try: tmp_file = tempfile.NamedTemporaryFile(**options) except OSError: self.logger.exception("Error creating named temporary file") raise FileError("Error creating named temporary file") fname = tmp_file.name # Encode filename as bytes. if isinstance(fname, str): fname = fname.encode(sys.getfilesystemencoding()) self._actual_file_name = fname return tmp_file def write(self, data): """Write some data to the File. :param data: a bytestring """ return self.on_data(data) def on_data(self, data): """This method is a callback that will be called whenever data is written to the File. :param data: a bytestring """ pos = self._fileobj.tell() bwritten = self._fileobj.write(data) # true file objects write returns None if bwritten is None: bwritten = self._fileobj.tell() - pos # If the bytes written isn't the same as the length, just return. if bwritten != len(data): self.logger.warning("bwritten != len(data) (%d != %d)", bwritten, len(data)) return bwritten # Keep track of how many bytes we've written. self._bytes_written += bwritten # If we're in-memory and are over our limit, we create a file. if (self._in_memory and self._config.get('MAX_MEMORY_FILE_SIZE') is not None and (self._bytes_written > self._config.get('MAX_MEMORY_FILE_SIZE'))): self.logger.info("Flushing to disk") self.flush_to_disk() # Return the number of bytes written. return bwritten def on_end(self): """This method is called whenever the Field is finalized. """ # Flush the underlying file object self._fileobj.flush() def finalize(self): """Finalize the form file. This will not close the underlying file, but simply signal that we are finished writing to the File. """ self.on_end() def close(self): """Close the File object. This will actually close the underlying file object (whether it's a :class:`io.BytesIO` or an actual file object). 
""" self._fileobj.close() def __repr__(self): return "{}(file_name={!r}, field_name={!r})".format( self.__class__.__name__, self.file_name, self.field_name ) class BaseParser: """This class is the base class for all parsers. It contains the logic for calling and adding callbacks. A callback can be one of two different forms. "Notification callbacks" are callbacks that are called when something happens - for example, when a new part of a multipart message is encountered by the parser. "Data callbacks" are called when we get some sort of data - for example, part of the body of a multipart chunk. Notification callbacks are called with no parameters, whereas data callbacks are called with three, as follows:: data_callback(data, start, end) The "data" parameter is a bytestring (i.e. "foo" on Python 2, or b"foo" on Python 3). "start" and "end" are integer indexes into the "data" string that represent the data of interest. Thus, in a data callback, the slice `data[start:end]` represents the data that the callback is "interested in". The callback is not passed a copy of the data, since copying severely hurts performance. """ def __init__(self): self.logger = logging.getLogger(__name__) def callback(self, name, data=None, start=None, end=None): """This function calls a provided callback with some data. If the callback is not set, will do nothing. :param name: The name of the callback to call (as a string). :param data: Data to pass to the callback. If None, then it is assumed that the callback is a notification callback, and no parameters are given. :param end: An integer that is passed to the data callback. :param start: An integer that is passed to the data callback. """ name = "on_" + name func = self.callbacks.get(name) if func is None: return # Depending on whether we're given a buffer... if data is not None: # Don't do anything if we have start == end. if start is not None and start == end: return self.logger.debug("Calling %s with data[%d:%d]", name, start, end) func(data, start, end) else: self.logger.debug("Calling %s with no data", name) func() def set_callback(self, name, new_func): """Update the function for a callback. Removes from the callbacks dict if new_func is None. :param name: The name of the callback to call (as a string). :param new_func: The new function for the callback. If None, then the callback will be removed (with no error if it does not exist). """ if new_func is None: self.callbacks.pop('on_' + name, None) else: self.callbacks['on_' + name] = new_func def close(self): pass # pragma: no cover def finalize(self): pass # pragma: no cover def __repr__(self): return "%s()" % self.__class__.__name__ class OctetStreamParser(BaseParser): """This parser parses an octet-stream request body and calls callbacks when incoming data is received. Callbacks are as follows: .. list-table:: :widths: 15 10 30 :header-rows: 1 * - Callback Name - Parameters - Description * - on_start - None - Called when the first data is parsed. * - on_data - data, start, end - Called for each data chunk that is parsed. * - on_end - None - Called when the parser is finished parsing all data. :param callbacks: A dictionary of callbacks. See the documentation for :class:`BaseParser`. :param max_size: The maximum size of body to parse. Defaults to infinity - i.e. unbounded. 
""" def __init__(self, callbacks={}, max_size=float('inf')): super().__init__() self.callbacks = callbacks self._started = False if not isinstance(max_size, Number) or max_size < 1: raise ValueError("max_size must be a positive number, not %r" % max_size) self.max_size = max_size self._current_size = 0 def write(self, data): """Write some data to the parser, which will perform size verification, and then pass the data to the underlying callback. :param data: a bytestring """ if not self._started: self.callback('start') self._started = True # Truncate data length. data_len = len(data) if (self._current_size + data_len) > self.max_size: # We truncate the length of data that we are to process. new_size = int(self.max_size - self._current_size) self.logger.warning("Current size is %d (max %d), so truncating " "data length from %d to %d", self._current_size, self.max_size, data_len, new_size) data_len = new_size # Increment size, then callback, in case there's an exception. self._current_size += data_len self.callback('data', data, 0, data_len) return data_len def finalize(self): """Finalize this parser, which signals to that we are finished parsing, and sends the on_end callback. """ self.callback('end') def __repr__(self): return "%s()" % self.__class__.__name__ class QuerystringParser(BaseParser): """This is a streaming querystring parser. It will consume data, and call the callbacks given when it has data. .. list-table:: :widths: 15 10 30 :header-rows: 1 * - Callback Name - Parameters - Description * - on_field_start - None - Called when a new field is encountered. * - on_field_name - data, start, end - Called when a portion of a field's name is encountered. * - on_field_data - data, start, end - Called when a portion of a field's data is encountered. * - on_field_end - None - Called when the end of a field is encountered. * - on_end - None - Called when the parser is finished parsing all data. :param callbacks: A dictionary of callbacks. See the documentation for :class:`BaseParser`. :param strict_parsing: Whether or not to parse the body strictly. Defaults to False. If this is set to True, then the behavior of the parser changes as the following: if a field has a value with an equal sign (e.g. "foo=bar", or "foo="), it is always included. If a field has no equals sign (e.g. "...&name&..."), it will be treated as an error if 'strict_parsing' is True, otherwise included. If an error is encountered, then a :class:`multipart.exceptions.QuerystringParseError` will be raised. :param max_size: The maximum size of body to parse. Defaults to infinity - i.e. unbounded. """ def __init__(self, callbacks={}, strict_parsing=False, max_size=float('inf')): super().__init__() self.state = STATE_BEFORE_FIELD self._found_sep = False self.callbacks = callbacks # Max-size stuff if not isinstance(max_size, Number) or max_size < 1: raise ValueError("max_size must be a positive number, not %r" % max_size) self.max_size = max_size self._current_size = 0 # Should parsing be strict? self.strict_parsing = strict_parsing def write(self, data): """Write some data to the parser, which will perform size verification, parse into either a field name or value, and then pass the corresponding data to the underlying callback. If an error is encountered while parsing, a QuerystringParseError will be raised. The "offset" attribute of the raised exception will be set to the offset in the input data chunk (NOT the overall stream) that caused the error. :param data: a bytestring """ # Handle sizing. 
data_len = len(data) if (self._current_size + data_len) > self.max_size: # We truncate the length of data that we are to process. new_size = int(self.max_size - self._current_size) self.logger.warning("Current size is %d (max %d), so truncating " "data length from %d to %d", self._current_size, self.max_size, data_len, new_size) data_len = new_size l = 0 try: l = self._internal_write(data, data_len) finally: self._current_size += l return l def _internal_write(self, data, length): state = self.state strict_parsing = self.strict_parsing found_sep = self._found_sep i = 0 while i < length: ch = data[i] # Depending on our state... if state == STATE_BEFORE_FIELD: # If the 'found_sep' flag is set, we've already encountered # and skipped a single separator. If so, we check our strict # parsing flag and decide what to do. Otherwise, we haven't # yet reached a separator, and thus, if we do, we need to skip # it as it will be the boundary between fields that's supposed # to be there. if ch == AMPERSAND or ch == SEMICOLON: if found_sep: # If we're parsing strictly, we disallow blank chunks. if strict_parsing: e = QuerystringParseError( "Skipping duplicate ampersand/semicolon at " "%d" % i ) e.offset = i raise e else: self.logger.debug("Skipping duplicate ampersand/" "semicolon at %d", i) else: # This case is when we're skipping the (first) # separator between fields, so we just set our flag # and continue on. found_sep = True else: # Emit a field-start event, and go to that state. Also, # reset the "found_sep" flag, for the next time we get to # this state. self.callback('field_start') i -= 1 state = STATE_FIELD_NAME found_sep = False elif state == STATE_FIELD_NAME: # Try and find a separator - we ensure that, if we do, we only # look for the equal sign before it. sep_pos = data.find(b'&', i) if sep_pos == -1: sep_pos = data.find(b';', i) # See if we can find an equals sign in the remaining data. If # so, we can immediately emit the field name and jump to the # data state. if sep_pos != -1: equals_pos = data.find(b'=', i, sep_pos) else: equals_pos = data.find(b'=', i) if equals_pos != -1: # Emit this name. self.callback('field_name', data, i, equals_pos) # Jump i to this position. Note that it will then have 1 # added to it below, which means the next iteration of this # loop will inspect the character after the equals sign. i = equals_pos state = STATE_FIELD_DATA else: # No equals sign found. if not strict_parsing: # See also comments in the STATE_FIELD_DATA case below. # If we found the separator, we emit the name and just # end - there's no data callback at all (not even with # a blank value). if sep_pos != -1: self.callback('field_name', data, i, sep_pos) self.callback('field_end') i = sep_pos - 1 state = STATE_BEFORE_FIELD else: # Otherwise, no separator in this block, so the # rest of this chunk must be a name. self.callback('field_name', data, i, length) i = length else: # We're parsing strictly. If we find a separator, # this is an error - we require an equals sign. if sep_pos != -1: e = QuerystringParseError( "When strict_parsing is True, we require an " "equals sign in all field chunks. Did not " "find one in the chunk that starts at %d" % (i,) ) e.offset = i raise e # No separator in the rest of this chunk, so it's just # a field name. self.callback('field_name', data, i, length) i = length elif state == STATE_FIELD_DATA: # Try finding either an ampersand or a semicolon after this # position. 
sep_pos = data.find(b'&', i) if sep_pos == -1: sep_pos = data.find(b';', i) # If we found it, callback this bit as data and then go back # to expecting to find a field. if sep_pos != -1: self.callback('field_data', data, i, sep_pos) self.callback('field_end') # Note that we go to the separator, which brings us to the # "before field" state. This allows us to properly emit # "field_start" events only when we actually have data for # a field of some sort. i = sep_pos - 1 state = STATE_BEFORE_FIELD # Otherwise, emit the rest as data and finish. else: self.callback('field_data', data, i, length) i = length else: # pragma: no cover (error case) msg = "Reached an unknown state %d at %d" % (state, i) self.logger.warning(msg) e = QuerystringParseError(msg) e.offset = i raise e i += 1 self.state = state self._found_sep = found_sep return len(data) def finalize(self): """Finalize this parser, which signals to that we are finished parsing, if we're still in the middle of a field, an on_field_end callback, and then the on_end callback. """ # If we're currently in the middle of a field, we finish it. if self.state == STATE_FIELD_DATA: self.callback('field_end') self.callback('end') def __repr__(self): return "{}(strict_parsing={!r}, max_size={!r})".format( self.__class__.__name__, self.strict_parsing, self.max_size ) class MultipartParser(BaseParser): """This class is a streaming multipart/form-data parser. .. list-table:: :widths: 15 10 30 :header-rows: 1 * - Callback Name - Parameters - Description * - on_part_begin - None - Called when a new part of the multipart message is encountered. * - on_part_data - data, start, end - Called when a portion of a part's data is encountered. * - on_part_end - None - Called when the end of a part is reached. * - on_header_begin - None - Called when we've found a new header in a part of a multipart message * - on_header_field - data, start, end - Called each time an additional portion of a header is read (i.e. the part of the header that is before the colon; the "Foo" in "Foo: Bar"). * - on_header_value - data, start, end - Called when we get data for a header. * - on_header_end - None - Called when the current header is finished - i.e. we've reached the newline at the end of the header. * - on_headers_finished - None - Called when all headers are finished, and before the part data starts. * - on_end - None - Called when the parser is finished parsing all data. :param boundary: The multipart boundary. This is required, and must match what is given in the HTTP request - usually in the Content-Type header. :param callbacks: A dictionary of callbacks. See the documentation for :class:`BaseParser`. :param max_size: The maximum size of body to parse. Defaults to infinity - i.e. unbounded. """ def __init__(self, boundary, callbacks={}, max_size=float('inf')): # Initialize parser state. super().__init__() self.state = STATE_START self.index = self.flags = 0 self.callbacks = callbacks if not isinstance(max_size, Number) or max_size < 1: raise ValueError("max_size must be a positive number, not %r" % max_size) self.max_size = max_size self._current_size = 0 # Setup marks. These are used to track the state of data received. self.marks = {} # TODO: Actually use this rather than the dumb version we currently use # # Precompute the skip table for the Boyer-Moore-Horspool algorithm. 
# skip = [len(boundary) for x in range(256)] # for i in range(len(boundary) - 1): # skip[ord_char(boundary[i])] = len(boundary) - i - 1 # # # We use a tuple since it's a constant, and marginally faster. # self.skip = tuple(skip) # Save our boundary. if isinstance(boundary, str): # pragma: no cover boundary = boundary.encode('latin-1') self.boundary = b'\r\n--' + boundary # Get a set of characters that belong to our boundary. self.boundary_chars = frozenset(self.boundary) # We also create a lookbehind list. # Note: the +8 is since we can have, at maximum, "\r\n--" + boundary + # "--\r\n" at the final boundary, and the length of '\r\n--' and # '--\r\n' is 8 bytes. self.lookbehind = [NULL for x in range(len(boundary) + 8)] def write(self, data): """Write some data to the parser, which will perform size verification, and then parse the data into the appropriate location (e.g. header, data, etc.), and pass this on to the underlying callback. If an error is encountered, a MultipartParseError will be raised. The "offset" attribute on the raised exception will be set to the offset of the byte in the input chunk that caused the error. :param data: a bytestring """ # Handle sizing. data_len = len(data) if (self._current_size + data_len) > self.max_size: # We truncate the length of data that we are to process. new_size = int(self.max_size - self._current_size) self.logger.warning("Current size is %d (max %d), so truncating " "data length from %d to %d", self._current_size, self.max_size, data_len, new_size) data_len = new_size l = 0 try: l = self._internal_write(data, data_len) finally: self._current_size += l return l def _internal_write(self, data, length): # Get values from locals. boundary = self.boundary # Get our state, flags and index. These are persisted between calls to # this function. state = self.state index = self.index flags = self.flags # Our index defaults to 0. i = 0 # Set a mark. def set_mark(name): self.marks[name] = i # Remove a mark. def delete_mark(name, reset=False): self.marks.pop(name, None) # Helper function that makes calling a callback with data easier. The # 'remaining' parameter will callback from the marked value until the # end of the buffer, and reset the mark, instead of deleting it. This # is used at the end of the function to call our callbacks with any # remaining data in this chunk. def data_callback(name, remaining=False): marked_index = self.marks.get(name) if marked_index is None: return # If we're getting remaining data, we ignore the current i value # and just call with the remaining data. if remaining: self.callback(name, data, marked_index, length) self.marks[name] = 0 # Otherwise, we call it from the mark to the current byte we're # processing. else: self.callback(name, data, marked_index, i) self.marks.pop(name, None) # For each byte... while i < length: c = data[i] if state == STATE_START: # Skip leading newlines if c == CR or c == LF: i += 1 self.logger.debug("Skipping leading CR/LF at %d", i) continue # index is used as in index into our boundary. Set to 0. index = 0 # Move to the next state, but decrement i so that we re-process # this character. state = STATE_START_BOUNDARY i -= 1 elif state == STATE_START_BOUNDARY: # Check to ensure that the last 2 characters in our boundary # are CRLF. if index == len(boundary) - 2: if c != CR: # Error! 
msg = "Did not find CR at end of boundary (%d)" % (i,) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e index += 1 elif index == len(boundary) - 2 + 1: if c != LF: msg = "Did not find LF at end of boundary (%d)" % (i,) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e # The index is now used for indexing into our boundary. index = 0 # Callback for the start of a part. self.callback('part_begin') # Move to the next character and state. state = STATE_HEADER_FIELD_START else: # Check to ensure our boundary matches if c != boundary[index + 2]: msg = "Did not find boundary character %r at index " \ "%d" % (c, index + 2) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e # Increment index into boundary and continue. index += 1 elif state == STATE_HEADER_FIELD_START: # Mark the start of a header field here, reset the index, and # continue parsing our header field. index = 0 # Set a mark of our header field. set_mark('header_field') # Move to parsing header fields. state = STATE_HEADER_FIELD i -= 1 elif state == STATE_HEADER_FIELD: # If we've reached a CR at the beginning of a header, it means # that we've reached the second of 2 newlines, and so there are # no more headers to parse. if c == CR: delete_mark('header_field') state = STATE_HEADERS_ALMOST_DONE i += 1 continue # Increment our index in the header. index += 1 # Do nothing if we encounter a hyphen. if c == HYPHEN: pass # If we've reached a colon, we're done with this header. elif c == COLON: # A 0-length header is an error. if index == 1: msg = "Found 0-length header at %d" % (i,) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e # Call our callback with the header field. data_callback('header_field') # Move to parsing the header value. state = STATE_HEADER_VALUE_START else: # Lower-case this character, and ensure that it is in fact # a valid letter. If not, it's an error. cl = lower_char(c) if cl < LOWER_A or cl > LOWER_Z: msg = "Found non-alphanumeric character %r in " \ "header at %d" % (c, i) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e elif state == STATE_HEADER_VALUE_START: # Skip leading spaces. if c == SPACE: i += 1 continue # Mark the start of the header value. set_mark('header_value') # Move to the header-value state, reprocessing this character. state = STATE_HEADER_VALUE i -= 1 elif state == STATE_HEADER_VALUE: # If we've got a CR, we're nearly done our headers. Otherwise, # we do nothing and just move past this character. if c == CR: data_callback('header_value') self.callback('header_end') state = STATE_HEADER_VALUE_ALMOST_DONE elif state == STATE_HEADER_VALUE_ALMOST_DONE: # The last character should be a LF. If not, it's an error. if c != LF: msg = "Did not find LF character at end of header " \ "(found %r)" % (c,) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e # Move back to the start of another header. Note that if that # state detects ANOTHER newline, it'll trigger the end of our # headers. state = STATE_HEADER_FIELD_START elif state == STATE_HEADERS_ALMOST_DONE: # We're almost done our headers. This is reached when we parse # a CR at the beginning of a header, so our next character # should be a LF, or it's an error. 
if c != LF: msg = f"Did not find LF at end of headers (found {c!r})" self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e self.callback('headers_finished') state = STATE_PART_DATA_START elif state == STATE_PART_DATA_START: # Mark the start of our part data. set_mark('part_data') # Start processing part data, including this character. state = STATE_PART_DATA i -= 1 elif state == STATE_PART_DATA: # We're processing our part data right now. During this, we # need to efficiently search for our boundary, since any data # on any number of lines can be a part of the current data. # We use the Boyer-Moore-Horspool algorithm to efficiently # search through the remainder of the buffer looking for our # boundary. # Save the current value of our index. We use this in case we # find part of a boundary, but it doesn't match fully. prev_index = index # Set up variables. boundary_length = len(boundary) boundary_end = boundary_length - 1 data_length = length boundary_chars = self.boundary_chars # If our index is 0, we're starting a new part, so start our # search. if index == 0: # Search forward until we either hit the end of our buffer, # or reach a character that's in our boundary. i += boundary_end while i < data_length - 1 and data[i] not in boundary_chars: i += boundary_length # Reset i back the length of our boundary, which is the # earliest possible location that could be our match (i.e. # if we've just broken out of our loop since we saw the # last character in our boundary) i -= boundary_end c = data[i] # Now, we have a couple of cases here. If our index is before # the end of the boundary... if index < boundary_length: # If the character matches... if boundary[index] == c: # If we found a match for our boundary, we send the # existing data. if index == 0: data_callback('part_data') # The current character matches, so continue! index += 1 else: index = 0 # Our index is equal to the length of our boundary! elif index == boundary_length: # First we increment it. index += 1 # Now, if we've reached a newline, we need to set this as # the potential end of our boundary. if c == CR: flags |= FLAG_PART_BOUNDARY # Otherwise, if this is a hyphen, we might be at the last # of all boundaries. elif c == HYPHEN: flags |= FLAG_LAST_BOUNDARY # Otherwise, we reset our index, since this isn't either a # newline or a hyphen. else: index = 0 # Our index is right after the part boundary, which should be # a LF. elif index == boundary_length + 1: # If we're at a part boundary (i.e. we've seen a CR # character already)... if flags & FLAG_PART_BOUNDARY: # We need a LF character next. if c == LF: # Unset the part boundary flag. flags &= (~FLAG_PART_BOUNDARY) # Callback indicating that we've reached the end of # a part, and are starting a new one. self.callback('part_end') self.callback('part_begin') # Move to parsing new headers. index = 0 state = STATE_HEADER_FIELD_START i += 1 continue # We didn't find an LF character, so no match. Reset # our index and clear our flag. index = 0 flags &= (~FLAG_PART_BOUNDARY) # Otherwise, if we're at the last boundary (i.e. we've # seen a hyphen already)... elif flags & FLAG_LAST_BOUNDARY: # We need a second hyphen here. if c == HYPHEN: # Callback to end the current part, and then the # message. self.callback('part_end') self.callback('end') state = STATE_END else: # No match, so reset index. index = 0 # If we have an index, we need to keep this byte for later, in # case we can't match the full boundary. 
if index > 0: self.lookbehind[index - 1] = c # Otherwise, our index is 0. If the previous index is not, it # means we reset something, and we need to take the data we # thought was part of our boundary and send it along as actual # data. elif prev_index > 0: # Callback to write the saved data. lb_data = join_bytes(self.lookbehind) self.callback('part_data', lb_data, 0, prev_index) # Overwrite our previous index. prev_index = 0 # Re-set our mark for part data. set_mark('part_data') # Re-consider the current character, since this could be # the start of the boundary itself. i -= 1 elif state == STATE_END: # Do nothing and just consume a byte in the end state. if c not in (CR, LF): self.logger.warning("Consuming a byte '0x%x' in the end state", c) else: # pragma: no cover (error case) # We got into a strange state somehow! Just stop processing. msg = "Reached an unknown state %d at %d" % (state, i) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e # Move to the next byte. i += 1 # We call our callbacks with any remaining data. Note that we pass # the 'remaining' flag, which sets the mark back to 0 instead of # deleting it, if it's found. This is because, if the mark is found # at this point, we assume that there's data for one of these things # that has been parsed, but not yet emitted. And, as such, it implies # that we haven't yet reached the end of this 'thing'. So, by setting # the mark to 0, we cause any data callbacks that take place in future # calls to this function to start from the beginning of that buffer. data_callback('header_field', True) data_callback('header_value', True) data_callback('part_data', True) # Save values to locals. self.state = state self.index = index self.flags = flags # Return our data length to indicate no errors, and that we processed # all of it. return length def finalize(self): """Finalize this parser, which signals to that we are finished parsing. Note: It does not currently, but in the future, it will verify that we are in the final state of the parser (i.e. the end of the multipart message is well-formed), and, if not, throw an error. """ # TODO: verify that we're in the state STATE_END, otherwise throw an # error or otherwise state that we're not finished parsing. pass def __repr__(self): return f"{self.__class__.__name__}(boundary={self.boundary!r})" class FormParser: """This class is the all-in-one form parser. Given all the information necessary to parse a form, it will instantiate the correct parser, create the proper :class:`Field` and :class:`File` classes to store the data that is parsed, and call the two given callbacks with each field and file as they become available. :param content_type: The Content-Type of the incoming request. This is used to select the appropriate parser. :param on_field: The callback to call when a field has been parsed and is ready for usage. See above for parameters. :param on_file: The callback to call when a file has been parsed and is ready for usage. See above for parameters. :param on_end: An optional callback to call when all fields and files in a request has been parsed. Can be None. :param boundary: If the request is a multipart/form-data request, this should be the boundary of the request, as given in the Content-Type header, as a bytestring. :param file_name: If the request is of type application/octet-stream, then the body of the request will not contain any information about the uploaded file. In such cases, you can provide the file name of the uploaded file manually. 
    :param FileClass: The class to use for uploaded files.  Defaults to
                      :class:`File`, but you can provide your own class if you
                      wish to customize behaviour.  The class will be
                      instantiated as FileClass(file_name, field_name), and it
                      must provide the following functions::

                          file_instance.write(data)
                          file_instance.finalize()
                          file_instance.close()

    :param FieldClass: The class to use for uploaded fields.  Defaults to
                       :class:`Field`, but you can provide your own class if
                       you wish to customize behaviour.  The class will be
                       instantiated as FieldClass(field_name), and it must
                       provide the following functions::

                           field_instance.write(data)
                           field_instance.finalize()
                           field_instance.close()

    :param config: Configuration to use for this FormParser.  The default
                   values are taken from the DEFAULT_CONFIG value, and then
                   any keys present in this dictionary will overwrite the
                   default values.
    """

    #: This is the default configuration for our form parser.
    #: Note: all file sizes should be in bytes.
    DEFAULT_CONFIG = {
        'MAX_BODY_SIZE': float('inf'),
        'MAX_MEMORY_FILE_SIZE': 1 * 1024 * 1024,
        'UPLOAD_DIR': None,
        'UPLOAD_KEEP_FILENAME': False,
        'UPLOAD_KEEP_EXTENSIONS': False,

        # Error on invalid Content-Transfer-Encoding?
        'UPLOAD_ERROR_ON_BAD_CTE': False,
    }

    def __init__(self, content_type, on_field, on_file, on_end=None,
                 boundary=None, file_name=None, FileClass=File,
                 FieldClass=Field, config={}):

        self.logger = logging.getLogger(__name__)

        # Save variables.
        self.content_type = content_type
        self.boundary = boundary
        self.bytes_received = 0
        self.parser = None

        # Save callbacks.
        self.on_field = on_field
        self.on_file = on_file
        self.on_end = on_end

        # Save classes.
        self.FileClass = FileClass
        self.FieldClass = FieldClass

        # Set configuration options.
        self.config = self.DEFAULT_CONFIG.copy()
        self.config.update(config)

        # Depending on the Content-Type, we instantiate the correct parser.
        if content_type == 'application/octet-stream':
            # Work around the lack of 'nonlocal' in Py2
            class vars:
                f = None

            def on_start():
                vars.f = FileClass(file_name, None, config=self.config)

            def on_data(data, start, end):
                vars.f.write(data[start:end])

            def on_end():
                # Finalize the file itself.
                vars.f.finalize()

                # Call our callback.
                on_file(vars.f)

                # Call the on-end callback.
                if self.on_end is not None:
                    self.on_end()

            callbacks = {
                'on_start': on_start,
                'on_data': on_data,
                'on_end': on_end,
            }

            # Instantiate an octet-stream parser
            parser = OctetStreamParser(callbacks,
                                       max_size=self.config['MAX_BODY_SIZE'])

        elif (content_type == 'application/x-www-form-urlencoded' or
              content_type == 'application/x-url-encoded'):

            name_buffer = []

            class vars:
                f = None

            def on_field_start():
                pass

            def on_field_name(data, start, end):
                name_buffer.append(data[start:end])

            def on_field_data(data, start, end):
                if vars.f is None:
                    vars.f = FieldClass(b''.join(name_buffer))
                    del name_buffer[:]
                vars.f.write(data[start:end])

            def on_field_end():
                # Finalize and call callback.
                if vars.f is None:
                    # If we get here, it's because there was no field data.
                    # We create a field, set it to None, and then continue.
                    vars.f = FieldClass(b''.join(name_buffer))
                    del name_buffer[:]
                    vars.f.set_none()

                vars.f.finalize()
                on_field(vars.f)
                vars.f = None

            def on_end():
                if self.on_end is not None:
                    self.on_end()

            # Setup callbacks.
            callbacks = {
                'on_field_start': on_field_start,
                'on_field_name': on_field_name,
                'on_field_data': on_field_data,
                'on_field_end': on_field_end,
                'on_end': on_end,
            }

            # Instantiate parser.
parser = QuerystringParser( callbacks=callbacks, max_size=self.config['MAX_BODY_SIZE'] ) elif content_type == 'multipart/form-data': if boundary is None: self.logger.error("No boundary given") raise FormParserError("No boundary given") header_name = [] header_value = [] headers = {} # No 'nonlocal' on Python 2 :-( class vars: f = None writer = None is_file = False def on_part_begin(): pass def on_part_data(data, start, end): bytes_processed = vars.writer.write(data[start:end]) # TODO: check for error here. return bytes_processed def on_part_end(): vars.f.finalize() if vars.is_file: on_file(vars.f) else: on_field(vars.f) def on_header_field(data, start, end): header_name.append(data[start:end]) def on_header_value(data, start, end): header_value.append(data[start:end]) def on_header_end(): headers[b''.join(header_name)] = b''.join(header_value) del header_name[:] del header_value[:] def on_headers_finished(): # Reset the 'is file' flag. vars.is_file = False # Parse the content-disposition header. # TODO: handle mixed case content_disp = headers.get(b'Content-Disposition') disp, options = parse_options_header(content_disp) # Get the field and filename. field_name = options.get(b'name') file_name = options.get(b'filename') # TODO: check for errors # Create the proper class. if file_name is None: vars.f = FieldClass(field_name) else: vars.f = FileClass(file_name, field_name, config=self.config) vars.is_file = True # Parse the given Content-Transfer-Encoding to determine what # we need to do with the incoming data. # TODO: check that we properly handle 8bit / 7bit encoding. transfer_encoding = headers.get(b'Content-Transfer-Encoding', b'7bit') if (transfer_encoding == b'binary' or transfer_encoding == b'8bit' or transfer_encoding == b'7bit'): vars.writer = vars.f elif transfer_encoding == b'base64': vars.writer = Base64Decoder(vars.f) elif transfer_encoding == b'quoted-printable': vars.writer = QuotedPrintableDecoder(vars.f) else: self.logger.warning("Unknown Content-Transfer-Encoding: " "%r", transfer_encoding) if self.config['UPLOAD_ERROR_ON_BAD_CTE']: raise FormParserError( 'Unknown Content-Transfer-Encoding "{}"'.format( transfer_encoding ) ) else: # If we aren't erroring, then we just treat this as an # unencoded Content-Transfer-Encoding. vars.writer = vars.f def on_end(): vars.writer.finalize() if self.on_end is not None: self.on_end() # These are our callbacks for the parser. callbacks = { 'on_part_begin': on_part_begin, 'on_part_data': on_part_data, 'on_part_end': on_part_end, 'on_header_field': on_header_field, 'on_header_value': on_header_value, 'on_header_end': on_header_end, 'on_headers_finished': on_headers_finished, 'on_end': on_end, } # Instantiate a multipart parser. parser = MultipartParser(boundary, callbacks, max_size=self.config['MAX_BODY_SIZE']) else: self.logger.warning("Unknown Content-Type: %r", content_type) raise FormParserError("Unknown Content-Type: {}".format( content_type )) self.parser = parser def write(self, data): """Write some data. The parser will forward this to the appropriate underlying parser. :param data: a bytestring """ self.bytes_received += len(data) # TODO: check the parser's return value for errors? 
        return self.parser.write(data)

    def finalize(self):
        """Finalize the parser."""
        if self.parser is not None and hasattr(self.parser, 'finalize'):
            self.parser.finalize()

    def close(self):
        """Close the parser."""
        if self.parser is not None and hasattr(self.parser, 'close'):
            self.parser.close()

    def __repr__(self):
        return "{}(content_type={!r}, parser={!r})".format(
            self.__class__.__name__,
            self.content_type,
            self.parser,
        )


def create_form_parser(headers, on_field, on_file, trust_x_headers=False,
                       config={}):
    """This function is a helper function to aid in creating FormParser
    instances.  Given a dictionary-like headers object, it will determine
    the correct information needed, instantiate a FormParser with the
    appropriate values and given callbacks, and then return the corresponding
    parser.

    :param headers: A dictionary-like object of HTTP headers.  The only
                    required header is Content-Type.

    :param on_field: Callback to call with each parsed field.

    :param on_file: Callback to call with each parsed file.

    :param trust_x_headers: Whether or not to trust information received from
                            certain X-Headers - for example, the file name
                            from X-File-Name.

    :param config: Configuration variables to pass to the FormParser.
    """
    content_type = headers.get('Content-Type')
    if content_type is None:
        logging.getLogger(__name__).warning("No Content-Type header given")
        raise ValueError("No Content-Type header given!")

    # Boundaries are optional (the FormParser will raise if one is needed
    # but not given).
    content_type, params = parse_options_header(content_type)
    boundary = params.get(b'boundary')

    # We need content_type to be a string, not a bytes object.
    content_type = content_type.decode('latin-1')

    # File names are optional.
    file_name = headers.get('X-File-Name')

    # Instantiate a form parser.
    form_parser = FormParser(content_type,
                             on_field,
                             on_file,
                             boundary=boundary,
                             file_name=file_name,
                             config=config)

    # Return our parser.
    return form_parser


def parse_form(headers, input_stream, on_field, on_file, chunk_size=1048576,
               **kwargs):
    """This function is useful if you just want to parse a request body,
    without too much work.  Pass it a dictionary-like object of the request's
    headers, and a file-like object for the input stream, along with two
    callbacks that will get called whenever a field or file is parsed.

    :param headers: A dictionary-like object of HTTP headers.  The only
                    required header is Content-Type.

    :param input_stream: A file-like object that represents the request body.
                         The read() method must return bytestrings.

    :param on_field: Callback to call with each parsed field.

    :param on_file: Callback to call with each parsed file.

    :param chunk_size: The maximum size to read from the input stream and write
                       to the parser at one time.  Defaults to 1 MiB.
    """
    # Create our form parser.
    parser = create_form_parser(headers, on_field, on_file)

    # Read chunks of up to chunk_size bytes and write them to the parser, but
    # never read more than the given Content-Length, if any.
    content_length = headers.get('Content-Length')
    if content_length is not None:
        content_length = int(content_length)
    else:
        content_length = float('inf')
    bytes_read = 0

    while True:
        # Read only up to the Content-Length given.
        max_readable = min(content_length - bytes_read, chunk_size)
        buff = input_stream.read(max_readable)

        # Write to the parser and update our length.
        parser.write(buff)
        bytes_read += len(buff)

        # If we get a buffer that's smaller than the size requested, or if we
        # have read up to our content length, we're done.
if len(buff) != max_readable or bytes_read == content_length: break # Tell our parser that we're done writing data. parser.finalize()
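
# Illustrative usage (editor-added, not library source): a self-contained run
# of parse_form() over a canned multipart body. The boundary, field name, and
# value below are invented for the example.
if __name__ == '__main__':  # pragma: no cover
    example_body = (b'--boundary\r\n'
                    b'Content-Disposition: form-data; name="field1"\r\n'
                    b'\r\n'
                    b'value1\r\n'
                    b'--boundary--\r\n')
    example_headers = {
        'Content-Type': 'multipart/form-data; boundary=boundary',
        'Content-Length': str(len(example_body)),
    }

    def print_field(field):
        print('field: %r = %r' % (field.field_name, field.value))

    def print_file(file):
        print('file: %r' % (file.file_name,))

    # Prints: field: b'field1' = b'value1'
    parse_form(example_headers, BytesIO(example_body), print_field, print_file)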
71,230
Python
36.608765
109
0.524821
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/decoders.py
import base64 import binascii from .exceptions import DecodeError class Base64Decoder: """This object provides an interface to decode a stream of Base64 data. It is instantiated with an "underlying object", and whenever a write() operation is performed, it will decode the incoming data as Base64, and call write() on the underlying object. This is primarily used for decoding form data encoded as Base64, but can be used for other purposes:: from multipart.decoders import Base64Decoder fd = open("notb64.txt", "wb") decoder = Base64Decoder(fd) try: decoder.write("Zm9vYmFy") # "foobar" in Base64 decoder.finalize() finally: decoder.close() # The contents of "notb64.txt" should be "foobar". This object will also pass all finalize() and close() calls to the underlying object, if the underlying object supports them. Note that this class maintains a cache of base64 chunks, so that a write of arbitrary size can be performed. You must call :meth:`finalize` on this object after all writes are completed to ensure that all data is flushed to the underlying object. :param underlying: the underlying object to pass writes to """ def __init__(self, underlying): self.cache = bytearray() self.underlying = underlying def write(self, data): """Takes any input data provided, decodes it as base64, and passes it on to the underlying object. If the data provided is invalid base64 data, then this method will raise a :class:`multipart.exceptions.DecodeError` :param data: base64 data to decode """ # Prepend any cache info to our data. if len(self.cache) > 0: data = self.cache + data # Slice off a string that's a multiple of 4. decode_len = (len(data) // 4) * 4 val = data[:decode_len] # Decode and write, if we have any. if len(val) > 0: try: decoded = base64.b64decode(val) except binascii.Error: raise DecodeError('There was an error raised while decoding ' 'base64-encoded data.') self.underlying.write(decoded) # Get the remaining bytes and save in our cache. remaining_len = len(data) % 4 if remaining_len > 0: self.cache = data[-remaining_len:] else: self.cache = b'' # Return the length of the data to indicate no error. return len(data) def close(self): """Close this decoder. If the underlying object has a `close()` method, this function will call it. """ if hasattr(self.underlying, 'close'): self.underlying.close() def finalize(self): """Finalize this object. This should be called when no more data should be written to the stream. This function can raise a :class:`multipart.exceptions.DecodeError` if there is some remaining data in the cache. If the underlying object has a `finalize()` method, this function will call it. """ if len(self.cache) > 0: raise DecodeError('There are %d bytes remaining in the ' 'Base64Decoder cache when finalize() is called' % len(self.cache)) if hasattr(self.underlying, 'finalize'): self.underlying.finalize() def __repr__(self): return f"{self.__class__.__name__}(underlying={self.underlying!r})" class QuotedPrintableDecoder: """This object provides an interface to decode a stream of quoted-printable data. It is instantiated with an "underlying object", in the same manner as the :class:`multipart.decoders.Base64Decoder` class. This class behaves in exactly the same way, including maintaining a cache of quoted-printable chunks. 
    :param underlying: the underlying object to pass writes to
    """

    def __init__(self, underlying):
        self.cache = b''
        self.underlying = underlying

    def write(self, data):
        """Takes any input data provided, decodes it as quoted-printable, and
        passes it on to the underlying object.

        :param data: quoted-printable data to decode
        """
        # Prepend any cache info to our data.
        if len(self.cache) > 0:
            data = self.cache + data

        # If the last 2 characters contain an '=' sign, then we won't be
        # able to decode the encoded value and we'll need to save it for the
        # next decoding step.
        if data[-2:].find(b'=') != -1:
            enc, rest = data[:-2], data[-2:]
        else:
            enc = data
            rest = b''

        # Decode and write, if we have data.
        if len(enc) > 0:
            self.underlying.write(binascii.a2b_qp(enc))

        # Save remaining in cache.
        self.cache = rest
        return len(data)

    def close(self):
        """Close this decoder.  If the underlying object has a `close()`
        method, this function will call it.
        """
        if hasattr(self.underlying, 'close'):
            self.underlying.close()

    def finalize(self):
        """Finalize this object.  This should be called when no more data
        should be written to the stream.  This function will not raise any
        exceptions, but it may write more data to the underlying object if
        there is data remaining in the cache.

        If the underlying object has a `finalize()` method, this function
        will call it.
        """
        # If we have a cache, write and then remove it.
        if len(self.cache) > 0:
            self.underlying.write(binascii.a2b_qp(self.cache))
            self.cache = b''

        # Finalize our underlying stream.
        if hasattr(self.underlying, 'finalize'):
            self.underlying.finalize()

    def __repr__(self):
        return f"{self.__class__.__name__}(underlying={self.underlying!r})"
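
# Illustrative usage (editor-added, not library source), mirroring the
# Base64Decoder docstring example above: decoding quoted-printable data into
# an in-memory buffer.
if __name__ == '__main__':  # pragma: no cover
    from io import BytesIO

    buf = BytesIO()
    decoder = QuotedPrintableDecoder(buf)
    decoder.write(b'foo=3Dbar')  # '=3D' is the quoted-printable escape for '='
    decoder.finalize()
    assert buf.getvalue() == b'foo=bar'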
6,107
Python
34.511628
79
0.606681
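# Usage sketch (not from the original sources): a minimal demonstration of the
# two decoders from multipart/decoders.py, assuming the package is importable
# as "multipart", as its own docstring shows. io.BytesIO stands in for any
# object with a write() method.
from io import BytesIO

from multipart.decoders import Base64Decoder, QuotedPrintableDecoder

buf = BytesIO()
b64 = Base64Decoder(buf)
b64.write(b'Zm9v')        # writes may split the stream anywhere;
b64.write(b'YmFy')        # the internal cache realigns to 4-byte groups
b64.finalize()            # raises DecodeError if stray bytes remain cached
assert buf.getvalue() == b'foobar'

buf = BytesIO()
qp = QuotedPrintableDecoder(buf)
qp.write(b'foo=3')        # a split "=3D" escape is held back ...
qp.write(b'Dbar')         # ... and completed by the next chunk
qp.finalize()
assert buf.getvalue() == b'foo=bar'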
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/__init__.py
# This is the canonical package information. __author__ = 'Andrew Dunham' __license__ = 'Apache' __copyright__ = "Copyright (c) 2012-2013, Andrew Dunham" __version__ = "0.0.6" from .multipart import ( FormParser, MultipartParser, QuerystringParser, OctetStreamParser, create_form_parser, parse_form, )
335
Python
19.999999
56
0.653731
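# Usage sketch (not from the original sources): driving the top-level
# parse_form() export above. The boundary is taken from the Content-Type
# header, and on_field/on_file receive the Field/File objects built by
# FormParser; the body below is a hand-rolled illustrative example.
from io import BytesIO

from multipart import parse_form

body = (
    b'--boundary\r\n'
    b'Content-Disposition: form-data; name="field"\r\n'
    b'\r\n'
    b'value\r\n'
    b'--boundary--\r\n'
)

def on_field(field):
    print(field.field_name, field.value)   # b'field' b'value'

def on_file(file):
    print('file:', file.file_name)

parse_form(
    {'Content-Type': 'multipart/form-data; boundary=boundary'},
    BytesIO(body),
    on_field,
    on_file,
)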
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/tests/test_multipart.py
import os import sys import glob import yaml import base64 import random import tempfile import unittest from .compat import ( parametrize, parametrize_class, slow_test, ) from io import BytesIO from unittest.mock import MagicMock, Mock, patch from ..multipart import * # Get the current directory for our later test cases. curr_dir = os.path.abspath(os.path.dirname(__file__)) def force_bytes(val): if isinstance(val, str): val = val.encode(sys.getfilesystemencoding()) return val class TestField(unittest.TestCase): def setUp(self): self.f = Field('foo') def test_name(self): self.assertEqual(self.f.field_name, 'foo') def test_data(self): self.f.write(b'test123') self.assertEqual(self.f.value, b'test123') def test_cache_expiration(self): self.f.write(b'test') self.assertEqual(self.f.value, b'test') self.f.write(b'123') self.assertEqual(self.f.value, b'test123') def test_finalize(self): self.f.write(b'test123') self.f.finalize() self.assertEqual(self.f.value, b'test123') def test_close(self): self.f.write(b'test123') self.f.close() self.assertEqual(self.f.value, b'test123') def test_from_value(self): f = Field.from_value(b'name', b'value') self.assertEqual(f.field_name, b'name') self.assertEqual(f.value, b'value') f2 = Field.from_value(b'name', None) self.assertEqual(f2.value, None) def test_equality(self): f1 = Field.from_value(b'name', b'value') f2 = Field.from_value(b'name', b'value') self.assertEqual(f1, f2) def test_equality_with_other(self): f = Field.from_value(b'foo', b'bar') self.assertFalse(f == b'foo') self.assertFalse(b'foo' == f) def test_set_none(self): f = Field(b'foo') self.assertEqual(f.value, b'') f.set_none() self.assertEqual(f.value, None) class TestFile(unittest.TestCase): def setUp(self): self.c = {} self.d = force_bytes(tempfile.mkdtemp()) self.f = File(b'foo.txt', config=self.c) def assert_data(self, data): f = self.f.file_object f.seek(0) self.assertEqual(f.read(), data) f.seek(0) f.truncate() def assert_exists(self): full_path = os.path.join(self.d, self.f.actual_file_name) self.assertTrue(os.path.exists(full_path)) def test_simple(self): self.f.write(b'foobar') self.assert_data(b'foobar') def test_invalid_write(self): m = Mock() m.write.return_value = 5 self.f._fileobj = m v = self.f.write(b'foobar') self.assertEqual(v, 5) def test_file_fallback(self): self.c['MAX_MEMORY_FILE_SIZE'] = 1 self.f.write(b'1') self.assertTrue(self.f.in_memory) self.assert_data(b'1') self.f.write(b'123') self.assertFalse(self.f.in_memory) self.assert_data(b'123') # Test flushing too. old_obj = self.f.file_object self.f.flush_to_disk() self.assertFalse(self.f.in_memory) self.assertIs(self.f.file_object, old_obj) def test_file_fallback_with_data(self): self.c['MAX_MEMORY_FILE_SIZE'] = 10 self.f.write(b'1' * 10) self.assertTrue(self.f.in_memory) self.f.write(b'2' * 10) self.assertFalse(self.f.in_memory) self.assert_data(b'11111111112222222222') def test_file_name(self): # Write to this dir. self.c['UPLOAD_DIR'] = self.d self.c['MAX_MEMORY_FILE_SIZE'] = 10 # Write. self.f.write(b'12345678901') self.assertFalse(self.f.in_memory) # Assert that the file exists self.assertIsNotNone(self.f.actual_file_name) self.assert_exists() def test_file_full_name(self): # Write to this dir. self.c['UPLOAD_DIR'] = self.d self.c['UPLOAD_KEEP_FILENAME'] = True self.c['MAX_MEMORY_FILE_SIZE'] = 10 # Write. 
        self.f.write(b'12345678901')
        self.assertFalse(self.f.in_memory)

        # Assert that the file exists
        self.assertEqual(self.f.actual_file_name, b'foo')
        self.assert_exists()

    def test_file_full_name_with_ext(self):
        self.c['UPLOAD_DIR'] = self.d
        self.c['UPLOAD_KEEP_FILENAME'] = True
        self.c['UPLOAD_KEEP_EXTENSIONS'] = True
        self.c['MAX_MEMORY_FILE_SIZE'] = 10

        # Write.
        self.f.write(b'12345678901')
        self.assertFalse(self.f.in_memory)

        # Assert that the file exists
        self.assertEqual(self.f.actual_file_name, b'foo.txt')
        self.assert_exists()

    def test_no_dir_with_extension(self):
        self.c['UPLOAD_KEEP_EXTENSIONS'] = True
        self.c['MAX_MEMORY_FILE_SIZE'] = 10

        # Write.
        self.f.write(b'12345678901')
        self.assertFalse(self.f.in_memory)

        # Assert that the file exists
        ext = os.path.splitext(self.f.actual_file_name)[1]
        self.assertEqual(ext, b'.txt')
        self.assert_exists()

    def test_invalid_dir_with_name(self):
        # Write to this dir.
        self.c['UPLOAD_DIR'] = force_bytes(os.path.join('/', 'tmp', 'notexisting'))
        self.c['UPLOAD_KEEP_FILENAME'] = True
        self.c['MAX_MEMORY_FILE_SIZE'] = 5

        # Write.
        with self.assertRaises(FileError):
            self.f.write(b'1234567890')

    def test_invalid_dir_no_name(self):
        # Write to this dir.
        self.c['UPLOAD_DIR'] = force_bytes(os.path.join('/', 'tmp', 'notexisting'))
        self.c['UPLOAD_KEEP_FILENAME'] = False
        self.c['MAX_MEMORY_FILE_SIZE'] = 5

        # Write.
        with self.assertRaises(FileError):
            self.f.write(b'1234567890')

    # TODO: test uploading two files with the same name.
class TestParseOptionsHeader(unittest.TestCase): def test_simple(self): t, p = parse_options_header('application/json') self.assertEqual(t, b'application/json') self.assertEqual(p, {}) def test_blank(self): t, p = parse_options_header('') self.assertEqual(t, b'') self.assertEqual(p, {}) def test_single_param(self): t, p = parse_options_header('application/json;par=val') self.assertEqual(t, b'application/json') self.assertEqual(p, {b'par': b'val'}) def test_single_param_with_spaces(self): t, p = parse_options_header(b'application/json; par=val') self.assertEqual(t, b'application/json') self.assertEqual(p, {b'par': b'val'}) def test_multiple_params(self): t, p = parse_options_header(b'application/json;par=val;asdf=foo') self.assertEqual(t, b'application/json') self.assertEqual(p, {b'par': b'val', b'asdf': b'foo'}) def test_quoted_param(self): t, p = parse_options_header(b'application/json;param="quoted"') self.assertEqual(t, b'application/json') self.assertEqual(p, {b'param': b'quoted'}) def test_quoted_param_with_semicolon(self): t, p = parse_options_header(b'application/json;param="quoted;with;semicolons"') self.assertEqual(p[b'param'], b'quoted;with;semicolons') def test_quoted_param_with_escapes(self): t, p = parse_options_header(b'application/json;param="This \\" is \\" a \\" quote"') self.assertEqual(p[b'param'], b'This " is " a " quote') def test_handles_ie6_bug(self): t, p = parse_options_header(b'text/plain; filename="C:\\this\\is\\a\\path\\file.txt"') self.assertEqual(p[b'filename'], b'file.txt') class TestBaseParser(unittest.TestCase): def setUp(self): self.b = BaseParser() self.b.callbacks = {} def test_callbacks(self): # The stupid list-ness is to get around lack of nonlocal on py2 l = [0] def on_foo(): l[0] += 1 self.b.set_callback('foo', on_foo) self.b.callback('foo') self.assertEqual(l[0], 1) self.b.set_callback('foo', None) self.b.callback('foo') self.assertEqual(l[0], 1) class TestQuerystringParser(unittest.TestCase): def assert_fields(self, *args, **kwargs): if kwargs.pop('finalize', True): self.p.finalize() self.assertEqual(self.f, list(args)) if kwargs.get('reset', True): self.f = [] def setUp(self): self.reset() def reset(self): self.f = [] name_buffer = [] data_buffer = [] def on_field_name(data, start, end): name_buffer.append(data[start:end]) def on_field_data(data, start, end): data_buffer.append(data[start:end]) def on_field_end(): self.f.append(( b''.join(name_buffer), b''.join(data_buffer) )) del name_buffer[:] del data_buffer[:] callbacks = { 'on_field_name': on_field_name, 'on_field_data': on_field_data, 'on_field_end': on_field_end } self.p = QuerystringParser(callbacks) def test_simple_querystring(self): self.p.write(b'foo=bar') self.assert_fields((b'foo', b'bar')) def test_querystring_blank_beginning(self): self.p.write(b'&foo=bar') self.assert_fields((b'foo', b'bar')) def test_querystring_blank_end(self): self.p.write(b'foo=bar&') self.assert_fields((b'foo', b'bar')) def test_multiple_querystring(self): self.p.write(b'foo=bar&asdf=baz') self.assert_fields( (b'foo', b'bar'), (b'asdf', b'baz') ) def test_streaming_simple(self): self.p.write(b'foo=bar&') self.assert_fields( (b'foo', b'bar'), finalize=False ) self.p.write(b'asdf=baz') self.assert_fields( (b'asdf', b'baz') ) def test_streaming_break(self): self.p.write(b'foo=one') self.assert_fields(finalize=False) self.p.write(b'two') self.assert_fields(finalize=False) self.p.write(b'three') self.assert_fields(finalize=False) self.p.write(b'&asd') self.assert_fields( (b'foo', b'onetwothree'), finalize=False ) 
self.p.write(b'f=baz') self.assert_fields( (b'asdf', b'baz') ) def test_semicolon_separator(self): self.p.write(b'foo=bar;asdf=baz') self.assert_fields( (b'foo', b'bar'), (b'asdf', b'baz') ) def test_too_large_field(self): self.p.max_size = 15 # Note: len = 8 self.p.write(b"foo=bar&") self.assert_fields((b'foo', b'bar'), finalize=False) # Note: len = 8, only 7 bytes processed self.p.write(b'a=123456') self.assert_fields((b'a', b'12345')) def test_invalid_max_size(self): with self.assertRaises(ValueError): p = QuerystringParser(max_size=-100) def test_strict_parsing_pass(self): data = b'foo=bar&another=asdf' for first, last in split_all(data): self.reset() self.p.strict_parsing = True print(f"{first!r} / {last!r}") self.p.write(first) self.p.write(last) self.assert_fields((b'foo', b'bar'), (b'another', b'asdf')) def test_strict_parsing_fail_double_sep(self): data = b'foo=bar&&another=asdf' for first, last in split_all(data): self.reset() self.p.strict_parsing = True cnt = 0 with self.assertRaises(QuerystringParseError) as cm: cnt += self.p.write(first) cnt += self.p.write(last) self.p.finalize() # The offset should occur at 8 bytes into the data (as a whole), # so we calculate the offset into the chunk. if cm is not None: self.assertEqual(cm.exception.offset, 8 - cnt) def test_double_sep(self): data = b'foo=bar&&another=asdf' for first, last in split_all(data): print(f" {first!r} / {last!r} ") self.reset() cnt = 0 cnt += self.p.write(first) cnt += self.p.write(last) self.assert_fields((b'foo', b'bar'), (b'another', b'asdf')) def test_strict_parsing_fail_no_value(self): self.p.strict_parsing = True with self.assertRaises(QuerystringParseError) as cm: self.p.write(b'foo=bar&blank&another=asdf') if cm is not None: self.assertEqual(cm.exception.offset, 8) def test_success_no_value(self): self.p.write(b'foo=bar&blank&another=asdf') self.assert_fields( (b'foo', b'bar'), (b'blank', b''), (b'another', b'asdf') ) def test_repr(self): # Issue #29; verify we don't assert on repr() _ignored = repr(self.p) class TestOctetStreamParser(unittest.TestCase): def setUp(self): self.d = [] self.started = 0 self.finished = 0 def on_start(): self.started += 1 def on_data(data, start, end): self.d.append(data[start:end]) def on_end(): self.finished += 1 callbacks = { 'on_start': on_start, 'on_data': on_data, 'on_end': on_end } self.p = OctetStreamParser(callbacks) def assert_data(self, data, finalize=True): self.assertEqual(b''.join(self.d), data) self.d = [] def assert_started(self, val=True): if val: self.assertEqual(self.started, 1) else: self.assertEqual(self.started, 0) def assert_finished(self, val=True): if val: self.assertEqual(self.finished, 1) else: self.assertEqual(self.finished, 0) def test_simple(self): # Assert is not started self.assert_started(False) # Write something, it should then be started + have data self.p.write(b'foobar') self.assert_started() self.assert_data(b'foobar') # Finalize, and check self.assert_finished(False) self.p.finalize() self.assert_finished() def test_multiple_chunks(self): self.p.write(b'foo') self.p.write(b'bar') self.p.write(b'baz') self.p.finalize() self.assert_data(b'foobarbaz') self.assert_finished() def test_max_size(self): self.p.max_size = 5 self.p.write(b'0123456789') self.p.finalize() self.assert_data(b'01234') self.assert_finished() def test_invalid_max_size(self): with self.assertRaises(ValueError): q = OctetStreamParser(max_size='foo') class TestBase64Decoder(unittest.TestCase): # Note: base64('foobar') == 'Zm9vYmFy' def setUp(self): self.f = BytesIO() self.d 
= Base64Decoder(self.f) def assert_data(self, data, finalize=True): if finalize: self.d.finalize() self.f.seek(0) self.assertEqual(self.f.read(), data) self.f.seek(0) self.f.truncate() def test_simple(self): self.d.write(b'Zm9vYmFy') self.assert_data(b'foobar') def test_bad(self): with self.assertRaises(DecodeError): self.d.write(b'Zm9v!mFy') def test_split_properly(self): self.d.write(b'Zm9v') self.d.write(b'YmFy') self.assert_data(b'foobar') def test_bad_split(self): buff = b'Zm9v' for i in range(1, 4): first, second = buff[:i], buff[i:] self.setUp() self.d.write(first) self.d.write(second) self.assert_data(b'foo') def test_long_bad_split(self): buff = b'Zm9vYmFy' for i in range(5, 8): first, second = buff[:i], buff[i:] self.setUp() self.d.write(first) self.d.write(second) self.assert_data(b'foobar') def test_close_and_finalize(self): parser = Mock() f = Base64Decoder(parser) f.finalize() parser.finalize.assert_called_once_with() f.close() parser.close.assert_called_once_with() def test_bad_length(self): self.d.write(b'Zm9vYmF') # missing ending 'y' with self.assertRaises(DecodeError): self.d.finalize() class TestQuotedPrintableDecoder(unittest.TestCase): def setUp(self): self.f = BytesIO() self.d = QuotedPrintableDecoder(self.f) def assert_data(self, data, finalize=True): if finalize: self.d.finalize() self.f.seek(0) self.assertEqual(self.f.read(), data) self.f.seek(0) self.f.truncate() def test_simple(self): self.d.write(b'foobar') self.assert_data(b'foobar') def test_with_escape(self): self.d.write(b'foo=3Dbar') self.assert_data(b'foo=bar') def test_with_newline_escape(self): self.d.write(b'foo=\r\nbar') self.assert_data(b'foobar') def test_with_only_newline_escape(self): self.d.write(b'foo=\nbar') self.assert_data(b'foobar') def test_with_split_escape(self): self.d.write(b'foo=3') self.d.write(b'Dbar') self.assert_data(b'foo=bar') def test_with_split_newline_escape_1(self): self.d.write(b'foo=\r') self.d.write(b'\nbar') self.assert_data(b'foobar') def test_with_split_newline_escape_2(self): self.d.write(b'foo=') self.d.write(b'\r\nbar') self.assert_data(b'foobar') def test_close_and_finalize(self): parser = Mock() f = QuotedPrintableDecoder(parser) f.finalize() parser.finalize.assert_called_once_with() f.close() parser.close.assert_called_once_with() def test_not_aligned(self): """ https://github.com/andrew-d/python-multipart/issues/6 """ self.d.write(b'=3AX') self.assert_data(b':X') # Additional offset tests self.d.write(b'=3') self.d.write(b'AX') self.assert_data(b':X') self.d.write(b'q=3AX') self.assert_data(b'q:X') # Load our list of HTTP test cases. http_tests_dir = os.path.join(curr_dir, 'test_data', 'http') # Read in all test cases and load them. NON_PARAMETRIZED_TESTS = {'single_field_blocks'} http_tests = [] for f in os.listdir(http_tests_dir): # Only load the HTTP test cases. fname, ext = os.path.splitext(f) if fname in NON_PARAMETRIZED_TESTS: continue if ext == '.http': # Get the YAML file and load it too. yaml_file = os.path.join(http_tests_dir, fname + '.yaml') # Load both. with open(os.path.join(http_tests_dir, f), 'rb') as f: test_data = f.read() with open(yaml_file, 'rb') as f: yaml_data = yaml.safe_load(f) http_tests.append({ 'name': fname, 'test': test_data, 'result': yaml_data }) def split_all(val): """ This function will split an array all possible ways. 
For example:
        split_all([1,2,3,4])
    will give:
        ([1], [2,3,4]), ([1,2], [3,4]), ([1,2,3], [4])
    """
    for i in range(1, len(val)):
        yield (val[:i], val[i:])


@parametrize_class
class TestFormParser(unittest.TestCase):
    def make(self, boundary, config={}):
        self.ended = False
        self.files = []
        self.fields = []

        def on_field(f):
            self.fields.append(f)

        def on_file(f):
            self.files.append(f)

        def on_end():
            self.ended = True

        # Get a form-parser instance.
        self.f = FormParser('multipart/form-data', on_field, on_file, on_end,
                            boundary=boundary, config=config)

    def assert_file_data(self, f, data):
        o = f.file_object
        o.seek(0)
        file_data = o.read()
        self.assertEqual(file_data, data)

    def assert_file(self, field_name, file_name, data):
        # Find this file.
        found = None
        for f in self.files:
            if f.field_name == field_name:
                found = f
                break

        # Assert that we found it.
        self.assertIsNotNone(found)

        try:
            # Assert about this file.
            self.assert_file_data(found, data)
            self.assertEqual(found.file_name, file_name)

            # Remove it from our list.
            self.files.remove(found)
        finally:
            # Close our file
            found.close()

    def assert_field(self, name, value):
        # Find this field in our fields list.
        found = None
        for f in self.fields:
            if f.field_name == name:
                found = f
                break

        # Assert that it exists and matches.
        self.assertIsNotNone(found)
        self.assertEqual(value, found.value)

        # Remove it for future iterations.
        self.fields.remove(found)

    @parametrize('param', http_tests)
    def test_http(self, param):
        # Firstly, create our parser with the given boundary.
        boundary = param['result']['boundary']
        if isinstance(boundary, str):
            boundary = boundary.encode('latin-1')
        self.make(boundary)

        # Now, we feed the parser with data.
        exc = None
        try:
            processed = self.f.write(param['test'])
            self.f.finalize()
        except MultipartParseError as e:
            processed = 0
            exc = e

        # print(repr(param))
        # print("")
        # print(repr(self.fields))
        # print(repr(self.files))

        # Do we expect an error?
        if 'error' in param['result']['expected']:
            self.assertIsNotNone(exc)
            self.assertEqual(param['result']['expected']['error'], exc.offset)
            return

        # No error!
        self.assertEqual(processed, len(param['test']))

        # Assert that the parser gave us the appropriate fields/files.
        for e in param['result']['expected']:
            # Get our type and name.
            type = e['type']
            name = e['name'].encode('latin-1')

            if type == 'field':
                self.assert_field(name, e['data'])

            elif type == 'file':
                self.assert_file(
                    name,
                    e['file_name'].encode('latin-1'),
                    e['data']
                )

            else:
                assert False

    def test_random_splitting(self):
        """
        This test runs a simple multipart body with one field and one file
        through every possible split.
        """
        # Load test data.
        test_file = 'single_field_single_file.http'
        with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
            test_data = f.read()

        # We split the file through all cases.
        for first, last in split_all(test_data):
            # Create form parser.
            self.make('boundary')

            # Feed with data in 2 chunks.
            i = 0
            i += self.f.write(first)
            i += self.f.write(last)
            self.f.finalize()

            # Assert we processed everything.
            self.assertEqual(i, len(test_data))

            # Assert that our file and field are here.
            self.assert_field(b'field', b'test1')
            self.assert_file(b'file', b'file.txt', b'test2')

    def test_feed_single_bytes(self):
        """
        This test parses a simple multipart body 1 byte at a time.
        """
        # Load test data.
        test_file = 'single_field_single_file.http'
        with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
            test_data = f.read()

        # Create form parser.
        self.make('boundary')

        # Write all bytes.
        # NOTE: Can't simply do `for b in test_data`, since that gives
        # an integer when iterating over a bytes object on Python 3.
        i = 0
        for x in range(len(test_data)):
            b = test_data[x:x + 1]
            i += self.f.write(b)

        self.f.finalize()

        # Assert we processed everything.
        self.assertEqual(i, len(test_data))

        # Assert that our file and field are here.
        self.assert_field(b'field', b'test1')
        self.assert_file(b'file', b'file.txt', b'test2')

    def test_feed_blocks(self):
        """
        This test parses a simple multipart body in block sizes from 1 byte
        up to the full body length.
        """
        # Load test data.
        test_file = 'single_field_blocks.http'
        with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
            test_data = f.read()

        for c in range(1, len(test_data) + 1):
            # Skip first `d` bytes - not interesting
            for d in range(c):
                # Create form parser.
                self.make('boundary')
                # Skip
                i = 0
                self.f.write(test_data[:d])
                i += d
                for x in range(d, len(test_data), c):
                    # Write a chunk to achieve condition
                    # `i == data_length - 1`
                    # in boundary search loop (multipart.py:1302)
                    b = test_data[x:x + c]
                    i += self.f.write(b)

                self.f.finalize()

                # Assert we processed everything.
                self.assertEqual(i, len(test_data))

                # Assert that our field is here.
                self.assert_field(b'field',
                                  b'0123456789ABCDEFGHIJ0123456789ABCDEFGHIJ')

    @slow_test
    def test_request_body_fuzz(self):
        """
        This test randomly fuzzes the request body to ensure that no strange
        exceptions are raised and we don't end up in a strange state.  The
        fuzzing consists of randomly doing one of the following:
            - Adding a random byte at a random offset
            - Randomly deleting a single byte
            - Randomly swapping two bytes
        """
        # Load test data.
        test_file = 'single_field_single_file.http'
        with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
            test_data = f.read()

        iterations = 1000
        successes = 0
        failures = 0
        exceptions = 0

        print("Running %d iterations of fuzz testing:" % (iterations,))
        for i in range(iterations):
            # Create a bytearray to mutate.
            fuzz_data = bytearray(test_data)

            # Pick what we're supposed to do.
            choice = random.choice([1, 2, 3])
            if choice == 1:
                # Add a random byte.
                i = random.randrange(len(test_data))
                b = random.randrange(256)

                fuzz_data.insert(i, b)
                msg = "Inserting byte %r at offset %d" % (b, i)

            elif choice == 2:
                # Remove a random byte.
                i = random.randrange(len(test_data))
                del fuzz_data[i]

                msg = "Deleting byte at offset %d" % (i,)

            elif choice == 3:
                # Swap two bytes.
                i = random.randrange(len(test_data) - 1)
                fuzz_data[i], fuzz_data[i + 1] = fuzz_data[i + 1], fuzz_data[i]

                msg = "Swapping bytes %d and %d" % (i, i + 1)

            # Print message, so if this crashes, we can inspect the output.
            print(" " + msg)

            # Create form parser.
            self.make('boundary')

            # Feed with data, and ignore form parser exceptions.
            i = 0
            try:
                i = self.f.write(bytes(fuzz_data))
                self.f.finalize()
            except FormParserError:
                exceptions += 1
            else:
                if i == len(fuzz_data):
                    successes += 1
                else:
                    failures += 1

        print("--------------------------------------------------")
        print("Successes: %d" % (successes,))
        print("Failures: %d" % (failures,))
        print("Exceptions: %d" % (exceptions,))

    @slow_test
    def test_request_body_fuzz_random_data(self):
        """
        This test will fuzz the multipart parser with some number of
        iterations of randomly-generated data.
        """
        iterations = 1000
        successes = 0
        failures = 0
        exceptions = 0

        print("Running %d iterations of fuzz testing:" % (iterations,))
        for i in range(iterations):
            data_size = random.randrange(100, 4096)
            data = os.urandom(data_size)
            print(" Testing with %d random bytes..." % (data_size,))

            # Create form parser.
self.make('boundary') # Feed with data, and ignore form parser exceptions. i = 0 try: i = self.f.write(bytes(data)) self.f.finalize() except FormParserError: exceptions += 1 else: if i == len(data): successes += 1 else: failures += 1 print("--------------------------------------------------") print("Successes: %d" % (successes,)) print("Failures: %d" % (failures,)) print("Exceptions: %d" % (exceptions,)) def test_bad_start_boundary(self): self.make('boundary') data = b'--boundary\rfoobar' with self.assertRaises(MultipartParseError): self.f.write(data) self.make('boundary') data = b'--boundaryfoobar' with self.assertRaises(MultipartParseError): i = self.f.write(data) def test_octet_stream(self): files = [] def on_file(f): files.append(f) on_field = Mock() on_end = Mock() f = FormParser('application/octet-stream', on_field, on_file, on_end=on_end, file_name=b'foo.txt') self.assertTrue(isinstance(f.parser, OctetStreamParser)) f.write(b'test') f.write(b'1234') f.finalize() # Assert that we only received a single file, with the right data, and that we're done. self.assertFalse(on_field.called) self.assertEqual(len(files), 1) self.assert_file_data(files[0], b'test1234') self.assertTrue(on_end.called) def test_querystring(self): fields = [] def on_field(f): fields.append(f) on_file = Mock() on_end = Mock() def simple_test(f): # Reset tracking. del fields[:] on_file.reset_mock() on_end.reset_mock() # Write test data. f.write(b'foo=bar') f.write(b'&test=asdf') f.finalize() # Assert we only received 2 fields... self.assertFalse(on_file.called) self.assertEqual(len(fields), 2) # ...assert that we have the correct data... self.assertEqual(fields[0].field_name, b'foo') self.assertEqual(fields[0].value, b'bar') self.assertEqual(fields[1].field_name, b'test') self.assertEqual(fields[1].value, b'asdf') # ... and assert that we've finished. self.assertTrue(on_end.called) f = FormParser('application/x-www-form-urlencoded', on_field, on_file, on_end=on_end) self.assertTrue(isinstance(f.parser, QuerystringParser)) simple_test(f) f = FormParser('application/x-url-encoded', on_field, on_file, on_end=on_end) self.assertTrue(isinstance(f.parser, QuerystringParser)) simple_test(f) def test_close_methods(self): parser = Mock() f = FormParser('application/x-url-encoded', None, None) f.parser = parser f.finalize() parser.finalize.assert_called_once_with() f.close() parser.close.assert_called_once_with() def test_bad_content_type(self): # We should raise a ValueError for a bad Content-Type with self.assertRaises(ValueError): f = FormParser('application/bad', None, None) def test_no_boundary_given(self): # We should raise a FormParserError when parsing a multipart message # without a boundary. with self.assertRaises(FormParserError): f = FormParser('multipart/form-data', None, None) def test_bad_content_transfer_encoding(self): data = b'----boundary\r\nContent-Disposition: form-data; name="file"; filename="test.txt"\r\nContent-Type: text/plain\r\nContent-Transfer-Encoding: badstuff\r\n\r\nTest\r\n----boundary--\r\n' files = [] def on_file(f): files.append(f) on_field = Mock() on_end = Mock() # Test with erroring. config = {'UPLOAD_ERROR_ON_BAD_CTE': True} f = FormParser('multipart/form-data', on_field, on_file, on_end=on_end, boundary='--boundary', config=config) with self.assertRaises(FormParserError): f.write(data) f.finalize() # Test without erroring. 
config = {'UPLOAD_ERROR_ON_BAD_CTE': False} f = FormParser('multipart/form-data', on_field, on_file, on_end=on_end, boundary='--boundary', config=config) f.write(data) f.finalize() self.assert_file_data(files[0], b'Test') def test_handles_None_fields(self): fields = [] def on_field(f): fields.append(f) on_file = Mock() on_end = Mock() f = FormParser('application/x-www-form-urlencoded', on_field, on_file, on_end=on_end) f.write(b'foo=bar&another&baz=asdf') f.finalize() self.assertEqual(fields[0].field_name, b'foo') self.assertEqual(fields[0].value, b'bar') self.assertEqual(fields[1].field_name, b'another') self.assertEqual(fields[1].value, None) self.assertEqual(fields[2].field_name, b'baz') self.assertEqual(fields[2].value, b'asdf') def test_max_size_multipart(self): # Load test data. test_file = 'single_field_single_file.http' with open(os.path.join(http_tests_dir, test_file), 'rb') as f: test_data = f.read() # Create form parser. self.make('boundary') # Set the maximum length that we can process to be halfway through the # given data. self.f.parser.max_size = len(test_data) / 2 i = self.f.write(test_data) self.f.finalize() # Assert we processed the correct amount. self.assertEqual(i, len(test_data) / 2) def test_max_size_form_parser(self): # Load test data. test_file = 'single_field_single_file.http' with open(os.path.join(http_tests_dir, test_file), 'rb') as f: test_data = f.read() # Create form parser setting the maximum length that we can process to # be halfway through the given data. size = len(test_data) / 2 self.make('boundary', config={'MAX_BODY_SIZE': size}) i = self.f.write(test_data) self.f.finalize() # Assert we processed the correct amount. self.assertEqual(i, len(test_data) / 2) def test_octet_stream_max_size(self): files = [] def on_file(f): files.append(f) on_field = Mock() on_end = Mock() f = FormParser('application/octet-stream', on_field, on_file, on_end=on_end, file_name=b'foo.txt', config={'MAX_BODY_SIZE': 10}) f.write(b'0123456789012345689') f.finalize() self.assert_file_data(files[0], b'0123456789') def test_invalid_max_size_multipart(self): with self.assertRaises(ValueError): q = MultipartParser(b'bound', max_size='foo') class TestHelperFunctions(unittest.TestCase): def test_create_form_parser(self): r = create_form_parser({'Content-Type': 'application/octet-stream'}, None, None) self.assertTrue(isinstance(r, FormParser)) def test_create_form_parser_error(self): headers = {} with self.assertRaises(ValueError): create_form_parser(headers, None, None) def test_parse_form(self): on_field = Mock() on_file = Mock() parse_form( {'Content-Type': 'application/octet-stream', }, BytesIO(b'123456789012345'), on_field, on_file ) assert on_file.call_count == 1 # Assert that the first argument of the call (a File object) has size # 15 - i.e. all data is written. 
self.assertEqual(on_file.call_args[0][0].size, 15) def test_parse_form_content_length(self): files = [] def on_file(file): files.append(file) parse_form( {'Content-Type': 'application/octet-stream', 'Content-Length': '10' }, BytesIO(b'123456789012345'), None, on_file ) self.assertEqual(len(files), 1) self.assertEqual(files[0].size, 10) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestFile)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestParseOptionsHeader)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestBaseParser)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestQuerystringParser)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestOctetStreamParser)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestBase64Decoder)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestQuotedPrintableDecoder)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestFormParser)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestHelperFunctions)) return suite
38,988
Python
28.853752
199
0.553555
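# Usage sketch (not from the original sources): the suite() helper above can
# be driven with the stock unittest runner, assuming PyYAML and the package's
# test_data directory are available. Note that @slow_test is a pytest marker
# (or a no-op without pytest), so a plain unittest run includes the fuzz
# tests as well.
import unittest

from multipart.tests.test_multipart import suite

unittest.TextTestRunner(verbosity=2).run(suite())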
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/tests/compat.py
import os
import re
import sys
import types
import functools


def ensure_in_path(path):
    """
    Ensure that a given path is in the sys.path array
    """
    if not os.path.isdir(path):
        raise RuntimeError('Tried to add a non-existent path')

    def _samefile(x, y):
        try:
            return os.path.samefile(x, y)
        except OSError:
            return False
        except AttributeError:
            # Probably on Windows.
            path1 = os.path.abspath(x).lower()
            path2 = os.path.abspath(y).lower()
            return path1 == path2

    # Remove existing copies of it.
    for pth in sys.path:
        if _samefile(pth, path):
            sys.path.remove(pth)

    # Add it at the beginning.
    sys.path.insert(0, path)


# Try to import pytest.  If it's available, we use its markers to create our
# decorators; otherwise, we fall back to no-op implementations.
try:
    import pytest
except ImportError:
    pytest = None

if pytest is not None:
    slow_test = pytest.mark.slow_test
    xfail = pytest.mark.xfail

else:
    def slow_test(x):
        return x

    def xfail(*args, **kwargs):
        if len(args) > 0 and isinstance(args[0], types.FunctionType):
            return args[0]

        return lambda x: x


# We don't use the pytest parametrizing function, since it seems to break
# with unittest.TestCase subclasses.
def parametrize(field_names, field_values):
    # If we're not given a list of field names, we make it.
    if not isinstance(field_names, (tuple, list)):
        field_names = (field_names,)
        field_values = [(val,) for val in field_values]

    # Create a decorator that saves this list of field names and values on the
    # function for later parametrizing.
    def decorator(func):
        func.__dict__['param_names'] = field_names
        func.__dict__['param_values'] = field_values
        return func

    return decorator


# This is a metaclass that actually performs the parametrization.
class ParametrizingMetaclass(type):
    IDENTIFIER_RE = re.compile('[^A-Za-z0-9]')

    def __new__(klass, name, bases, attrs):
        new_attrs = attrs.copy()
        for attr_name, attr in attrs.items():
            # We only care about functions
            if not isinstance(attr, types.FunctionType):
                continue

            param_names = attr.__dict__.pop('param_names', None)
            param_values = attr.__dict__.pop('param_values', None)
            if param_names is None or param_values is None:
                continue

            # Create multiple copies of the function.
            for i, values in enumerate(param_values):
                assert len(param_names) == len(values)

                # Get a repr of the values, and fix it to be a valid identifier
                human = '_'.join(
                    [klass.IDENTIFIER_RE.sub('', repr(x)) for x in values]
                )

                # Create a new name.
                # new_name = attr.__name__ + "_%d" % i
                new_name = attr.__name__ + "__" + human

                # Create a replacement function.
                def create_new_func(func, names, values):
                    # Create a kwargs dictionary.
                    kwargs = dict(zip(names, values))

                    @functools.wraps(func)
                    def new_func(self):
                        return func(self, **kwargs)

                    # Manually set the name and return the new function.
                    new_func.__name__ = new_name
                    return new_func

                # Actually create the new function.
                new_func = create_new_func(attr, param_names, values)

                # Save this new function in our attrs dict.
                new_attrs[new_name] = new_func

            # Remove the old attribute from our new dictionary.
            del new_attrs[attr_name]

        # We create the class as normal, except we use our new attributes.
        return type.__new__(klass, name, bases, new_attrs)


# This is a class decorator that actually applies the above metaclass.
def parametrize_class(klass):
    return ParametrizingMetaclass(klass.__name__,
                                  klass.__bases__,
                                  klass.__dict__)
4,266
Python
30.843283
79
0.569386
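# Usage sketch (not from the original sources): what the home-grown
# parametrization in compat.py does to a TestCase. The metaclass replaces the
# decorated method with one copy per parameter set, each named after a
# sanitized repr of its values; TestDoubling is an illustrative example.
import unittest

from multipart.tests.compat import parametrize, parametrize_class

@parametrize_class
class TestDoubling(unittest.TestCase):
    @parametrize('value', [1, 2, 3])
    def test_double(self, value):
        self.assertEqual(value * 2, value + value)

# The class now exposes test_double__1, test_double__2 and test_double__3;
# the original test_double has been removed.
assert hasattr(TestDoubling, 'test_double__2')
assert not hasattr(TestDoubling, 'test_double')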
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/endpoints.py
import json import typing from starlette import status from starlette._utils import is_async_callable from starlette.concurrency import run_in_threadpool from starlette.exceptions import HTTPException from starlette.requests import Request from starlette.responses import PlainTextResponse, Response from starlette.types import Message, Receive, Scope, Send from starlette.websockets import WebSocket class HTTPEndpoint: def __init__(self, scope: Scope, receive: Receive, send: Send) -> None: assert scope["type"] == "http" self.scope = scope self.receive = receive self.send = send self._allowed_methods = [ method for method in ("GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS") if getattr(self, method.lower(), None) is not None ] def __await__(self) -> typing.Generator: return self.dispatch().__await__() async def dispatch(self) -> None: request = Request(self.scope, receive=self.receive) handler_name = ( "get" if request.method == "HEAD" and not hasattr(self, "head") else request.method.lower() ) handler: typing.Callable[[Request], typing.Any] = getattr( self, handler_name, self.method_not_allowed ) is_async = is_async_callable(handler) if is_async: response = await handler(request) else: response = await run_in_threadpool(handler, request) await response(self.scope, self.receive, self.send) async def method_not_allowed(self, request: Request) -> Response: # If we're running inside a starlette application then raise an # exception, so that the configurable exception handler can deal with # returning the response. For plain ASGI apps, just return the response. headers = {"Allow": ", ".join(self._allowed_methods)} if "app" in self.scope: raise HTTPException(status_code=405, headers=headers) return PlainTextResponse("Method Not Allowed", status_code=405, headers=headers) class WebSocketEndpoint: encoding: typing.Optional[str] = None # May be "text", "bytes", or "json". 
def __init__(self, scope: Scope, receive: Receive, send: Send) -> None: assert scope["type"] == "websocket" self.scope = scope self.receive = receive self.send = send def __await__(self) -> typing.Generator: return self.dispatch().__await__() async def dispatch(self) -> None: websocket = WebSocket(self.scope, receive=self.receive, send=self.send) await self.on_connect(websocket) close_code = status.WS_1000_NORMAL_CLOSURE try: while True: message = await websocket.receive() if message["type"] == "websocket.receive": data = await self.decode(websocket, message) await self.on_receive(websocket, data) elif message["type"] == "websocket.disconnect": close_code = int( message.get("code") or status.WS_1000_NORMAL_CLOSURE ) break except Exception as exc: close_code = status.WS_1011_INTERNAL_ERROR raise exc finally: await self.on_disconnect(websocket, close_code) async def decode(self, websocket: WebSocket, message: Message) -> typing.Any: if self.encoding == "text": if "text" not in message: await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA) raise RuntimeError("Expected text websocket messages, but got bytes") return message["text"] elif self.encoding == "bytes": if "bytes" not in message: await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA) raise RuntimeError("Expected bytes websocket messages, but got text") return message["bytes"] elif self.encoding == "json": if message.get("text") is not None: text = message["text"] else: text = message["bytes"].decode("utf-8") try: return json.loads(text) except json.decoder.JSONDecodeError: await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA) raise RuntimeError("Malformed JSON data received.") assert ( self.encoding is None ), f"Unsupported 'encoding' attribute {self.encoding}" return message["text"] if message.get("text") else message["bytes"] async def on_connect(self, websocket: WebSocket) -> None: """Override to handle an incoming websocket connection""" await websocket.accept() async def on_receive(self, websocket: WebSocket, data: typing.Any) -> None: """Override to handle an incoming websocket message""" async def on_disconnect(self, websocket: WebSocket, close_code: int) -> None: """Override to handle a disconnecting websocket"""
5,145
Python
37.691729
88
0.607191
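# Usage sketch (not from the original sources): wiring the endpoint classes
# above into routes. Starlette's Route/WebSocketRoute accept these classes
# directly as ASGI endpoints, and dispatch() resolves handlers by HTTP verb
# or by the on_connect/on_receive/on_disconnect hooks.
from starlette.applications import Starlette
from starlette.endpoints import HTTPEndpoint, WebSocketEndpoint
from starlette.responses import PlainTextResponse
from starlette.routing import Route, WebSocketRoute


class Homepage(HTTPEndpoint):
    async def get(self, request):
        # HEAD requests fall back to this handler too (see dispatch() above).
        return PlainTextResponse("Hello, world!")


class Echo(WebSocketEndpoint):
    encoding = "text"   # decode() will reject non-text frames

    async def on_receive(self, websocket, data):
        await websocket.send_text(f"echo: {data}")


app = Starlette(routes=[
    Route("/", Homepage),
    WebSocketRoute("/ws", Echo),
])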
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/config.py
import os import typing from collections.abc import MutableMapping from pathlib import Path class undefined: pass class EnvironError(Exception): pass class Environ(MutableMapping): def __init__(self, environ: typing.MutableMapping = os.environ): self._environ = environ self._has_been_read: typing.Set[typing.Any] = set() def __getitem__(self, key: typing.Any) -> typing.Any: self._has_been_read.add(key) return self._environ.__getitem__(key) def __setitem__(self, key: typing.Any, value: typing.Any) -> None: if key in self._has_been_read: raise EnvironError( f"Attempting to set environ['{key}'], but the value has already been " "read." ) self._environ.__setitem__(key, value) def __delitem__(self, key: typing.Any) -> None: if key in self._has_been_read: raise EnvironError( f"Attempting to delete environ['{key}'], but the value has already " "been read." ) self._environ.__delitem__(key) def __iter__(self) -> typing.Iterator: return iter(self._environ) def __len__(self) -> int: return len(self._environ) environ = Environ() T = typing.TypeVar("T") class Config: def __init__( self, env_file: typing.Optional[typing.Union[str, Path]] = None, environ: typing.Mapping[str, str] = environ, env_prefix: str = "", ) -> None: self.environ = environ self.env_prefix = env_prefix self.file_values: typing.Dict[str, str] = {} if env_file is not None and os.path.isfile(env_file): self.file_values = self._read_file(env_file) @typing.overload def __call__(self, key: str, *, default: None) -> typing.Optional[str]: ... @typing.overload def __call__(self, key: str, cast: typing.Type[T], default: T = ...) -> T: ... @typing.overload def __call__( self, key: str, cast: typing.Type[str] = ..., default: str = ... ) -> str: ... @typing.overload def __call__( self, key: str, cast: typing.Callable[[typing.Any], T] = ..., default: typing.Any = ..., ) -> T: ... @typing.overload def __call__( self, key: str, cast: typing.Type[str] = ..., default: T = ... ) -> typing.Union[T, str]: ... def __call__( self, key: str, cast: typing.Optional[typing.Callable] = None, default: typing.Any = undefined, ) -> typing.Any: return self.get(key, cast, default) def get( self, key: str, cast: typing.Optional[typing.Callable] = None, default: typing.Any = undefined, ) -> typing.Any: key = self.env_prefix + key if key in self.environ: value = self.environ[key] return self._perform_cast(key, value, cast) if key in self.file_values: value = self.file_values[key] return self._perform_cast(key, value, cast) if default is not undefined: return self._perform_cast(key, default, cast) raise KeyError(f"Config '{key}' is missing, and has no default.") def _read_file(self, file_name: typing.Union[str, Path]) -> typing.Dict[str, str]: file_values: typing.Dict[str, str] = {} with open(file_name) as input_file: for line in input_file.readlines(): line = line.strip() if "=" in line and not line.startswith("#"): key, value = line.split("=", 1) key = key.strip() value = value.strip().strip("\"'") file_values[key] = value return file_values def _perform_cast( self, key: str, value: typing.Any, cast: typing.Optional[typing.Callable] = None ) -> typing.Any: if cast is None or value is None: return value elif cast is bool and isinstance(value, str): mapping = {"true": True, "1": True, "false": False, "0": False} value = value.lower() if value not in mapping: raise ValueError( f"Config '{key}' has value '{value}'. Not a valid bool." ) return mapping[value] try: return cast(value) except (TypeError, ValueError): raise ValueError( f"Config '{key}' has value '{value}'. 
Not a valid {cast.__name__}." )
4,607
Python
29.72
88
0.533536
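# Usage sketch (not from the original sources): the lookup order implemented
# above is process environment first, then the env file, then the default.
# The names below (APP_*, SECRET_KEY, DATABASE_URL) are illustrative only.
from starlette.config import Config
from starlette.datastructures import Secret

config = Config(environ={"APP_DEBUG": "true", "APP_PORT": "8000"},
                env_prefix="APP_")

DEBUG = config("DEBUG", cast=bool, default=False)           # True, via the bool map
PORT = config("PORT", cast=int)                             # 8000
SECRET_KEY = config("SECRET_KEY", cast=Secret, default="")  # repr() hides the value
DATABASE_URL = config("DATABASE_URL", default=None)         # None; cast is skipped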
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/exceptions.py
import http import typing import warnings __all__ = ("HTTPException", "WebSocketException") class HTTPException(Exception): def __init__( self, status_code: int, detail: typing.Optional[str] = None, headers: typing.Optional[dict] = None, ) -> None: if detail is None: detail = http.HTTPStatus(status_code).phrase self.status_code = status_code self.detail = detail self.headers = headers def __repr__(self) -> str: class_name = self.__class__.__name__ return f"{class_name}(status_code={self.status_code!r}, detail={self.detail!r})" class WebSocketException(Exception): def __init__(self, code: int, reason: typing.Optional[str] = None) -> None: self.code = code self.reason = reason or "" def __repr__(self) -> str: class_name = self.__class__.__name__ return f"{class_name}(code={self.code!r}, reason={self.reason!r})" __deprecated__ = "ExceptionMiddleware" def __getattr__(name: str) -> typing.Any: # pragma: no cover if name == __deprecated__: from starlette.middleware.exceptions import ExceptionMiddleware warnings.warn( f"{__deprecated__} is deprecated on `starlette.exceptions`. " f"Import it from `starlette.middleware.exceptions` instead.", category=DeprecationWarning, stacklevel=3, ) return ExceptionMiddleware raise AttributeError(f"module '{__name__}' has no attribute '{name}'") def __dir__() -> typing.List[str]: return sorted(list(__all__) + [__deprecated__]) # pragma: no cover
1,648
Python
28.981818
88
0.603155
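# Usage sketch (not from the original sources): raising the exceptions above
# from handlers. Inside a Starlette app, the exception middleware turns
# HTTPException into a response and WebSocketException into a close frame;
# the handler bodies are stand-ins.
from starlette.exceptions import HTTPException, WebSocketException
from starlette.status import WS_1008_POLICY_VIOLATION


async def get_item(request):
    found = False   # stand-in for a real lookup
    if not found:
        # detail defaults to the status phrase, here "Not Found"
        raise HTTPException(status_code=404, headers={"X-Reason": "demo"})


async def require_auth(websocket):
    authorized = False  # stand-in for a real auth check
    if not authorized:
        raise WebSocketException(code=WS_1008_POLICY_VIOLATION,
                                 reason="auth required")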
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/datastructures.py
import typing from collections.abc import Sequence from shlex import shlex from urllib.parse import SplitResult, parse_qsl, urlencode, urlsplit from starlette.concurrency import run_in_threadpool from starlette.types import Scope class Address(typing.NamedTuple): host: str port: int _KeyType = typing.TypeVar("_KeyType") # Mapping keys are invariant but their values are covariant since # you can only read them # that is, you can't do `Mapping[str, Animal]()["fido"] = Dog()` _CovariantValueType = typing.TypeVar("_CovariantValueType", covariant=True) class URL: def __init__( self, url: str = "", scope: typing.Optional[Scope] = None, **components: typing.Any, ) -> None: if scope is not None: assert not url, 'Cannot set both "url" and "scope".' assert not components, 'Cannot set both "scope" and "**components".' scheme = scope.get("scheme", "http") server = scope.get("server", None) path = scope.get("root_path", "") + scope["path"] query_string = scope.get("query_string", b"") host_header = None for key, value in scope["headers"]: if key == b"host": host_header = value.decode("latin-1") break if host_header is not None: url = f"{scheme}://{host_header}{path}" elif server is None: url = path else: host, port = server default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme] if port == default_port: url = f"{scheme}://{host}{path}" else: url = f"{scheme}://{host}:{port}{path}" if query_string: url += "?" + query_string.decode() elif components: assert not url, 'Cannot set both "url" and "**components".' url = URL("").replace(**components).components.geturl() self._url = url @property def components(self) -> SplitResult: if not hasattr(self, "_components"): self._components = urlsplit(self._url) return self._components @property def scheme(self) -> str: return self.components.scheme @property def netloc(self) -> str: return self.components.netloc @property def path(self) -> str: return self.components.path @property def query(self) -> str: return self.components.query @property def fragment(self) -> str: return self.components.fragment @property def username(self) -> typing.Union[None, str]: return self.components.username @property def password(self) -> typing.Union[None, str]: return self.components.password @property def hostname(self) -> typing.Union[None, str]: return self.components.hostname @property def port(self) -> typing.Optional[int]: return self.components.port @property def is_secure(self) -> bool: return self.scheme in ("https", "wss") def replace(self, **kwargs: typing.Any) -> "URL": if ( "username" in kwargs or "password" in kwargs or "hostname" in kwargs or "port" in kwargs ): hostname = kwargs.pop("hostname", None) port = kwargs.pop("port", self.port) username = kwargs.pop("username", self.username) password = kwargs.pop("password", self.password) if hostname is None: netloc = self.netloc _, _, hostname = netloc.rpartition("@") if hostname[-1] != "]": hostname = hostname.rsplit(":", 1)[0] netloc = hostname if port is not None: netloc += f":{port}" if username is not None: userpass = username if password is not None: userpass += f":{password}" netloc = f"{userpass}@{netloc}" kwargs["netloc"] = netloc components = self.components._replace(**kwargs) return self.__class__(components.geturl()) def include_query_params(self, **kwargs: typing.Any) -> "URL": params = MultiDict(parse_qsl(self.query, keep_blank_values=True)) params.update({str(key): str(value) for key, value in kwargs.items()}) query = urlencode(params.multi_items()) return self.replace(query=query) def 
replace_query_params(self, **kwargs: typing.Any) -> "URL": query = urlencode([(str(key), str(value)) for key, value in kwargs.items()]) return self.replace(query=query) def remove_query_params( self, keys: typing.Union[str, typing.Sequence[str]] ) -> "URL": if isinstance(keys, str): keys = [keys] params = MultiDict(parse_qsl(self.query, keep_blank_values=True)) for key in keys: params.pop(key, None) query = urlencode(params.multi_items()) return self.replace(query=query) def __eq__(self, other: typing.Any) -> bool: return str(self) == str(other) def __str__(self) -> str: return self._url def __repr__(self) -> str: url = str(self) if self.password: url = str(self.replace(password="********")) return f"{self.__class__.__name__}({repr(url)})" class URLPath(str): """ A URL path string that may also hold an associated protocol and/or host. Used by the routing to return `url_path_for` matches. """ def __new__(cls, path: str, protocol: str = "", host: str = "") -> "URLPath": assert protocol in ("http", "websocket", "") return str.__new__(cls, path) def __init__(self, path: str, protocol: str = "", host: str = "") -> None: self.protocol = protocol self.host = host def make_absolute_url(self, base_url: typing.Union[str, URL]) -> str: if isinstance(base_url, str): base_url = URL(base_url) if self.protocol: scheme = { "http": {True: "https", False: "http"}, "websocket": {True: "wss", False: "ws"}, }[self.protocol][base_url.is_secure] else: scheme = base_url.scheme netloc = self.host or base_url.netloc path = base_url.path.rstrip("/") + str(self) return str(URL(scheme=scheme, netloc=netloc, path=path)) class Secret: """ Holds a string value that should not be revealed in tracebacks etc. You should cast the value to `str` at the point it is required. """ def __init__(self, value: str): self._value = value def __repr__(self) -> str: class_name = self.__class__.__name__ return f"{class_name}('**********')" def __str__(self) -> str: return self._value def __bool__(self) -> bool: return bool(self._value) class CommaSeparatedStrings(Sequence): def __init__(self, value: typing.Union[str, typing.Sequence[str]]): if isinstance(value, str): splitter = shlex(value, posix=True) splitter.whitespace = "," splitter.whitespace_split = True self._items = [item.strip() for item in splitter] else: self._items = list(value) def __len__(self) -> int: return len(self._items) def __getitem__(self, index: typing.Union[int, slice]) -> typing.Any: return self._items[index] def __iter__(self) -> typing.Iterator[str]: return iter(self._items) def __repr__(self) -> str: class_name = self.__class__.__name__ items = [item for item in self] return f"{class_name}({items!r})" def __str__(self) -> str: return ", ".join(repr(item) for item in self) class ImmutableMultiDict(typing.Mapping[_KeyType, _CovariantValueType]): _dict: typing.Dict[_KeyType, _CovariantValueType] def __init__( self, *args: typing.Union[ "ImmutableMultiDict[_KeyType, _CovariantValueType]", typing.Mapping[_KeyType, _CovariantValueType], typing.Iterable[typing.Tuple[_KeyType, _CovariantValueType]], ], **kwargs: typing.Any, ) -> None: assert len(args) < 2, "Too many arguments." 
value: typing.Any = args[0] if args else [] if kwargs: value = ( ImmutableMultiDict(value).multi_items() + ImmutableMultiDict(kwargs).multi_items() # type: ignore[operator] ) if not value: _items: typing.List[typing.Tuple[typing.Any, typing.Any]] = [] elif hasattr(value, "multi_items"): value = typing.cast( ImmutableMultiDict[_KeyType, _CovariantValueType], value ) _items = list(value.multi_items()) elif hasattr(value, "items"): value = typing.cast(typing.Mapping[_KeyType, _CovariantValueType], value) _items = list(value.items()) else: value = typing.cast( typing.List[typing.Tuple[typing.Any, typing.Any]], value ) _items = list(value) self._dict = {k: v for k, v in _items} self._list = _items def getlist(self, key: typing.Any) -> typing.List[_CovariantValueType]: return [item_value for item_key, item_value in self._list if item_key == key] def keys(self) -> typing.KeysView[_KeyType]: return self._dict.keys() def values(self) -> typing.ValuesView[_CovariantValueType]: return self._dict.values() def items(self) -> typing.ItemsView[_KeyType, _CovariantValueType]: return self._dict.items() def multi_items(self) -> typing.List[typing.Tuple[_KeyType, _CovariantValueType]]: return list(self._list) def __getitem__(self, key: _KeyType) -> _CovariantValueType: return self._dict[key] def __contains__(self, key: typing.Any) -> bool: return key in self._dict def __iter__(self) -> typing.Iterator[_KeyType]: return iter(self.keys()) def __len__(self) -> int: return len(self._dict) def __eq__(self, other: typing.Any) -> bool: if not isinstance(other, self.__class__): return False return sorted(self._list) == sorted(other._list) def __repr__(self) -> str: class_name = self.__class__.__name__ items = self.multi_items() return f"{class_name}({items!r})" class MultiDict(ImmutableMultiDict[typing.Any, typing.Any]): def __setitem__(self, key: typing.Any, value: typing.Any) -> None: self.setlist(key, [value]) def __delitem__(self, key: typing.Any) -> None: self._list = [(k, v) for k, v in self._list if k != key] del self._dict[key] def pop(self, key: typing.Any, default: typing.Any = None) -> typing.Any: self._list = [(k, v) for k, v in self._list if k != key] return self._dict.pop(key, default) def popitem(self) -> typing.Tuple: key, value = self._dict.popitem() self._list = [(k, v) for k, v in self._list if k != key] return key, value def poplist(self, key: typing.Any) -> typing.List: values = [v for k, v in self._list if k == key] self.pop(key) return values def clear(self) -> None: self._dict.clear() self._list.clear() def setdefault(self, key: typing.Any, default: typing.Any = None) -> typing.Any: if key not in self: self._dict[key] = default self._list.append((key, default)) return self[key] def setlist(self, key: typing.Any, values: typing.List) -> None: if not values: self.pop(key, None) else: existing_items = [(k, v) for (k, v) in self._list if k != key] self._list = existing_items + [(key, value) for value in values] self._dict[key] = values[-1] def append(self, key: typing.Any, value: typing.Any) -> None: self._list.append((key, value)) self._dict[key] = value def update( self, *args: typing.Union[ "MultiDict", typing.Mapping, typing.List[typing.Tuple[typing.Any, typing.Any]], ], **kwargs: typing.Any, ) -> None: value = MultiDict(*args, **kwargs) existing_items = [(k, v) for (k, v) in self._list if k not in value.keys()] self._list = existing_items + value.multi_items() self._dict.update(value) class QueryParams(ImmutableMultiDict[str, str]): """ An immutable multidict. 
""" def __init__( self, *args: typing.Union[ "ImmutableMultiDict", typing.Mapping, typing.List[typing.Tuple[typing.Any, typing.Any]], str, bytes, ], **kwargs: typing.Any, ) -> None: assert len(args) < 2, "Too many arguments." value = args[0] if args else [] if isinstance(value, str): super().__init__(parse_qsl(value, keep_blank_values=True), **kwargs) elif isinstance(value, bytes): super().__init__( parse_qsl(value.decode("latin-1"), keep_blank_values=True), **kwargs ) else: super().__init__(*args, **kwargs) # type: ignore[arg-type] self._list = [(str(k), str(v)) for k, v in self._list] self._dict = {str(k): str(v) for k, v in self._dict.items()} def __str__(self) -> str: return urlencode(self._list) def __repr__(self) -> str: class_name = self.__class__.__name__ query_string = str(self) return f"{class_name}({query_string!r})" class UploadFile: """ An uploaded file included as part of the request data. """ def __init__( self, file: typing.BinaryIO, *, size: typing.Optional[int] = None, filename: typing.Optional[str] = None, headers: "typing.Optional[Headers]" = None, ) -> None: self.filename = filename self.file = file self.size = size self.headers = headers or Headers() @property def content_type(self) -> typing.Optional[str]: return self.headers.get("content-type", None) @property def _in_memory(self) -> bool: # check for SpooledTemporaryFile._rolled rolled_to_disk = getattr(self.file, "_rolled", True) return not rolled_to_disk async def write(self, data: bytes) -> None: if self.size is not None: self.size += len(data) if self._in_memory: self.file.write(data) else: await run_in_threadpool(self.file.write, data) async def read(self, size: int = -1) -> bytes: if self._in_memory: return self.file.read(size) return await run_in_threadpool(self.file.read, size) async def seek(self, offset: int) -> None: if self._in_memory: self.file.seek(offset) else: await run_in_threadpool(self.file.seek, offset) async def close(self) -> None: if self._in_memory: self.file.close() else: await run_in_threadpool(self.file.close) class FormData(ImmutableMultiDict[str, typing.Union[UploadFile, str]]): """ An immutable multidict, containing both file uploads and text input. """ def __init__( self, *args: typing.Union[ "FormData", typing.Mapping[str, typing.Union[str, UploadFile]], typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]], ], **kwargs: typing.Union[str, UploadFile], ) -> None: super().__init__(*args, **kwargs) async def close(self) -> None: for key, value in self.multi_items(): if isinstance(value, UploadFile): await value.close() class Headers(typing.Mapping[str, str]): """ An immutable, case-insensitive multidict. """ def __init__( self, headers: typing.Optional[typing.Mapping[str, str]] = None, raw: typing.Optional[typing.List[typing.Tuple[bytes, bytes]]] = None, scope: typing.Optional[typing.MutableMapping[str, typing.Any]] = None, ) -> None: self._list: typing.List[typing.Tuple[bytes, bytes]] = [] if headers is not None: assert raw is None, 'Cannot set both "headers" and "raw".' assert scope is None, 'Cannot set both "headers" and "scope".' self._list = [ (key.lower().encode("latin-1"), value.encode("latin-1")) for key, value in headers.items() ] elif raw is not None: assert scope is None, 'Cannot set both "raw" and "scope".' 
self._list = raw elif scope is not None: # scope["headers"] isn't necessarily a list # it might be a tuple or other iterable self._list = scope["headers"] = list(scope["headers"]) @property def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]: return list(self._list) def keys(self) -> typing.List[str]: # type: ignore[override] return [key.decode("latin-1") for key, value in self._list] def values(self) -> typing.List[str]: # type: ignore[override] return [value.decode("latin-1") for key, value in self._list] def items(self) -> typing.List[typing.Tuple[str, str]]: # type: ignore[override] return [ (key.decode("latin-1"), value.decode("latin-1")) for key, value in self._list ] def getlist(self, key: str) -> typing.List[str]: get_header_key = key.lower().encode("latin-1") return [ item_value.decode("latin-1") for item_key, item_value in self._list if item_key == get_header_key ] def mutablecopy(self) -> "MutableHeaders": return MutableHeaders(raw=self._list[:]) def __getitem__(self, key: str) -> str: get_header_key = key.lower().encode("latin-1") for header_key, header_value in self._list: if header_key == get_header_key: return header_value.decode("latin-1") raise KeyError(key) def __contains__(self, key: typing.Any) -> bool: get_header_key = key.lower().encode("latin-1") for header_key, header_value in self._list: if header_key == get_header_key: return True return False def __iter__(self) -> typing.Iterator[typing.Any]: return iter(self.keys()) def __len__(self) -> int: return len(self._list) def __eq__(self, other: typing.Any) -> bool: if not isinstance(other, Headers): return False return sorted(self._list) == sorted(other._list) def __repr__(self) -> str: class_name = self.__class__.__name__ as_dict = dict(self.items()) if len(as_dict) == len(self): return f"{class_name}({as_dict!r})" return f"{class_name}(raw={self.raw!r})" class MutableHeaders(Headers): def __setitem__(self, key: str, value: str) -> None: """ Set the header `key` to `value`, removing any duplicate entries. Retains insertion order. """ set_key = key.lower().encode("latin-1") set_value = value.encode("latin-1") found_indexes: "typing.List[int]" = [] for idx, (item_key, item_value) in enumerate(self._list): if item_key == set_key: found_indexes.append(idx) for idx in reversed(found_indexes[1:]): del self._list[idx] if found_indexes: idx = found_indexes[0] self._list[idx] = (set_key, set_value) else: self._list.append((set_key, set_value)) def __delitem__(self, key: str) -> None: """ Remove the header `key`. """ del_key = key.lower().encode("latin-1") pop_indexes: "typing.List[int]" = [] for idx, (item_key, item_value) in enumerate(self._list): if item_key == del_key: pop_indexes.append(idx) for idx in reversed(pop_indexes): del self._list[idx] def __ior__(self, other: typing.Mapping[str, str]) -> "MutableHeaders": if not isinstance(other, typing.Mapping): raise TypeError(f"Expected a mapping but got {other.__class__.__name__}") self.update(other) return self def __or__(self, other: typing.Mapping[str, str]) -> "MutableHeaders": if not isinstance(other, typing.Mapping): raise TypeError(f"Expected a mapping but got {other.__class__.__name__}") new = self.mutablecopy() new.update(other) return new @property def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]: return self._list def setdefault(self, key: str, value: str) -> str: """ If the header `key` does not exist, then set it to `value`. Returns the header value. 
""" set_key = key.lower().encode("latin-1") set_value = value.encode("latin-1") for idx, (item_key, item_value) in enumerate(self._list): if item_key == set_key: return item_value.decode("latin-1") self._list.append((set_key, set_value)) return value def update(self, other: typing.Mapping[str, str]) -> None: for key, val in other.items(): self[key] = val def append(self, key: str, value: str) -> None: """ Append a header, preserving any duplicate entries. """ append_key = key.lower().encode("latin-1") append_value = value.encode("latin-1") self._list.append((append_key, append_value)) def add_vary_header(self, vary: str) -> None: existing = self.get("vary") if existing is not None: vary = ", ".join([existing, vary]) self["vary"] = vary class State: """ An object that can be used to store arbitrary state. Used for `request.state` and `app.state`. """ _state: typing.Dict[str, typing.Any] def __init__(self, state: typing.Optional[typing.Dict[str, typing.Any]] = None): if state is None: state = {} super().__setattr__("_state", state) def __setattr__(self, key: typing.Any, value: typing.Any) -> None: self._state[key] = value def __getattr__(self, key: typing.Any) -> typing.Any: try: return self._state[key] except KeyError: message = "'{}' object has no attribute '{}'" raise AttributeError(message.format(self.__class__.__name__, key)) def __delattr__(self, key: typing.Any) -> None: del self._state[key]
23,092
Python
31.571227
87
0.554867
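A minimal usage sketch for the multidict types in the file above: duplicate keys survive in the underlying `_list` while the `_dict` view keeps the last value per key, and `MutableHeaders.__setitem__` collapses duplicates. Assumes this vendored starlette package is importable; all keys and values are illustrative.

from starlette.datastructures import MutableHeaders, QueryParams

# QueryParams parses a raw query string and keeps repeated keys in order.
params = QueryParams("a=1&a=2&b=3")
assert params.getlist("a") == ["1", "2"]
assert params["a"] == "2"            # the dict view keeps the *last* value
assert str(params) == "a=1&a=2&b=3"  # re-encoded from the underlying list

# append() preserves duplicate header entries; __setitem__ removes them.
headers = MutableHeaders(raw=[(b"vary", b"Accept")])
headers.append("vary", "Cookie")
assert headers.getlist("vary") == ["Accept", "Cookie"]
headers["vary"] = "Accept-Encoding"  # de-duplicates, keeping insertion order
assert headers.getlist("vary") == ["Accept-Encoding"]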
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/responses.py
import http.cookies import json import os import stat import sys import typing from datetime import datetime from email.utils import format_datetime, formatdate from functools import partial from mimetypes import guess_type as mimetypes_guess_type from urllib.parse import quote import anyio from starlette._compat import md5_hexdigest from starlette.background import BackgroundTask from starlette.concurrency import iterate_in_threadpool from starlette.datastructures import URL, MutableHeaders from starlette.types import Receive, Scope, Send if sys.version_info >= (3, 8): # pragma: no cover from typing import Literal else: # pragma: no cover from typing_extensions import Literal # Workaround for adding samesite support to pre 3.8 python http.cookies.Morsel._reserved["samesite"] = "SameSite" # type: ignore[attr-defined] # Compatibility wrapper for `mimetypes.guess_type` to support `os.PathLike` on <py3.8 def guess_type( url: typing.Union[str, "os.PathLike[str]"], strict: bool = True ) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]: if sys.version_info < (3, 8): # pragma: no cover url = os.fspath(url) return mimetypes_guess_type(url, strict) class Response: media_type = None charset = "utf-8" def __init__( self, content: typing.Any = None, status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, ) -> None: self.status_code = status_code if media_type is not None: self.media_type = media_type self.background = background self.body = self.render(content) self.init_headers(headers) def render(self, content: typing.Any) -> bytes: if content is None: return b"" if isinstance(content, bytes): return content return content.encode(self.charset) def init_headers( self, headers: typing.Optional[typing.Mapping[str, str]] = None ) -> None: if headers is None: raw_headers: typing.List[typing.Tuple[bytes, bytes]] = [] populate_content_length = True populate_content_type = True else: raw_headers = [ (k.lower().encode("latin-1"), v.encode("latin-1")) for k, v in headers.items() ] keys = [h[0] for h in raw_headers] populate_content_length = b"content-length" not in keys populate_content_type = b"content-type" not in keys body = getattr(self, "body", None) if ( body is not None and populate_content_length and not (self.status_code < 200 or self.status_code in (204, 304)) ): content_length = str(len(body)) raw_headers.append((b"content-length", content_length.encode("latin-1"))) content_type = self.media_type if content_type is not None and populate_content_type: if content_type.startswith("text/"): content_type += "; charset=" + self.charset raw_headers.append((b"content-type", content_type.encode("latin-1"))) self.raw_headers = raw_headers @property def headers(self) -> MutableHeaders: if not hasattr(self, "_headers"): self._headers = MutableHeaders(raw=self.raw_headers) return self._headers def set_cookie( self, key: str, value: str = "", max_age: typing.Optional[int] = None, expires: typing.Optional[typing.Union[datetime, str, int]] = None, path: str = "/", domain: typing.Optional[str] = None, secure: bool = False, httponly: bool = False, samesite: typing.Optional[Literal["lax", "strict", "none"]] = "lax", ) -> None: cookie: "http.cookies.BaseCookie[str]" = http.cookies.SimpleCookie() cookie[key] = value if max_age is not None: cookie[key]["max-age"] = max_age if expires is not None: if isinstance(expires, datetime): cookie[key]["expires"] = format_datetime(expires, usegmt=True) 
else: cookie[key]["expires"] = expires if path is not None: cookie[key]["path"] = path if domain is not None: cookie[key]["domain"] = domain if secure: cookie[key]["secure"] = True if httponly: cookie[key]["httponly"] = True if samesite is not None: assert samesite.lower() in [ "strict", "lax", "none", ], "samesite must be either 'strict', 'lax' or 'none'" cookie[key]["samesite"] = samesite cookie_val = cookie.output(header="").strip() self.raw_headers.append((b"set-cookie", cookie_val.encode("latin-1"))) def delete_cookie( self, key: str, path: str = "/", domain: typing.Optional[str] = None, secure: bool = False, httponly: bool = False, samesite: typing.Optional[Literal["lax", "strict", "none"]] = "lax", ) -> None: self.set_cookie( key, max_age=0, expires=0, path=path, domain=domain, secure=secure, httponly=httponly, samesite=samesite, ) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: await send( { "type": "http.response.start", "status": self.status_code, "headers": self.raw_headers, } ) await send({"type": "http.response.body", "body": self.body}) if self.background is not None: await self.background() class HTMLResponse(Response): media_type = "text/html" class PlainTextResponse(Response): media_type = "text/plain" class JSONResponse(Response): media_type = "application/json" def __init__( self, content: typing.Any, status_code: int = 200, headers: typing.Optional[typing.Dict[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, ) -> None: super().__init__(content, status_code, headers, media_type, background) def render(self, content: typing.Any) -> bytes: return json.dumps( content, ensure_ascii=False, allow_nan=False, indent=None, separators=(",", ":"), ).encode("utf-8") class RedirectResponse(Response): def __init__( self, url: typing.Union[str, URL], status_code: int = 307, headers: typing.Optional[typing.Mapping[str, str]] = None, background: typing.Optional[BackgroundTask] = None, ) -> None: super().__init__( content=b"", status_code=status_code, headers=headers, background=background ) self.headers["location"] = quote(str(url), safe=":/%#?=@[]!$&'()*+,;") Content = typing.Union[str, bytes] SyncContentStream = typing.Iterator[Content] AsyncContentStream = typing.AsyncIterable[Content] ContentStream = typing.Union[AsyncContentStream, SyncContentStream] class StreamingResponse(Response): body_iterator: AsyncContentStream def __init__( self, content: ContentStream, status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, ) -> None: if isinstance(content, typing.AsyncIterable): self.body_iterator = content else: self.body_iterator = iterate_in_threadpool(content) self.status_code = status_code self.media_type = self.media_type if media_type is None else media_type self.background = background self.init_headers(headers) async def listen_for_disconnect(self, receive: Receive) -> None: while True: message = await receive() if message["type"] == "http.disconnect": break async def stream_response(self, send: Send) -> None: await send( { "type": "http.response.start", "status": self.status_code, "headers": self.raw_headers, } ) async for chunk in self.body_iterator: if not isinstance(chunk, bytes): chunk = chunk.encode(self.charset) await send({"type": "http.response.body", "body": chunk, "more_body": True}) await send({"type": "http.response.body", "body": b"", "more_body": False}) 
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: async with anyio.create_task_group() as task_group: async def wrap(func: "typing.Callable[[], typing.Awaitable[None]]") -> None: await func() task_group.cancel_scope.cancel() task_group.start_soon(wrap, partial(self.stream_response, send)) await wrap(partial(self.listen_for_disconnect, receive)) if self.background is not None: await self.background() class FileResponse(Response): chunk_size = 64 * 1024 def __init__( self, path: typing.Union[str, "os.PathLike[str]"], status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, filename: typing.Optional[str] = None, stat_result: typing.Optional[os.stat_result] = None, method: typing.Optional[str] = None, content_disposition_type: str = "attachment", ) -> None: self.path = path self.status_code = status_code self.filename = filename self.send_header_only = method is not None and method.upper() == "HEAD" if media_type is None: media_type = guess_type(filename or path)[0] or "text/plain" self.media_type = media_type self.background = background self.init_headers(headers) if self.filename is not None: content_disposition_filename = quote(self.filename) if content_disposition_filename != self.filename: content_disposition = "{}; filename*=utf-8''{}".format( content_disposition_type, content_disposition_filename ) else: content_disposition = '{}; filename="{}"'.format( content_disposition_type, self.filename ) self.headers.setdefault("content-disposition", content_disposition) self.stat_result = stat_result if stat_result is not None: self.set_stat_headers(stat_result) def set_stat_headers(self, stat_result: os.stat_result) -> None: content_length = str(stat_result.st_size) last_modified = formatdate(stat_result.st_mtime, usegmt=True) etag_base = str(stat_result.st_mtime) + "-" + str(stat_result.st_size) etag = md5_hexdigest(etag_base.encode(), usedforsecurity=False) self.headers.setdefault("content-length", content_length) self.headers.setdefault("last-modified", last_modified) self.headers.setdefault("etag", etag) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if self.stat_result is None: try: stat_result = await anyio.to_thread.run_sync(os.stat, self.path) self.set_stat_headers(stat_result) except FileNotFoundError: raise RuntimeError(f"File at path {self.path} does not exist.") else: mode = stat_result.st_mode if not stat.S_ISREG(mode): raise RuntimeError(f"File at path {self.path} is not a file.") await send( { "type": "http.response.start", "status": self.status_code, "headers": self.raw_headers, } ) if self.send_header_only: await send({"type": "http.response.body", "body": b"", "more_body": False}) else: async with await anyio.open_file(self.path, mode="rb") as file: more_body = True while more_body: chunk = await file.read(self.chunk_size) more_body = len(chunk) == self.chunk_size await send( { "type": "http.response.body", "body": chunk, "more_body": more_body, } ) if self.background is not None: await self.background()
13,147
Python
34.825613
88
0.573135
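A sketch of the ASGI contract implemented by `Response.__call__` above: the body is rendered eagerly, `init_headers` fills in content-length and content-type, and `set_cookie` appends a raw set-cookie header. The collector callables stand in for a real ASGI server and are illustrative.

import asyncio
from starlette.responses import JSONResponse

async def main() -> None:
    response = JSONResponse({"hello": "world"}, status_code=200)
    response.set_cookie("session", "abc123", httponly=True)

    sent = []

    async def send(message):   # collects the messages a server would receive
        sent.append(message)

    async def receive():       # unused by a plain (non-streaming) Response
        return {"type": "http.request"}

    await response({"type": "http"}, receive, send)
    assert sent[0]["type"] == "http.response.start"
    assert (b"content-type", b"application/json") in sent[0]["headers"]
    assert sent[1]["body"] == b'{"hello":"world"}'  # compact separators

asyncio.run(main())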
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/background.py
import sys import typing if sys.version_info >= (3, 10): # pragma: no cover from typing import ParamSpec else: # pragma: no cover from typing_extensions import ParamSpec from starlette._utils import is_async_callable from starlette.concurrency import run_in_threadpool P = ParamSpec("P") class BackgroundTask: def __init__( self, func: typing.Callable[P, typing.Any], *args: P.args, **kwargs: P.kwargs ) -> None: self.func = func self.args = args self.kwargs = kwargs self.is_async = is_async_callable(func) async def __call__(self) -> None: if self.is_async: await self.func(*self.args, **self.kwargs) else: await run_in_threadpool(self.func, *self.args, **self.kwargs) class BackgroundTasks(BackgroundTask): def __init__(self, tasks: typing.Optional[typing.Sequence[BackgroundTask]] = None): self.tasks = list(tasks) if tasks else [] def add_task( self, func: typing.Callable[P, typing.Any], *args: P.args, **kwargs: P.kwargs ) -> None: task = BackgroundTask(func, *args, **kwargs) self.tasks.append(task) async def __call__(self) -> None: for task in self.tasks: await task()
1,259
Python
27.636363
87
0.622716
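A sketch of how `BackgroundTask` dispatches: `is_async_callable` decides between awaiting the function directly and routing it through `run_in_threadpool`. Normally a Response awaits its `.background` after the body is sent; here the tasks object is awaited directly, and all names are illustrative.

import asyncio
from starlette.background import BackgroundTasks

results = []

def sync_job(tag: str) -> None:
    results.append(tag)

async def async_job(tag: str) -> None:
    results.append(tag)

async def main() -> None:
    tasks = BackgroundTasks()
    tasks.add_task(sync_job, "ran in threadpool")   # sync -> run_in_threadpool
    tasks.add_task(async_job, "awaited directly")   # async -> awaited as-is
    await tasks()                                    # runs in insertion order

asyncio.run(main())
assert results == ["ran in threadpool", "awaited directly"]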
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/_compat.py
import hashlib

# Compat wrapper to always include the `usedforsecurity=...` parameter,
# which is only added from Python 3.9 onwards.
# We use this flag to indicate that we use `md5` hashes only for non-security
# cases (our ETag checksums).
# If we don't indicate that we're using MD5 for non-security related reasons,
# then attempting to use this function will raise an error when used in
# environments which enable a strict "FIPS mode".
#
# See issue: https://github.com/encode/starlette/issues/1365
try:
    # check if the Python version supports the parameter
    # using usedforsecurity=False to avoid an exception on FIPS systems
    # that reject usedforsecurity=True
    hashlib.md5(b"data", usedforsecurity=False)  # type: ignore[call-arg]

    def md5_hexdigest(
        data: bytes, *, usedforsecurity: bool = True
    ) -> str:  # pragma: no cover
        return hashlib.md5(  # type: ignore[call-arg]
            data, usedforsecurity=usedforsecurity
        ).hexdigest()

except TypeError:  # pragma: no cover

    def md5_hexdigest(data: bytes, *, usedforsecurity: bool = True) -> str:
        return hashlib.md5(data).hexdigest()
1,149
Python
37.333332
77
0.706701
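A short sketch of the wrapper above; the input bytes mirror the mtime-size ETag base that FileResponse hashes and are purely illustrative.

from starlette._compat import md5_hexdigest

# Non-security checksum, safe on FIPS-enabled builds when the parameter is
# supported; the TypeError fallback simply ignores the flag on older Pythons.
etag = md5_hexdigest(b"1676000000.0-13147", usedforsecurity=False)
assert len(etag) == 32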
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/templating.py
import typing from os import PathLike from starlette.background import BackgroundTask from starlette.requests import Request from starlette.responses import Response from starlette.types import Receive, Scope, Send try: import jinja2 # @contextfunction was renamed to @pass_context in Jinja 3.0, and was removed in 3.1 # hence we try to get pass_context (most installs will be >=3.1) # and fall back to contextfunction, # adding a type ignore for mypy to let us access an attribute that may not exist if hasattr(jinja2, "pass_context"): pass_context = jinja2.pass_context else: # pragma: nocover pass_context = jinja2.contextfunction # type: ignore[attr-defined] except ImportError: # pragma: nocover jinja2 = None # type: ignore[assignment] class _TemplateResponse(Response): media_type = "text/html" def __init__( self, template: typing.Any, context: dict, status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, ): self.template = template self.context = context content = template.render(context) super().__init__(content, status_code, headers, media_type, background) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: request = self.context.get("request", {}) extensions = request.get("extensions", {}) if "http.response.debug" in extensions: await send( { "type": "http.response.debug", "info": { "template": self.template, "context": self.context, }, } ) await super().__call__(scope, receive, send) class Jinja2Templates: """ templates = Jinja2Templates("templates") return templates.TemplateResponse("index.html", {"request": request}) """ def __init__( self, directory: typing.Union[str, PathLike], context_processors: typing.Optional[ typing.List[typing.Callable[[Request], typing.Dict[str, typing.Any]]] ] = None, **env_options: typing.Any ) -> None: assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates" self.env = self._create_env(directory, **env_options) self.context_processors = context_processors or [] def _create_env( self, directory: typing.Union[str, PathLike], **env_options: typing.Any ) -> "jinja2.Environment": @pass_context def url_for(context: dict, name: str, **path_params: typing.Any) -> str: request = context["request"] return request.url_for(name, **path_params) loader = jinja2.FileSystemLoader(directory) env_options.setdefault("loader", loader) env_options.setdefault("autoescape", True) env = jinja2.Environment(**env_options) env.globals["url_for"] = url_for return env def get_template(self, name: str) -> "jinja2.Template": return self.env.get_template(name) def TemplateResponse( self, name: str, context: dict, status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, ) -> _TemplateResponse: if "request" not in context: raise ValueError('context must include a "request" key') request = typing.cast(Request, context["request"]) for context_processor in self.context_processors: context.update(context_processor(request)) template = self.get_template(name) return _TemplateResponse( template, context, status_code=status_code, headers=headers, media_type=media_type, background=background, )
4,120
Python
33.341666
88
0.606311
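A sketch wiring Jinja2Templates into an application, assuming jinja2 is installed and a local templates/index.html exists; the directory name and route are illustrative. The context must carry the request so the `url_for` global registered in `_create_env` can resolve routes.

from starlette.applications import Starlette
from starlette.routing import Route
from starlette.templating import Jinja2Templates

templates = Jinja2Templates(directory="templates")  # assumed to exist

async def homepage(request):
    # TemplateResponse raises ValueError without a "request" key.
    return templates.TemplateResponse("index.html", {"request": request})

app = Starlette(routes=[Route("/", homepage)])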
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/__init__.py
__version__ = "0.25.0"
23
Python
10.999995
22
0.478261
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/websockets.py
import enum import json import typing from starlette.requests import HTTPConnection from starlette.types import Message, Receive, Scope, Send class WebSocketState(enum.Enum): CONNECTING = 0 CONNECTED = 1 DISCONNECTED = 2 class WebSocketDisconnect(Exception): def __init__(self, code: int = 1000, reason: typing.Optional[str] = None) -> None: self.code = code self.reason = reason or "" class WebSocket(HTTPConnection): def __init__(self, scope: Scope, receive: Receive, send: Send) -> None: super().__init__(scope) assert scope["type"] == "websocket" self._receive = receive self._send = send self.client_state = WebSocketState.CONNECTING self.application_state = WebSocketState.CONNECTING async def receive(self) -> Message: """ Receive ASGI websocket messages, ensuring valid state transitions. """ if self.client_state == WebSocketState.CONNECTING: message = await self._receive() message_type = message["type"] if message_type != "websocket.connect": raise RuntimeError( 'Expected ASGI message "websocket.connect", ' f"but got {message_type!r}" ) self.client_state = WebSocketState.CONNECTED return message elif self.client_state == WebSocketState.CONNECTED: message = await self._receive() message_type = message["type"] if message_type not in {"websocket.receive", "websocket.disconnect"}: raise RuntimeError( 'Expected ASGI message "websocket.receive" or ' f'"websocket.disconnect", but got {message_type!r}' ) if message_type == "websocket.disconnect": self.client_state = WebSocketState.DISCONNECTED return message else: raise RuntimeError( 'Cannot call "receive" once a disconnect message has been received.' ) async def send(self, message: Message) -> None: """ Send ASGI websocket messages, ensuring valid state transitions. """ if self.application_state == WebSocketState.CONNECTING: message_type = message["type"] if message_type not in {"websocket.accept", "websocket.close"}: raise RuntimeError( 'Expected ASGI message "websocket.connect", ' f"but got {message_type!r}" ) if message_type == "websocket.close": self.application_state = WebSocketState.DISCONNECTED else: self.application_state = WebSocketState.CONNECTED await self._send(message) elif self.application_state == WebSocketState.CONNECTED: message_type = message["type"] if message_type not in {"websocket.send", "websocket.close"}: raise RuntimeError( 'Expected ASGI message "websocket.send" or "websocket.close", ' f"but got {message_type!r}" ) if message_type == "websocket.close": self.application_state = WebSocketState.DISCONNECTED await self._send(message) else: raise RuntimeError('Cannot call "send" once a close message has been sent.') async def accept( self, subprotocol: typing.Optional[str] = None, headers: typing.Optional[typing.Iterable[typing.Tuple[bytes, bytes]]] = None, ) -> None: headers = headers or [] if self.client_state == WebSocketState.CONNECTING: # If we haven't yet seen the 'connect' message, then wait for it first. await self.receive() await self.send( {"type": "websocket.accept", "subprotocol": subprotocol, "headers": headers} ) def _raise_on_disconnect(self, message: Message) -> None: if message["type"] == "websocket.disconnect": raise WebSocketDisconnect(message["code"]) async def receive_text(self) -> str: if self.application_state != WebSocketState.CONNECTED: raise RuntimeError( 'WebSocket is not connected. Need to call "accept" first.' 
) message = await self.receive() self._raise_on_disconnect(message) return message["text"] async def receive_bytes(self) -> bytes: if self.application_state != WebSocketState.CONNECTED: raise RuntimeError( 'WebSocket is not connected. Need to call "accept" first.' ) message = await self.receive() self._raise_on_disconnect(message) return message["bytes"] async def receive_json(self, mode: str = "text") -> typing.Any: if mode not in {"text", "binary"}: raise RuntimeError('The "mode" argument should be "text" or "binary".') if self.application_state != WebSocketState.CONNECTED: raise RuntimeError( 'WebSocket is not connected. Need to call "accept" first.' ) message = await self.receive() self._raise_on_disconnect(message) if mode == "text": text = message["text"] else: text = message["bytes"].decode("utf-8") return json.loads(text) async def iter_text(self) -> typing.AsyncIterator[str]: try: while True: yield await self.receive_text() except WebSocketDisconnect: pass async def iter_bytes(self) -> typing.AsyncIterator[bytes]: try: while True: yield await self.receive_bytes() except WebSocketDisconnect: pass async def iter_json(self) -> typing.AsyncIterator[typing.Any]: try: while True: yield await self.receive_json() except WebSocketDisconnect: pass async def send_text(self, data: str) -> None: await self.send({"type": "websocket.send", "text": data}) async def send_bytes(self, data: bytes) -> None: await self.send({"type": "websocket.send", "bytes": data}) async def send_json(self, data: typing.Any, mode: str = "text") -> None: if mode not in {"text", "binary"}: raise RuntimeError('The "mode" argument should be "text" or "binary".') text = json.dumps(data) if mode == "text": await self.send({"type": "websocket.send", "text": text}) else: await self.send({"type": "websocket.send", "bytes": text.encode("utf-8")}) async def close( self, code: int = 1000, reason: typing.Optional[str] = None ) -> None: await self.send( {"type": "websocket.close", "code": code, "reason": reason or ""} ) class WebSocketClose: def __init__(self, code: int = 1000, reason: typing.Optional[str] = None) -> None: self.code = code self.reason = reason or "" async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: await send( {"type": "websocket.close", "code": self.code, "reason": self.reason} )
7,317
Python
36.721649
88
0.574962
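A minimal echo endpoint against the WebSocket class above: `accept()` consumes the "websocket.connect" event first, and `iter_text()` exits quietly when a WebSocketDisconnect is raised. The route path is illustrative.

from starlette.applications import Starlette
from starlette.routing import WebSocketRoute
from starlette.websockets import WebSocket

async def echo(websocket: WebSocket) -> None:
    await websocket.accept()                 # waits for "websocket.connect"
    async for text in websocket.iter_text(): # ends silently on disconnect
        await websocket.send_text(f"echo: {text}")

app = Starlette(routes=[WebSocketRoute("/ws", echo)])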
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/authentication.py
import functools import inspect import typing from urllib.parse import urlencode from starlette._utils import is_async_callable from starlette.exceptions import HTTPException from starlette.requests import HTTPConnection, Request from starlette.responses import RedirectResponse, Response from starlette.websockets import WebSocket _CallableType = typing.TypeVar("_CallableType", bound=typing.Callable) def has_required_scope(conn: HTTPConnection, scopes: typing.Sequence[str]) -> bool: for scope in scopes: if scope not in conn.auth.scopes: return False return True def requires( scopes: typing.Union[str, typing.Sequence[str]], status_code: int = 403, redirect: typing.Optional[str] = None, ) -> typing.Callable[[_CallableType], _CallableType]: scopes_list = [scopes] if isinstance(scopes, str) else list(scopes) def decorator(func: typing.Callable) -> typing.Callable: sig = inspect.signature(func) for idx, parameter in enumerate(sig.parameters.values()): if parameter.name == "request" or parameter.name == "websocket": type_ = parameter.name break else: raise Exception( f'No "request" or "websocket" argument on function "{func}"' ) if type_ == "websocket": # Handle websocket functions. (Always async) @functools.wraps(func) async def websocket_wrapper( *args: typing.Any, **kwargs: typing.Any ) -> None: websocket = kwargs.get( "websocket", args[idx] if idx < len(args) else None ) assert isinstance(websocket, WebSocket) if not has_required_scope(websocket, scopes_list): await websocket.close() else: await func(*args, **kwargs) return websocket_wrapper elif is_async_callable(func): # Handle async request/response functions. @functools.wraps(func) async def async_wrapper( *args: typing.Any, **kwargs: typing.Any ) -> Response: request = kwargs.get("request", args[idx] if idx < len(args) else None) assert isinstance(request, Request) if not has_required_scope(request, scopes_list): if redirect is not None: orig_request_qparam = urlencode({"next": str(request.url)}) next_url = "{redirect_path}?{orig_request}".format( redirect_path=request.url_for(redirect), orig_request=orig_request_qparam, ) return RedirectResponse(url=next_url, status_code=303) raise HTTPException(status_code=status_code) return await func(*args, **kwargs) return async_wrapper else: # Handle sync request/response functions. 
@functools.wraps(func) def sync_wrapper(*args: typing.Any, **kwargs: typing.Any) -> Response: request = kwargs.get("request", args[idx] if idx < len(args) else None) assert isinstance(request, Request) if not has_required_scope(request, scopes_list): if redirect is not None: orig_request_qparam = urlencode({"next": str(request.url)}) next_url = "{redirect_path}?{orig_request}".format( redirect_path=request.url_for(redirect), orig_request=orig_request_qparam, ) return RedirectResponse(url=next_url, status_code=303) raise HTTPException(status_code=status_code) return func(*args, **kwargs) return sync_wrapper return decorator # type: ignore[return-value] class AuthenticationError(Exception): pass class AuthenticationBackend: async def authenticate( self, conn: HTTPConnection ) -> typing.Optional[typing.Tuple["AuthCredentials", "BaseUser"]]: raise NotImplementedError() # pragma: no cover class AuthCredentials: def __init__(self, scopes: typing.Optional[typing.Sequence[str]] = None): self.scopes = [] if scopes is None else list(scopes) class BaseUser: @property def is_authenticated(self) -> bool: raise NotImplementedError() # pragma: no cover @property def display_name(self) -> str: raise NotImplementedError() # pragma: no cover @property def identity(self) -> str: raise NotImplementedError() # pragma: no cover class SimpleUser(BaseUser): def __init__(self, username: str) -> None: self.username = username @property def is_authenticated(self) -> bool: return True @property def display_name(self) -> str: return self.username class UnauthenticatedUser(BaseUser): @property def is_authenticated(self) -> bool: return False @property def display_name(self) -> str: return ""
5,246
Python
33.071428
87
0.585208
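A sketch of the pieces above working together: a toy backend (the x-user header scheme is illustrative, not a real protocol) returns AuthCredentials plus a user, AuthenticationMiddleware stores them on the connection, and @requires checks `conn.auth.scopes` before the endpoint runs.

from starlette.applications import Starlette
from starlette.authentication import (
    AuthCredentials, AuthenticationBackend, SimpleUser, requires,
)
from starlette.middleware import Middleware
from starlette.middleware.authentication import AuthenticationMiddleware
from starlette.responses import PlainTextResponse
from starlette.routing import Route

class HeaderBackend(AuthenticationBackend):
    async def authenticate(self, conn):
        username = conn.headers.get("x-user")  # toy scheme, not production
        if username is None:
            return None  # connection stays an UnauthenticatedUser
        return AuthCredentials(["authenticated"]), SimpleUser(username)

@requires("authenticated", status_code=403)
async def profile(request):
    return PlainTextResponse(f"hello {request.user.display_name}")

app = Starlette(
    routes=[Route("/profile", profile)],
    middleware=[Middleware(AuthenticationMiddleware, backend=HeaderBackend())],
)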
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/formparsers.py
import typing from dataclasses import dataclass, field from enum import Enum from tempfile import SpooledTemporaryFile from urllib.parse import unquote_plus from starlette.datastructures import FormData, Headers, UploadFile try: import multipart from multipart.multipart import parse_options_header except ImportError: # pragma: nocover parse_options_header = None multipart = None class FormMessage(Enum): FIELD_START = 1 FIELD_NAME = 2 FIELD_DATA = 3 FIELD_END = 4 END = 5 @dataclass class MultipartPart: content_disposition: typing.Optional[bytes] = None field_name: str = "" data: bytes = b"" file: typing.Optional[UploadFile] = None item_headers: typing.List[typing.Tuple[bytes, bytes]] = field(default_factory=list) def _user_safe_decode(src: bytes, codec: str) -> str: try: return src.decode(codec) except (UnicodeDecodeError, LookupError): return src.decode("latin-1") class MultiPartException(Exception): def __init__(self, message: str) -> None: self.message = message class FormParser: def __init__( self, headers: Headers, stream: typing.AsyncGenerator[bytes, None] ) -> None: assert ( multipart is not None ), "The `python-multipart` library must be installed to use form parsing." self.headers = headers self.stream = stream self.messages: typing.List[typing.Tuple[FormMessage, bytes]] = [] def on_field_start(self) -> None: message = (FormMessage.FIELD_START, b"") self.messages.append(message) def on_field_name(self, data: bytes, start: int, end: int) -> None: message = (FormMessage.FIELD_NAME, data[start:end]) self.messages.append(message) def on_field_data(self, data: bytes, start: int, end: int) -> None: message = (FormMessage.FIELD_DATA, data[start:end]) self.messages.append(message) def on_field_end(self) -> None: message = (FormMessage.FIELD_END, b"") self.messages.append(message) def on_end(self) -> None: message = (FormMessage.END, b"") self.messages.append(message) async def parse(self) -> FormData: # Callbacks dictionary. callbacks = { "on_field_start": self.on_field_start, "on_field_name": self.on_field_name, "on_field_data": self.on_field_data, "on_field_end": self.on_field_end, "on_end": self.on_end, } # Create the parser. parser = multipart.QuerystringParser(callbacks) field_name = b"" field_value = b"" items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = [] # Feed the parser with data from the request. async for chunk in self.stream: if chunk: parser.write(chunk) else: parser.finalize() messages = list(self.messages) self.messages.clear() for message_type, message_bytes in messages: if message_type == FormMessage.FIELD_START: field_name = b"" field_value = b"" elif message_type == FormMessage.FIELD_NAME: field_name += message_bytes elif message_type == FormMessage.FIELD_DATA: field_value += message_bytes elif message_type == FormMessage.FIELD_END: name = unquote_plus(field_name.decode("latin-1")) value = unquote_plus(field_value.decode("latin-1")) items.append((name, value)) return FormData(items) class MultiPartParser: max_file_size = 1024 * 1024 def __init__( self, headers: Headers, stream: typing.AsyncGenerator[bytes, None], *, max_files: typing.Union[int, float] = 1000, max_fields: typing.Union[int, float] = 1000, ) -> None: assert ( multipart is not None ), "The `python-multipart` library must be installed to use form parsing." 
self.headers = headers self.stream = stream self.max_files = max_files self.max_fields = max_fields self.items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = [] self._current_files = 0 self._current_fields = 0 self._current_partial_header_name: bytes = b"" self._current_partial_header_value: bytes = b"" self._current_part = MultipartPart() self._charset = "" self._file_parts_to_write: typing.List[typing.Tuple[MultipartPart, bytes]] = [] self._file_parts_to_finish: typing.List[MultipartPart] = [] self._files_to_close_on_error: typing.List[SpooledTemporaryFile] = [] def on_part_begin(self) -> None: self._current_part = MultipartPart() def on_part_data(self, data: bytes, start: int, end: int) -> None: message_bytes = data[start:end] if self._current_part.file is None: self._current_part.data += message_bytes else: self._file_parts_to_write.append((self._current_part, message_bytes)) def on_part_end(self) -> None: if self._current_part.file is None: self.items.append( ( self._current_part.field_name, _user_safe_decode(self._current_part.data, self._charset), ) ) else: self._file_parts_to_finish.append(self._current_part) # The file can be added to the items right now even though it's not # finished yet, because it will be finished in the `parse()` method, before # self.items is used in the return value. self.items.append((self._current_part.field_name, self._current_part.file)) def on_header_field(self, data: bytes, start: int, end: int) -> None: self._current_partial_header_name += data[start:end] def on_header_value(self, data: bytes, start: int, end: int) -> None: self._current_partial_header_value += data[start:end] def on_header_end(self) -> None: field = self._current_partial_header_name.lower() if field == b"content-disposition": self._current_part.content_disposition = self._current_partial_header_value self._current_part.item_headers.append( (field, self._current_partial_header_value) ) self._current_partial_header_name = b"" self._current_partial_header_value = b"" def on_headers_finished(self) -> None: disposition, options = parse_options_header( self._current_part.content_disposition ) try: self._current_part.field_name = _user_safe_decode( options[b"name"], self._charset ) except KeyError: raise MultiPartException( 'The Content-Disposition header field "name" must be ' "provided." ) if b"filename" in options: self._current_files += 1 if self._current_files > self.max_files: raise MultiPartException( f"Too many files. Maximum number of files is {self.max_files}." ) filename = _user_safe_decode(options[b"filename"], self._charset) tempfile = SpooledTemporaryFile(max_size=self.max_file_size) self._files_to_close_on_error.append(tempfile) self._current_part.file = UploadFile( file=tempfile, # type: ignore[arg-type] size=0, filename=filename, headers=Headers(raw=self._current_part.item_headers), ) else: self._current_fields += 1 if self._current_fields > self.max_fields: raise MultiPartException( f"Too many fields. Maximum number of fields is {self.max_fields}." ) self._current_part.file = None def on_end(self) -> None: pass async def parse(self) -> FormData: # Parse the Content-Type header to get the multipart boundary. _, params = parse_options_header(self.headers["Content-Type"]) charset = params.get(b"charset", "utf-8") if type(charset) == bytes: charset = charset.decode("latin-1") self._charset = charset try: boundary = params[b"boundary"] except KeyError: raise MultiPartException("Missing boundary in multipart.") # Callbacks dictionary. 
callbacks = { "on_part_begin": self.on_part_begin, "on_part_data": self.on_part_data, "on_part_end": self.on_part_end, "on_header_field": self.on_header_field, "on_header_value": self.on_header_value, "on_header_end": self.on_header_end, "on_headers_finished": self.on_headers_finished, "on_end": self.on_end, } # Create the parser. parser = multipart.MultipartParser(boundary, callbacks) try: # Feed the parser with data from the request. async for chunk in self.stream: parser.write(chunk) # Write file data, it needs to use await with the UploadFile methods # that call the corresponding file methods *in a threadpool*, # otherwise, if they were called directly in the callback methods above # (regular, non-async functions), that would block the event loop in # the main thread. for part, data in self._file_parts_to_write: assert part.file # for type checkers await part.file.write(data) for part in self._file_parts_to_finish: assert part.file # for type checkers await part.file.seek(0) self._file_parts_to_write.clear() self._file_parts_to_finish.clear() except MultiPartException as exc: # Close all the files if there was an error. for file in self._files_to_close_on_error: file.close() raise exc parser.finalize() return FormData(self.items)
10,415
Python
36.602888
87
0.579741
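A sketch driving FormParser by hand, assuming the python-multipart dependency is installed. Note the trailing empty chunk: `parse()` above only calls `finalize()` when it sees a falsy chunk. The field names are illustrative.

import anyio
from starlette.datastructures import Headers
from starlette.formparsers import FormParser

async def main() -> None:
    async def stream():
        yield b"name=Ada+Lovelace&tag=a&tag=b"
        yield b""  # empty chunk tells parse() to finalize the parser

    headers = Headers({"content-type": "application/x-www-form-urlencoded"})
    form = await FormParser(headers, stream()).parse()
    assert form["name"] == "Ada Lovelace"      # unquote_plus decodes '+'
    assert form.getlist("tag") == ["a", "b"]   # duplicates preserved

anyio.run(main)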
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/testclient.py
import contextlib import inspect import io import json import math import queue import sys import typing import warnings from concurrent.futures import Future from types import GeneratorType from urllib.parse import unquote, urljoin import anyio import anyio.from_thread import httpx from anyio.streams.stapled import StapledObjectStream from starlette._utils import is_async_callable from starlette.types import ASGIApp, Message, Receive, Scope, Send from starlette.websockets import WebSocketDisconnect if sys.version_info >= (3, 8): # pragma: no cover from typing import TypedDict else: # pragma: no cover from typing_extensions import TypedDict _PortalFactoryType = typing.Callable[ [], typing.ContextManager[anyio.abc.BlockingPortal] ] ASGIInstance = typing.Callable[[Receive, Send], typing.Awaitable[None]] ASGI2App = typing.Callable[[Scope], ASGIInstance] ASGI3App = typing.Callable[[Scope, Receive, Send], typing.Awaitable[None]] _RequestData = typing.Mapping[str, typing.Union[str, typing.Iterable[str]]] def _is_asgi3(app: typing.Union[ASGI2App, ASGI3App]) -> bool: if inspect.isclass(app): return hasattr(app, "__await__") return is_async_callable(app) class _WrapASGI2: """ Provide an ASGI3 interface onto an ASGI2 app. """ def __init__(self, app: ASGI2App) -> None: self.app = app async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: instance = self.app(scope) await instance(receive, send) class _AsyncBackend(TypedDict): backend: str backend_options: typing.Dict[str, typing.Any] class _Upgrade(Exception): def __init__(self, session: "WebSocketTestSession") -> None: self.session = session class WebSocketTestSession: def __init__( self, app: ASGI3App, scope: Scope, portal_factory: _PortalFactoryType, ) -> None: self.app = app self.scope = scope self.accepted_subprotocol = None self.portal_factory = portal_factory self._receive_queue: "queue.Queue[typing.Any]" = queue.Queue() self._send_queue: "queue.Queue[typing.Any]" = queue.Queue() self.extra_headers = None def __enter__(self) -> "WebSocketTestSession": self.exit_stack = contextlib.ExitStack() self.portal = self.exit_stack.enter_context(self.portal_factory()) try: _: "Future[None]" = self.portal.start_task_soon(self._run) self.send({"type": "websocket.connect"}) message = self.receive() self._raise_on_close(message) except Exception: self.exit_stack.close() raise self.accepted_subprotocol = message.get("subprotocol", None) self.extra_headers = message.get("headers", None) return self def __exit__(self, *args: typing.Any) -> None: try: self.close(1000) finally: self.exit_stack.close() while not self._send_queue.empty(): message = self._send_queue.get() if isinstance(message, BaseException): raise message async def _run(self) -> None: """ The sub-thread in which the websocket session runs. 
""" scope = self.scope receive = self._asgi_receive send = self._asgi_send try: await self.app(scope, receive, send) except BaseException as exc: self._send_queue.put(exc) raise async def _asgi_receive(self) -> Message: while self._receive_queue.empty(): await anyio.sleep(0) return self._receive_queue.get() async def _asgi_send(self, message: Message) -> None: self._send_queue.put(message) def _raise_on_close(self, message: Message) -> None: if message["type"] == "websocket.close": raise WebSocketDisconnect( message.get("code", 1000), message.get("reason", "") ) def send(self, message: Message) -> None: self._receive_queue.put(message) def send_text(self, data: str) -> None: self.send({"type": "websocket.receive", "text": data}) def send_bytes(self, data: bytes) -> None: self.send({"type": "websocket.receive", "bytes": data}) def send_json(self, data: typing.Any, mode: str = "text") -> None: assert mode in ["text", "binary"] text = json.dumps(data) if mode == "text": self.send({"type": "websocket.receive", "text": text}) else: self.send({"type": "websocket.receive", "bytes": text.encode("utf-8")}) def close(self, code: int = 1000) -> None: self.send({"type": "websocket.disconnect", "code": code}) def receive(self) -> Message: message = self._send_queue.get() if isinstance(message, BaseException): raise message return message def receive_text(self) -> str: message = self.receive() self._raise_on_close(message) return message["text"] def receive_bytes(self) -> bytes: message = self.receive() self._raise_on_close(message) return message["bytes"] def receive_json(self, mode: str = "text") -> typing.Any: assert mode in ["text", "binary"] message = self.receive() self._raise_on_close(message) if mode == "text": text = message["text"] else: text = message["bytes"].decode("utf-8") return json.loads(text) class _TestClientTransport(httpx.BaseTransport): def __init__( self, app: ASGI3App, portal_factory: _PortalFactoryType, raise_server_exceptions: bool = True, root_path: str = "", ) -> None: self.app = app self.raise_server_exceptions = raise_server_exceptions self.root_path = root_path self.portal_factory = portal_factory def handle_request(self, request: httpx.Request) -> httpx.Response: scheme = request.url.scheme netloc = request.url.netloc.decode(encoding="ascii") path = request.url.path raw_path = request.url.raw_path query = request.url.query.decode(encoding="ascii") default_port = {"http": 80, "ws": 80, "https": 443, "wss": 443}[scheme] if ":" in netloc: host, port_string = netloc.split(":", 1) port = int(port_string) else: host = netloc port = default_port # Include the 'host' header. if "host" in request.headers: headers: typing.List[typing.Tuple[bytes, bytes]] = [] elif port == default_port: # pragma: no cover headers = [(b"host", host.encode())] else: # pragma: no cover headers = [(b"host", (f"{host}:{port}").encode())] # Include other request headers. 
headers += [ (key.lower().encode(), value.encode()) for key, value in request.headers.items() ] scope: typing.Dict[str, typing.Any] if scheme in {"ws", "wss"}: subprotocol = request.headers.get("sec-websocket-protocol", None) if subprotocol is None: subprotocols: typing.Sequence[str] = [] else: subprotocols = [value.strip() for value in subprotocol.split(",")] scope = { "type": "websocket", "path": unquote(path), "raw_path": raw_path, "root_path": self.root_path, "scheme": scheme, "query_string": query.encode(), "headers": headers, "client": ["testclient", 50000], "server": [host, port], "subprotocols": subprotocols, } session = WebSocketTestSession(self.app, scope, self.portal_factory) raise _Upgrade(session) scope = { "type": "http", "http_version": "1.1", "method": request.method, "path": unquote(path), "raw_path": raw_path, "root_path": self.root_path, "scheme": scheme, "query_string": query.encode(), "headers": headers, "client": ["testclient", 50000], "server": [host, port], "extensions": {"http.response.debug": {}}, } request_complete = False response_started = False response_complete: anyio.Event raw_kwargs: typing.Dict[str, typing.Any] = {"stream": io.BytesIO()} template = None context = None async def receive() -> Message: nonlocal request_complete if request_complete: if not response_complete.is_set(): await response_complete.wait() return {"type": "http.disconnect"} body = request.read() if isinstance(body, str): body_bytes: bytes = body.encode("utf-8") # pragma: no cover elif body is None: body_bytes = b"" # pragma: no cover elif isinstance(body, GeneratorType): try: # pragma: no cover chunk = body.send(None) if isinstance(chunk, str): chunk = chunk.encode("utf-8") return {"type": "http.request", "body": chunk, "more_body": True} except StopIteration: # pragma: no cover request_complete = True return {"type": "http.request", "body": b""} else: body_bytes = body request_complete = True return {"type": "http.request", "body": body_bytes} async def send(message: Message) -> None: nonlocal raw_kwargs, response_started, template, context if message["type"] == "http.response.start": assert ( not response_started ), 'Received multiple "http.response.start" messages.' raw_kwargs["status_code"] = message["status"] raw_kwargs["headers"] = [ (key.decode(), value.decode()) for key, value in message.get("headers", []) ] response_started = True elif message["type"] == "http.response.body": assert ( response_started ), 'Received "http.response.body" without "http.response.start".' assert ( not response_complete.is_set() ), 'Received "http.response.body" after response completed.' body = message.get("body", b"") more_body = message.get("more_body", False) if request.method != "HEAD": raw_kwargs["stream"].write(body) if not more_body: raw_kwargs["stream"].seek(0) response_complete.set() elif message["type"] == "http.response.debug": template = message["info"]["template"] context = message["info"]["context"] try: with self.portal_factory() as portal: response_complete = portal.call(anyio.Event) portal.call(self.app, scope, receive, send) except BaseException as exc: if self.raise_server_exceptions: raise exc if self.raise_server_exceptions: assert response_started, "TestClient did not receive any response." 
elif not response_started: raw_kwargs = { "status_code": 500, "headers": [], "stream": io.BytesIO(), } raw_kwargs["stream"] = httpx.ByteStream(raw_kwargs["stream"].read()) response = httpx.Response(**raw_kwargs, request=request) if template is not None: response.template = template # type: ignore[attr-defined] response.context = context # type: ignore[attr-defined] return response class TestClient(httpx.Client): __test__ = False task: "Future[None]" portal: typing.Optional[anyio.abc.BlockingPortal] = None def __init__( self, app: ASGIApp, base_url: str = "http://testserver", raise_server_exceptions: bool = True, root_path: str = "", backend: str = "asyncio", backend_options: typing.Optional[typing.Dict[str, typing.Any]] = None, cookies: httpx._client.CookieTypes = None, headers: typing.Dict[str, str] = None, ) -> None: self.async_backend = _AsyncBackend( backend=backend, backend_options=backend_options or {} ) if _is_asgi3(app): app = typing.cast(ASGI3App, app) asgi_app = app else: app = typing.cast(ASGI2App, app) # type: ignore[assignment] asgi_app = _WrapASGI2(app) # type: ignore[arg-type] self.app = asgi_app transport = _TestClientTransport( self.app, portal_factory=self._portal_factory, raise_server_exceptions=raise_server_exceptions, root_path=root_path, ) if headers is None: headers = {} headers.setdefault("user-agent", "testclient") super().__init__( app=self.app, base_url=base_url, headers=headers, transport=transport, follow_redirects=True, cookies=cookies, ) @contextlib.contextmanager def _portal_factory(self) -> typing.Generator[anyio.abc.BlockingPortal, None, None]: if self.portal is not None: yield self.portal else: with anyio.from_thread.start_blocking_portal( **self.async_backend ) as portal: yield portal def _choose_redirect_arg( self, follow_redirects: typing.Optional[bool], allow_redirects: typing.Optional[bool], ) -> typing.Union[bool, httpx._client.UseClientDefault]: redirect: typing.Union[ bool, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT if allow_redirects is not None: message = ( "The `allow_redirects` argument is deprecated. " "Use `follow_redirects` instead." ) warnings.warn(message, DeprecationWarning) redirect = allow_redirects if follow_redirects is not None: redirect = follow_redirects elif allow_redirects is not None and follow_redirects is not None: raise RuntimeError( # pragma: no cover "Cannot use both `allow_redirects` and `follow_redirects`." 
) return redirect def request( # type: ignore[override] self, method: str, url: httpx._types.URLTypes, *, content: typing.Optional[httpx._types.RequestContent] = None, data: typing.Optional[_RequestData] = None, files: typing.Optional[httpx._types.RequestFiles] = None, json: typing.Any = None, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: url = self.base_url.join(url) redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().request( method, url, content=content, data=data, # type: ignore[arg-type] files=files, json=json, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def get( # type: ignore[override] self, url: httpx._types.URLTypes, *, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().get( url, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def options( # type: ignore[override] self, url: httpx._types.URLTypes, *, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().options( url, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def head( # type: ignore[override] self, url: httpx._types.URLTypes, *, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, 
httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().head( url, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def post( # type: ignore[override] self, url: httpx._types.URLTypes, *, content: typing.Optional[httpx._types.RequestContent] = None, data: typing.Optional[_RequestData] = None, files: typing.Optional[httpx._types.RequestFiles] = None, json: typing.Any = None, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().post( url, content=content, data=data, # type: ignore[arg-type] files=files, json=json, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def put( # type: ignore[override] self, url: httpx._types.URLTypes, *, content: typing.Optional[httpx._types.RequestContent] = None, data: typing.Optional[_RequestData] = None, files: typing.Optional[httpx._types.RequestFiles] = None, json: typing.Any = None, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().put( url, content=content, data=data, # type: ignore[arg-type] files=files, json=json, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def patch( # type: ignore[override] self, url: httpx._types.URLTypes, *, content: typing.Optional[httpx._types.RequestContent] = None, data: typing.Optional[_RequestData] = None, files: typing.Optional[httpx._types.RequestFiles] = None, json: typing.Any = None, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> 
httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().patch( url, content=content, data=data, # type: ignore[arg-type] files=files, json=json, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def delete( # type: ignore[override] self, url: httpx._types.URLTypes, *, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().delete( url, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def websocket_connect( self, url: str, subprotocols: typing.Sequence[str] = None, **kwargs: typing.Any ) -> typing.Any: url = urljoin("ws://testserver", url) headers = kwargs.get("headers", {}) headers.setdefault("connection", "upgrade") headers.setdefault("sec-websocket-key", "testserver==") headers.setdefault("sec-websocket-version", "13") if subprotocols is not None: headers.setdefault("sec-websocket-protocol", ", ".join(subprotocols)) kwargs["headers"] = headers try: super().request("GET", url, **kwargs) except _Upgrade as exc: session = exc.session else: raise RuntimeError("Expected WebSocket upgrade") # pragma: no cover return session def __enter__(self) -> "TestClient": with contextlib.ExitStack() as stack: self.portal = portal = stack.enter_context( anyio.from_thread.start_blocking_portal(**self.async_backend) ) @stack.callback def reset_portal() -> None: self.portal = None self.stream_send = StapledObjectStream( *anyio.create_memory_object_stream(math.inf) ) self.stream_receive = StapledObjectStream( *anyio.create_memory_object_stream(math.inf) ) self.task = portal.start_task_soon(self.lifespan) portal.call(self.wait_startup) @stack.callback def wait_shutdown() -> None: portal.call(self.wait_shutdown) self.exit_stack = stack.pop_all() return self def __exit__(self, *args: typing.Any) -> None: self.exit_stack.close() async def lifespan(self) -> None: scope = {"type": "lifespan"} try: await self.app(scope, self.stream_receive.receive, self.stream_send.send) finally: await self.stream_send.send(None) async def wait_startup(self) -> None: await self.stream_receive.send({"type": "lifespan.startup"}) async def receive() -> typing.Any: message = await self.stream_send.receive() if message is None: self.task.result() return message message = await receive() assert message["type"] in ( "lifespan.startup.complete", "lifespan.startup.failed", ) if message["type"] == "lifespan.startup.failed": await receive() async def wait_shutdown(self) -> None: async def receive() -> typing.Any: message = await self.stream_send.receive() if message is None: self.task.result() return message async with self.stream_send: await self.stream_receive.send({"type": "lifespan.shutdown"}) message = await receive() assert message["type"] in ( "lifespan.shutdown.complete", "lifespan.shutdown.failed", ) if message["type"] 
== "lifespan.shutdown.failed": await receive()
28,877
Python
35.508217
88
0.570939
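The `TestClient` above wraps an ASGI application behind an httpx-compatible interface. A minimal sketch of how it is typically driven, assuming a trivial Starlette app (the route and handler names are illustrative):

from starlette.applications import Starlette
from starlette.responses import PlainTextResponse
from starlette.routing import Route
from starlette.testclient import TestClient

async def homepage(request):
    return PlainTextResponse("hello")

app = Starlette(routes=[Route("/", homepage)])

# Entering the client as a context manager runs the lifespan machinery
# shown above (__enter__ starts the portal, then wait_startup/wait_shutdown).
with TestClient(app) as client:
    response = client.get("/")
    assert response.status_code == 200
    assert response.text == "hello"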
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/concurrency.py
import functools
import sys
import typing
import warnings

import anyio

if sys.version_info >= (3, 10):  # pragma: no cover
    from typing import ParamSpec
else:  # pragma: no cover
    from typing_extensions import ParamSpec

T = typing.TypeVar("T")
P = ParamSpec("P")


async def run_until_first_complete(*args: typing.Tuple[typing.Callable, dict]) -> None:
    warnings.warn(
        "run_until_first_complete is deprecated "
        "and will be removed in a future version.",
        DeprecationWarning,
    )

    async with anyio.create_task_group() as task_group:

        async def run(func: typing.Callable[[], typing.Coroutine]) -> None:
            await func()
            task_group.cancel_scope.cancel()

        for func, kwargs in args:
            task_group.start_soon(run, functools.partial(func, **kwargs))


async def run_in_threadpool(
    func: typing.Callable[P, T], *args: P.args, **kwargs: P.kwargs
) -> T:
    if kwargs:  # pragma: no cover
        # run_sync doesn't accept 'kwargs', so bind them in here
        func = functools.partial(func, **kwargs)
    return await anyio.to_thread.run_sync(func, *args)


class _StopIteration(Exception):
    pass


def _next(iterator: typing.Iterator[T]) -> T:
    # We can't raise `StopIteration` from within the threadpool iterator
    # and catch it outside that context, so we coerce them into a different
    # exception type.
    try:
        return next(iterator)
    except StopIteration:
        raise _StopIteration


async def iterate_in_threadpool(
    iterator: typing.Iterator[T],
) -> typing.AsyncIterator[T]:
    while True:
        try:
            yield await anyio.to_thread.run_sync(_next, iterator)
        except _StopIteration:
            break
1,741
Python
25.393939
87
0.651924
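A short sketch of how the helpers above keep blocking work off the event loop; the functions here are illustrative:

import asyncio
from starlette.concurrency import iterate_in_threadpool, run_in_threadpool

def blocking_add(a: int, b: int) -> int:
    return a + b  # stands in for any CPU- or IO-blocking call

async def main() -> None:
    # Runs blocking_add in a worker thread via anyio.to_thread.run_sync.
    result = await run_in_threadpool(blocking_add, 1, 2)
    assert result == 3
    # Drains a plain iterator from a worker thread, one item per hop.
    items = [x async for x in iterate_in_threadpool(iter([1, 2, 3]))]
    assert items == [1, 2, 3]

asyncio.run(main())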
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/routing.py
import contextlib import functools import inspect import re import traceback import types import typing import warnings from contextlib import asynccontextmanager from enum import Enum from starlette._utils import is_async_callable from starlette.concurrency import run_in_threadpool from starlette.convertors import CONVERTOR_TYPES, Convertor from starlette.datastructures import URL, Headers, URLPath from starlette.exceptions import HTTPException from starlette.middleware import Middleware from starlette.requests import Request from starlette.responses import PlainTextResponse, RedirectResponse from starlette.types import ASGIApp, Receive, Scope, Send from starlette.websockets import WebSocket, WebSocketClose class NoMatchFound(Exception): """ Raised by `.url_for(name, **path_params)` and `.url_path_for(name, **path_params)` if no matching route exists. """ def __init__(self, name: str, path_params: typing.Dict[str, typing.Any]) -> None: params = ", ".join(list(path_params.keys())) super().__init__(f'No route exists for name "{name}" and params "{params}".') class Match(Enum): NONE = 0 PARTIAL = 1 FULL = 2 def iscoroutinefunction_or_partial(obj: typing.Any) -> bool: # pragma: no cover """ Correctly determines if an object is a coroutine function, including those wrapped in functools.partial objects. """ warnings.warn( "iscoroutinefunction_or_partial is deprecated, " "and will be removed in a future release.", DeprecationWarning, ) while isinstance(obj, functools.partial): obj = obj.func return inspect.iscoroutinefunction(obj) def request_response(func: typing.Callable) -> ASGIApp: """ Takes a function or coroutine `func(request) -> response`, and returns an ASGI application. """ is_coroutine = is_async_callable(func) async def app(scope: Scope, receive: Receive, send: Send) -> None: request = Request(scope, receive=receive, send=send) if is_coroutine: response = await func(request) else: response = await run_in_threadpool(func, request) await response(scope, receive, send) return app def websocket_session(func: typing.Callable) -> ASGIApp: """ Takes a coroutine `func(session)`, and returns an ASGI application. """ # assert asyncio.iscoroutinefunction(func), "WebSocket endpoints must be async" async def app(scope: Scope, receive: Receive, send: Send) -> None: session = WebSocket(scope, receive=receive, send=send) await func(session) return app def get_name(endpoint: typing.Callable) -> str: if inspect.isroutine(endpoint) or inspect.isclass(endpoint): return endpoint.__name__ return endpoint.__class__.__name__ def replace_params( path: str, param_convertors: typing.Dict[str, Convertor], path_params: typing.Dict[str, str], ) -> typing.Tuple[str, dict]: for key, value in list(path_params.items()): if "{" + key + "}" in path: convertor = param_convertors[key] value = convertor.to_string(value) path = path.replace("{" + key + "}", value) path_params.pop(key) return path, path_params # Match parameters in URL paths, eg. '{param}', and '{param:int}' PARAM_REGEX = re.compile("{([a-zA-Z_][a-zA-Z0-9_]*)(:[a-zA-Z_][a-zA-Z0-9_]*)?}") def compile_path( path: str, ) -> typing.Tuple[typing.Pattern, str, typing.Dict[str, Convertor]]: """ Given a path string, like: "/{username:str}", or a host string, like: "{subdomain}.mydomain.org", return a three-tuple of (regex, format, {param_name:convertor}). 
regex: "/(?P<username>[^/]+)" format: "/{username}" convertors: {"username": StringConvertor()} """ is_host = not path.startswith("/") path_regex = "^" path_format = "" duplicated_params = set() idx = 0 param_convertors = {} for match in PARAM_REGEX.finditer(path): param_name, convertor_type = match.groups("str") convertor_type = convertor_type.lstrip(":") assert ( convertor_type in CONVERTOR_TYPES ), f"Unknown path convertor '{convertor_type}'" convertor = CONVERTOR_TYPES[convertor_type] path_regex += re.escape(path[idx : match.start()]) path_regex += f"(?P<{param_name}>{convertor.regex})" path_format += path[idx : match.start()] path_format += "{%s}" % param_name if param_name in param_convertors: duplicated_params.add(param_name) param_convertors[param_name] = convertor idx = match.end() if duplicated_params: names = ", ".join(sorted(duplicated_params)) ending = "s" if len(duplicated_params) > 1 else "" raise ValueError(f"Duplicated param name{ending} {names} at path {path}") if is_host: # Align with `Host.matches()` behavior, which ignores port. hostname = path[idx:].split(":")[0] path_regex += re.escape(hostname) + "$" else: path_regex += re.escape(path[idx:]) + "$" path_format += path[idx:] return re.compile(path_regex), path_format, param_convertors class BaseRoute: def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]: raise NotImplementedError() # pragma: no cover def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: raise NotImplementedError() # pragma: no cover async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: raise NotImplementedError() # pragma: no cover async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: """ A route may be used in isolation as a stand-alone ASGI app. This is a somewhat contrived case, as they'll almost always be used within a Router, but could be useful for some tooling and minimal apps. """ match, child_scope = self.matches(scope) if match == Match.NONE: if scope["type"] == "http": response = PlainTextResponse("Not Found", status_code=404) await response(scope, receive, send) elif scope["type"] == "websocket": websocket_close = WebSocketClose() await websocket_close(scope, receive, send) return scope.update(child_scope) await self.handle(scope, receive, send) class Route(BaseRoute): def __init__( self, path: str, endpoint: typing.Callable, *, methods: typing.Optional[typing.List[str]] = None, name: typing.Optional[str] = None, include_in_schema: bool = True, ) -> None: assert path.startswith("/"), "Routed paths must start with '/'" self.path = path self.endpoint = endpoint self.name = get_name(endpoint) if name is None else name self.include_in_schema = include_in_schema endpoint_handler = endpoint while isinstance(endpoint_handler, functools.partial): endpoint_handler = endpoint_handler.func if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler): # Endpoint is function or method. Treat it as `func(request) -> response`. self.app = request_response(endpoint) if methods is None: methods = ["GET"] else: # Endpoint is a class. Treat it as ASGI. 
self.app = endpoint if methods is None: self.methods = None else: self.methods = {method.upper() for method in methods} if "GET" in self.methods: self.methods.add("HEAD") self.path_regex, self.path_format, self.param_convertors = compile_path(path) def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]: if scope["type"] == "http": match = self.path_regex.match(scope["path"]) if match: matched_params = match.groupdict() for key, value in matched_params.items(): matched_params[key] = self.param_convertors[key].convert(value) path_params = dict(scope.get("path_params", {})) path_params.update(matched_params) child_scope = {"endpoint": self.endpoint, "path_params": path_params} if self.methods and scope["method"] not in self.methods: return Match.PARTIAL, child_scope else: return Match.FULL, child_scope return Match.NONE, {} def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: seen_params = set(path_params.keys()) expected_params = set(self.param_convertors.keys()) if name != self.name or seen_params != expected_params: raise NoMatchFound(name, path_params) path, remaining_params = replace_params( self.path_format, self.param_convertors, path_params ) assert not remaining_params return URLPath(path=path, protocol="http") async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: if self.methods and scope["method"] not in self.methods: headers = {"Allow": ", ".join(self.methods)} if "app" in scope: raise HTTPException(status_code=405, headers=headers) else: response = PlainTextResponse( "Method Not Allowed", status_code=405, headers=headers ) await response(scope, receive, send) else: await self.app(scope, receive, send) def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, Route) and self.path == other.path and self.endpoint == other.endpoint and self.methods == other.methods ) def __repr__(self) -> str: class_name = self.__class__.__name__ methods = sorted(self.methods or []) path, name = self.path, self.name return f"{class_name}(path={path!r}, name={name!r}, methods={methods!r})" class WebSocketRoute(BaseRoute): def __init__( self, path: str, endpoint: typing.Callable, *, name: typing.Optional[str] = None ) -> None: assert path.startswith("/"), "Routed paths must start with '/'" self.path = path self.endpoint = endpoint self.name = get_name(endpoint) if name is None else name endpoint_handler = endpoint while isinstance(endpoint_handler, functools.partial): endpoint_handler = endpoint_handler.func if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler): # Endpoint is function or method. Treat it as `func(websocket)`. self.app = websocket_session(endpoint) else: # Endpoint is a class. Treat it as ASGI. 
self.app = endpoint self.path_regex, self.path_format, self.param_convertors = compile_path(path) def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]: if scope["type"] == "websocket": match = self.path_regex.match(scope["path"]) if match: matched_params = match.groupdict() for key, value in matched_params.items(): matched_params[key] = self.param_convertors[key].convert(value) path_params = dict(scope.get("path_params", {})) path_params.update(matched_params) child_scope = {"endpoint": self.endpoint, "path_params": path_params} return Match.FULL, child_scope return Match.NONE, {} def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: seen_params = set(path_params.keys()) expected_params = set(self.param_convertors.keys()) if name != self.name or seen_params != expected_params: raise NoMatchFound(name, path_params) path, remaining_params = replace_params( self.path_format, self.param_convertors, path_params ) assert not remaining_params return URLPath(path=path, protocol="websocket") async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: await self.app(scope, receive, send) def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, WebSocketRoute) and self.path == other.path and self.endpoint == other.endpoint ) def __repr__(self) -> str: return f"{self.__class__.__name__}(path={self.path!r}, name={self.name!r})" class Mount(BaseRoute): def __init__( self, path: str, app: typing.Optional[ASGIApp] = None, routes: typing.Optional[typing.Sequence[BaseRoute]] = None, name: typing.Optional[str] = None, *, middleware: typing.Optional[typing.Sequence[Middleware]] = None, ) -> None: assert path == "" or path.startswith("/"), "Routed paths must start with '/'" assert ( app is not None or routes is not None ), "Either 'app=...', or 'routes=' must be specified" self.path = path.rstrip("/") if app is not None: self._base_app: ASGIApp = app else: self._base_app = Router(routes=routes) self.app = self._base_app if middleware is not None: for cls, options in reversed(middleware): self.app = cls(app=self.app, **options) self.name = name self.path_regex, self.path_format, self.param_convertors = compile_path( self.path + "/{path:path}" ) @property def routes(self) -> typing.List[BaseRoute]: return getattr(self._base_app, "routes", []) def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]: if scope["type"] in ("http", "websocket"): path = scope["path"] match = self.path_regex.match(path) if match: matched_params = match.groupdict() for key, value in matched_params.items(): matched_params[key] = self.param_convertors[key].convert(value) remaining_path = "/" + matched_params.pop("path") matched_path = path[: -len(remaining_path)] path_params = dict(scope.get("path_params", {})) path_params.update(matched_params) root_path = scope.get("root_path", "") child_scope = { "path_params": path_params, "app_root_path": scope.get("app_root_path", root_path), "root_path": root_path + matched_path, "path": remaining_path, "endpoint": self.app, } return Match.FULL, child_scope return Match.NONE, {} def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: if self.name is not None and name == self.name and "path" in path_params: # 'name' matches "<mount_name>". 
path_params["path"] = path_params["path"].lstrip("/") path, remaining_params = replace_params( self.path_format, self.param_convertors, path_params ) if not remaining_params: return URLPath(path=path) elif self.name is None or name.startswith(self.name + ":"): if self.name is None: # No mount name. remaining_name = name else: # 'name' matches "<mount_name>:<child_name>". remaining_name = name[len(self.name) + 1 :] path_kwarg = path_params.get("path") path_params["path"] = "" path_prefix, remaining_params = replace_params( self.path_format, self.param_convertors, path_params ) if path_kwarg is not None: remaining_params["path"] = path_kwarg for route in self.routes or []: try: url = route.url_path_for(remaining_name, **remaining_params) return URLPath( path=path_prefix.rstrip("/") + str(url), protocol=url.protocol ) except NoMatchFound: pass raise NoMatchFound(name, path_params) async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: await self.app(scope, receive, send) def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, Mount) and self.path == other.path and self.app == other.app ) def __repr__(self) -> str: class_name = self.__class__.__name__ name = self.name or "" return f"{class_name}(path={self.path!r}, name={name!r}, app={self.app!r})" class Host(BaseRoute): def __init__( self, host: str, app: ASGIApp, name: typing.Optional[str] = None ) -> None: assert not host.startswith("/"), "Host must not start with '/'" self.host = host self.app = app self.name = name self.host_regex, self.host_format, self.param_convertors = compile_path(host) @property def routes(self) -> typing.List[BaseRoute]: return getattr(self.app, "routes", []) def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]: if scope["type"] in ("http", "websocket"): headers = Headers(scope=scope) host = headers.get("host", "").split(":")[0] match = self.host_regex.match(host) if match: matched_params = match.groupdict() for key, value in matched_params.items(): matched_params[key] = self.param_convertors[key].convert(value) path_params = dict(scope.get("path_params", {})) path_params.update(matched_params) child_scope = {"path_params": path_params, "endpoint": self.app} return Match.FULL, child_scope return Match.NONE, {} def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: if self.name is not None and name == self.name and "path" in path_params: # 'name' matches "<mount_name>". path = path_params.pop("path") host, remaining_params = replace_params( self.host_format, self.param_convertors, path_params ) if not remaining_params: return URLPath(path=path, host=host) elif self.name is None or name.startswith(self.name + ":"): if self.name is None: # No mount name. remaining_name = name else: # 'name' matches "<mount_name>:<child_name>". 
remaining_name = name[len(self.name) + 1 :] host, remaining_params = replace_params( self.host_format, self.param_convertors, path_params ) for route in self.routes or []: try: url = route.url_path_for(remaining_name, **remaining_params) return URLPath(path=str(url), protocol=url.protocol, host=host) except NoMatchFound: pass raise NoMatchFound(name, path_params) async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: await self.app(scope, receive, send) def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, Host) and self.host == other.host and self.app == other.app ) def __repr__(self) -> str: class_name = self.__class__.__name__ name = self.name or "" return f"{class_name}(host={self.host!r}, name={name!r}, app={self.app!r})" _T = typing.TypeVar("_T") class _AsyncLiftContextManager(typing.AsyncContextManager[_T]): def __init__(self, cm: typing.ContextManager[_T]): self._cm = cm async def __aenter__(self) -> _T: return self._cm.__enter__() async def __aexit__( self, exc_type: typing.Optional[typing.Type[BaseException]], exc_value: typing.Optional[BaseException], traceback: typing.Optional[types.TracebackType], ) -> typing.Optional[bool]: return self._cm.__exit__(exc_type, exc_value, traceback) def _wrap_gen_lifespan_context( lifespan_context: typing.Callable[[typing.Any], typing.Generator] ) -> typing.Callable[[typing.Any], typing.AsyncContextManager]: cmgr = contextlib.contextmanager(lifespan_context) @functools.wraps(cmgr) def wrapper(app: typing.Any) -> _AsyncLiftContextManager: return _AsyncLiftContextManager(cmgr(app)) return wrapper class _DefaultLifespan: def __init__(self, router: "Router"): self._router = router async def __aenter__(self) -> None: await self._router.startup() async def __aexit__(self, *exc_info: object) -> None: await self._router.shutdown() def __call__(self: _T, app: object) -> _T: return self class Router: def __init__( self, routes: typing.Optional[typing.Sequence[BaseRoute]] = None, redirect_slashes: bool = True, default: typing.Optional[ASGIApp] = None, on_startup: typing.Optional[typing.Sequence[typing.Callable]] = None, on_shutdown: typing.Optional[typing.Sequence[typing.Callable]] = None, lifespan: typing.Optional[ typing.Callable[[typing.Any], typing.AsyncContextManager] ] = None, ) -> None: self.routes = [] if routes is None else list(routes) self.redirect_slashes = redirect_slashes self.default = self.not_found if default is None else default self.on_startup = [] if on_startup is None else list(on_startup) self.on_shutdown = [] if on_shutdown is None else list(on_shutdown) if lifespan is None: self.lifespan_context: typing.Callable[ [typing.Any], typing.AsyncContextManager ] = _DefaultLifespan(self) elif inspect.isasyncgenfunction(lifespan): warnings.warn( "async generator function lifespans are deprecated, " "use an @contextlib.asynccontextmanager function instead", DeprecationWarning, ) self.lifespan_context = asynccontextmanager( lifespan, # type: ignore[arg-type] ) elif inspect.isgeneratorfunction(lifespan): warnings.warn( "generator function lifespans are deprecated, " "use an @contextlib.asynccontextmanager function instead", DeprecationWarning, ) self.lifespan_context = _wrap_gen_lifespan_context( lifespan, # type: ignore[arg-type] ) else: self.lifespan_context = lifespan async def not_found(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] == "websocket": websocket_close = WebSocketClose() await websocket_close(scope, receive, send) return # If we're running inside a starlette 
application then raise an # exception, so that the configurable exception handler can deal with # returning the response. For plain ASGI apps, just return the response. if "app" in scope: raise HTTPException(status_code=404) else: response = PlainTextResponse("Not Found", status_code=404) await response(scope, receive, send) def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: for route in self.routes: try: return route.url_path_for(name, **path_params) except NoMatchFound: pass raise NoMatchFound(name, path_params) async def startup(self) -> None: """ Run any `.on_startup` event handlers. """ for handler in self.on_startup: if is_async_callable(handler): await handler() else: handler() async def shutdown(self) -> None: """ Run any `.on_shutdown` event handlers. """ for handler in self.on_shutdown: if is_async_callable(handler): await handler() else: handler() async def lifespan(self, scope: Scope, receive: Receive, send: Send) -> None: """ Handle ASGI lifespan messages, which allows us to manage application startup and shutdown events. """ started = False app = scope.get("app") await receive() try: async with self.lifespan_context(app): await send({"type": "lifespan.startup.complete"}) started = True await receive() except BaseException: exc_text = traceback.format_exc() if started: await send({"type": "lifespan.shutdown.failed", "message": exc_text}) else: await send({"type": "lifespan.startup.failed", "message": exc_text}) raise else: await send({"type": "lifespan.shutdown.complete"}) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: """ The main entry point to the Router class. """ assert scope["type"] in ("http", "websocket", "lifespan") if "router" not in scope: scope["router"] = self if scope["type"] == "lifespan": await self.lifespan(scope, receive, send) return partial = None for route in self.routes: # Determine if any route matches the incoming scope, # and hand over to the matching route if found. match, child_scope = route.matches(scope) if match == Match.FULL: scope.update(child_scope) await route.handle(scope, receive, send) return elif match == Match.PARTIAL and partial is None: partial = route partial_scope = child_scope if partial is not None: #  Handle partial matches. These are cases where an endpoint is # able to handle the request, but is not a preferred option. # We use this in particular to deal with "405 Method Not Allowed". 
scope.update(partial_scope) await partial.handle(scope, receive, send) return if scope["type"] == "http" and self.redirect_slashes and scope["path"] != "/": redirect_scope = dict(scope) if scope["path"].endswith("/"): redirect_scope["path"] = redirect_scope["path"].rstrip("/") else: redirect_scope["path"] = redirect_scope["path"] + "/" for route in self.routes: match, child_scope = route.matches(redirect_scope) if match != Match.NONE: redirect_url = URL(scope=redirect_scope) response = RedirectResponse(url=str(redirect_url)) await response(scope, receive, send) return await self.default(scope, receive, send) def __eq__(self, other: typing.Any) -> bool: return isinstance(other, Router) and self.routes == other.routes def mount( self, path: str, app: ASGIApp, name: typing.Optional[str] = None ) -> None: # pragma: nocover route = Mount(path, app=app, name=name) self.routes.append(route) def host( self, host: str, app: ASGIApp, name: typing.Optional[str] = None ) -> None: # pragma: no cover route = Host(host, app=app, name=name) self.routes.append(route) def add_route( self, path: str, endpoint: typing.Callable, methods: typing.Optional[typing.List[str]] = None, name: typing.Optional[str] = None, include_in_schema: bool = True, ) -> None: # pragma: nocover route = Route( path, endpoint=endpoint, methods=methods, name=name, include_in_schema=include_in_schema, ) self.routes.append(route) def add_websocket_route( self, path: str, endpoint: typing.Callable, name: typing.Optional[str] = None ) -> None: # pragma: no cover route = WebSocketRoute(path, endpoint=endpoint, name=name) self.routes.append(route) def route( self, path: str, methods: typing.Optional[typing.List[str]] = None, name: typing.Optional[str] = None, include_in_schema: bool = True, ) -> typing.Callable: """ We no longer document this decorator style API, and its usage is discouraged. Instead you should use the following approach: >>> routes = [Route(path, endpoint=...), ...] >>> app = Starlette(routes=routes) """ warnings.warn( "The `route` decorator is deprecated, and will be removed in version 1.0.0." "Refer to https://www.starlette.io/routing/#http-routing for the recommended approach.", # noqa: E501 DeprecationWarning, ) def decorator(func: typing.Callable) -> typing.Callable: self.add_route( path, func, methods=methods, name=name, include_in_schema=include_in_schema, ) return func return decorator def websocket_route( self, path: str, name: typing.Optional[str] = None ) -> typing.Callable: """ We no longer document this decorator style API, and its usage is discouraged. Instead you should use the following approach: >>> routes = [WebSocketRoute(path, endpoint=...), ...] >>> app = Starlette(routes=routes) """ warnings.warn( "The `websocket_route` decorator is deprecated, and will be removed in version 1.0.0. Refer to " # noqa: E501 "https://www.starlette.io/routing/#websocket-routing for the recommended approach.", # noqa: E501 DeprecationWarning, ) def decorator(func: typing.Callable) -> typing.Callable: self.add_websocket_route(path, func, name=name) return func return decorator def add_event_handler( self, event_type: str, func: typing.Callable ) -> None: # pragma: no cover assert event_type in ("startup", "shutdown") if event_type == "startup": self.on_startup.append(func) else: self.on_shutdown.append(func) def on_event(self, event_type: str) -> typing.Callable: warnings.warn( "The `on_event` decorator is deprecated, and will be removed in version 1.0.0. 
" # noqa: E501 "Refer to https://www.starlette.io/events/#registering-events for recommended approach.", # noqa: E501 DeprecationWarning, ) def decorator(func: typing.Callable) -> typing.Callable: self.add_event_handler(event_type, func) return func return decorator
31,710
Python
36.26322
122
0.577483
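To make the matching and URL-reversal machinery above concrete, a small sketch (handler and route names are illustrative):

from starlette.responses import PlainTextResponse
from starlette.routing import Route, Router

async def user(request):
    # path_params is populated by Route.matches() via the convertors.
    return PlainTextResponse(request.path_params["username"])

router = Router(routes=[Route("/users/{username}", user, name="user")])

# url_path_for reverses the compiled path format; URLPath is a str subclass,
# so it compares equal to the plain string.
assert router.url_path_for("user", username="alice") == "/users/alice"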
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/types.py
import typing

Scope = typing.MutableMapping[str, typing.Any]
Message = typing.MutableMapping[str, typing.Any]

Receive = typing.Callable[[], typing.Awaitable[Message]]
Send = typing.Callable[[Message], typing.Awaitable[None]]

ASGIApp = typing.Callable[[Scope, Receive, Send], typing.Awaitable[None]]
302
Python
29.299997
73
0.764901
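These aliases are enough to type a bare ASGI callable; a minimal sketch:

from starlette.types import Receive, Scope, Send

async def app(scope: Scope, receive: Receive, send: Send) -> None:
    # A raw ASGI app: two messages make up a complete HTTP response.
    assert scope["type"] == "http"
    await send({
        "type": "http.response.start",
        "status": 200,
        "headers": [(b"content-type", b"text/plain")],
    })
    await send({"type": "http.response.body", "body": b"ok"})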
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/_utils.py
import asyncio
import functools
import sys
import typing
from types import TracebackType

if sys.version_info < (3, 8):  # pragma: no cover
    from typing_extensions import Protocol
else:  # pragma: no cover
    from typing import Protocol


def is_async_callable(obj: typing.Any) -> bool:
    while isinstance(obj, functools.partial):
        obj = obj.func

    return asyncio.iscoroutinefunction(obj) or (
        callable(obj) and asyncio.iscoroutinefunction(obj.__call__)
    )


T_co = typing.TypeVar("T_co", covariant=True)


# TODO: once 3.8 is the minimum supported version (27 Jun 2023)
# this can just become
# class AwaitableOrContextManager(
#     typing.Awaitable[T_co],
#     typing.AsyncContextManager[T_co],
#     typing.Protocol[T_co],
# ):
#     pass
class AwaitableOrContextManager(Protocol[T_co]):
    def __await__(self) -> typing.Generator[typing.Any, None, T_co]:
        ...  # pragma: no cover

    async def __aenter__(self) -> T_co:
        ...  # pragma: no cover

    async def __aexit__(
        self,
        __exc_type: typing.Optional[typing.Type[BaseException]],
        __exc_value: typing.Optional[BaseException],
        __traceback: typing.Optional[TracebackType],
    ) -> typing.Union[bool, None]:
        ...  # pragma: no cover


class SupportsAsyncClose(Protocol):
    async def close(self) -> None:
        ...  # pragma: no cover


SupportsAsyncCloseType = typing.TypeVar(
    "SupportsAsyncCloseType", bound=SupportsAsyncClose, covariant=False
)


class AwaitableOrContextManagerWrapper(typing.Generic[SupportsAsyncCloseType]):
    __slots__ = ("aw", "entered")

    def __init__(self, aw: typing.Awaitable[SupportsAsyncCloseType]) -> None:
        self.aw = aw

    def __await__(self) -> typing.Generator[typing.Any, None, SupportsAsyncCloseType]:
        return self.aw.__await__()

    async def __aenter__(self) -> SupportsAsyncCloseType:
        self.entered = await self.aw
        return self.entered

    async def __aexit__(self, *args: typing.Any) -> typing.Union[None, bool]:
        await self.entered.close()
        return None
2,091
Python
26.893333
86
0.659015
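A quick sketch of is_async_callable, which unwraps functools.partial layers before checking; the handler is illustrative:

import functools
from starlette._utils import is_async_callable

async def handler(flag: bool) -> None:
    ...

assert is_async_callable(handler)
assert is_async_callable(functools.partial(handler, flag=True))  # partials unwrapped
assert not is_async_callable(print)  # plain sync callables are rejected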
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/staticfiles.py
import importlib.util import os import stat import typing from email.utils import parsedate import anyio from starlette.datastructures import URL, Headers from starlette.exceptions import HTTPException from starlette.responses import FileResponse, RedirectResponse, Response from starlette.types import Receive, Scope, Send PathLike = typing.Union[str, "os.PathLike[str]"] class NotModifiedResponse(Response): NOT_MODIFIED_HEADERS = ( "cache-control", "content-location", "date", "etag", "expires", "vary", ) def __init__(self, headers: Headers): super().__init__( status_code=304, headers={ name: value for name, value in headers.items() if name in self.NOT_MODIFIED_HEADERS }, ) class StaticFiles: def __init__( self, *, directory: typing.Optional[PathLike] = None, packages: typing.Optional[ typing.List[typing.Union[str, typing.Tuple[str, str]]] ] = None, html: bool = False, check_dir: bool = True, follow_symlink: bool = False, ) -> None: self.directory = directory self.packages = packages self.all_directories = self.get_directories(directory, packages) self.html = html self.config_checked = False self.follow_symlink = follow_symlink if check_dir and directory is not None and not os.path.isdir(directory): raise RuntimeError(f"Directory '{directory}' does not exist") def get_directories( self, directory: typing.Optional[PathLike] = None, packages: typing.Optional[ typing.List[typing.Union[str, typing.Tuple[str, str]]] ] = None, ) -> typing.List[PathLike]: """ Given `directory` and `packages` arguments, return a list of all the directories that should be used for serving static files from. """ directories = [] if directory is not None: directories.append(directory) for package in packages or []: if isinstance(package, tuple): package, statics_dir = package else: statics_dir = "statics" spec = importlib.util.find_spec(package) assert spec is not None, f"Package {package!r} could not be found." assert spec.origin is not None, f"Package {package!r} could not be found." package_directory = os.path.normpath( os.path.join(spec.origin, "..", statics_dir) ) assert os.path.isdir( package_directory ), f"Directory '{statics_dir!r}' in package {package!r} could not be found." directories.append(package_directory) return directories async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: """ The ASGI entry point. """ assert scope["type"] == "http" if not self.config_checked: await self.check_config() self.config_checked = True path = self.get_path(scope) response = await self.get_response(path, scope) await response(scope, receive, send) def get_path(self, scope: Scope) -> str: """ Given the ASGI scope, return the `path` string to serve up, with OS specific path separators, and any '..', '.' components removed. """ return os.path.normpath(os.path.join(*scope["path"].split("/"))) async def get_response(self, path: str, scope: Scope) -> Response: """ Returns an HTTP response, given the incoming path, method and request headers. """ if scope["method"] not in ("GET", "HEAD"): raise HTTPException(status_code=405) try: full_path, stat_result = await anyio.to_thread.run_sync( self.lookup_path, path ) except PermissionError: raise HTTPException(status_code=401) except OSError: raise if stat_result and stat.S_ISREG(stat_result.st_mode): # We have a static file to serve. return self.file_response(full_path, stat_result, scope) elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html: # We're in HTML mode, and have got a directory URL. # Check if we have 'index.html' file to serve. 
index_path = os.path.join(path, "index.html") full_path, stat_result = await anyio.to_thread.run_sync( self.lookup_path, index_path ) if stat_result is not None and stat.S_ISREG(stat_result.st_mode): if not scope["path"].endswith("/"): # Directory URLs should redirect to always end in "/". url = URL(scope=scope) url = url.replace(path=url.path + "/") return RedirectResponse(url=url) return self.file_response(full_path, stat_result, scope) if self.html: # Check for '404.html' if we're in HTML mode. full_path, stat_result = await anyio.to_thread.run_sync( self.lookup_path, "404.html" ) if stat_result and stat.S_ISREG(stat_result.st_mode): return FileResponse( full_path, stat_result=stat_result, method=scope["method"], status_code=404, ) raise HTTPException(status_code=404) def lookup_path( self, path: str ) -> typing.Tuple[str, typing.Optional[os.stat_result]]: for directory in self.all_directories: joined_path = os.path.join(directory, path) if self.follow_symlink: full_path = os.path.abspath(joined_path) else: full_path = os.path.realpath(joined_path) directory = os.path.realpath(directory) if os.path.commonprefix([full_path, directory]) != directory: # Don't allow misbehaving clients to break out of the static files # directory. continue try: return full_path, os.stat(full_path) except (FileNotFoundError, NotADirectoryError): continue return "", None def file_response( self, full_path: PathLike, stat_result: os.stat_result, scope: Scope, status_code: int = 200, ) -> Response: method = scope["method"] request_headers = Headers(scope=scope) response = FileResponse( full_path, status_code=status_code, stat_result=stat_result, method=method ) if self.is_not_modified(response.headers, request_headers): return NotModifiedResponse(response.headers) return response async def check_config(self) -> None: """ Perform a one-off configuration check that StaticFiles is actually pointed at a directory, so that we can raise loud errors rather than just returning 404 responses. """ if self.directory is None: return try: stat_result = await anyio.to_thread.run_sync(os.stat, self.directory) except FileNotFoundError: raise RuntimeError( f"StaticFiles directory '{self.directory}' does not exist." ) if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)): raise RuntimeError( f"StaticFiles path '{self.directory}' is not a directory." ) def is_not_modified( self, response_headers: Headers, request_headers: Headers ) -> bool: """ Given the request and response headers, return `True` if an HTTP "Not Modified" response could be returned instead. """ try: if_none_match = request_headers["if-none-match"] etag = response_headers["etag"] if if_none_match == etag: return True except KeyError: pass try: if_modified_since = parsedate(request_headers["if-modified-since"]) last_modified = parsedate(response_headers["last-modified"]) if ( if_modified_since is not None and last_modified is not None and if_modified_since >= last_modified ): return True except KeyError: pass return False
8,735
Python
34.368421
88
0.565197
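StaticFiles is normally mounted under a path prefix; a minimal sketch, where the "static" directory name is an assumption:

from starlette.applications import Starlette
from starlette.routing import Mount
from starlette.staticfiles import StaticFiles

# check_dir=True (the default) raises at construction time if the directory
# is missing; html=True would additionally enable index.html handling.
app = Starlette(routes=[
    Mount("/static", app=StaticFiles(directory="static"), name="static"),
])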
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/requests.py
import json import typing from http import cookies as http_cookies import anyio from starlette._utils import AwaitableOrContextManager, AwaitableOrContextManagerWrapper from starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State from starlette.exceptions import HTTPException from starlette.formparsers import FormParser, MultiPartException, MultiPartParser from starlette.types import Message, Receive, Scope, Send try: from multipart.multipart import parse_options_header except ImportError: # pragma: nocover parse_options_header = None if typing.TYPE_CHECKING: from starlette.routing import Router SERVER_PUSH_HEADERS_TO_COPY = { "accept", "accept-encoding", "accept-language", "cache-control", "user-agent", } def cookie_parser(cookie_string: str) -> typing.Dict[str, str]: """ This function parses a ``Cookie`` HTTP header into a dict of key/value pairs. It attempts to mimic browser cookie parsing behavior: browsers and web servers frequently disregard the spec (RFC 6265) when setting and reading cookies, so we attempt to suit the common scenarios here. This function has been adapted from Django 3.1.0. Note: we are explicitly _NOT_ using `SimpleCookie.load` because it is based on an outdated spec and will fail on lots of input we want to support """ cookie_dict: typing.Dict[str, str] = {} for chunk in cookie_string.split(";"): if "=" in chunk: key, val = chunk.split("=", 1) else: # Assume an empty name per # https://bugzilla.mozilla.org/show_bug.cgi?id=169091 key, val = "", chunk key, val = key.strip(), val.strip() if key or val: # unquote using Python's algorithm. cookie_dict[key] = http_cookies._unquote(val) return cookie_dict class ClientDisconnect(Exception): pass class HTTPConnection(typing.Mapping[str, typing.Any]): """ A base class for incoming HTTP connections, that is used to provide any functionality that is common to both `Request` and `WebSocket`. """ def __init__(self, scope: Scope, receive: typing.Optional[Receive] = None) -> None: assert scope["type"] in ("http", "websocket") self.scope = scope def __getitem__(self, key: str) -> typing.Any: return self.scope[key] def __iter__(self) -> typing.Iterator[str]: return iter(self.scope) def __len__(self) -> int: return len(self.scope) # Don't use the `abc.Mapping.__eq__` implementation. # Connection instances should never be considered equal # unless `self is other`. 
__eq__ = object.__eq__ __hash__ = object.__hash__ @property def app(self) -> typing.Any: return self.scope["app"] @property def url(self) -> URL: if not hasattr(self, "_url"): self._url = URL(scope=self.scope) return self._url @property def base_url(self) -> URL: if not hasattr(self, "_base_url"): base_url_scope = dict(self.scope) base_url_scope["path"] = "/" base_url_scope["query_string"] = b"" base_url_scope["root_path"] = base_url_scope.get( "app_root_path", base_url_scope.get("root_path", "") ) self._base_url = URL(scope=base_url_scope) return self._base_url @property def headers(self) -> Headers: if not hasattr(self, "_headers"): self._headers = Headers(scope=self.scope) return self._headers @property def query_params(self) -> QueryParams: if not hasattr(self, "_query_params"): self._query_params = QueryParams(self.scope["query_string"]) return self._query_params @property def path_params(self) -> typing.Dict[str, typing.Any]: return self.scope.get("path_params", {}) @property def cookies(self) -> typing.Dict[str, str]: if not hasattr(self, "_cookies"): cookies: typing.Dict[str, str] = {} cookie_header = self.headers.get("cookie") if cookie_header: cookies = cookie_parser(cookie_header) self._cookies = cookies return self._cookies @property def client(self) -> typing.Optional[Address]: # client is a 2 item tuple of (host, port), None or missing host_port = self.scope.get("client") if host_port is not None: return Address(*host_port) return None @property def session(self) -> typing.Dict[str, typing.Any]: assert ( "session" in self.scope ), "SessionMiddleware must be installed to access request.session" return self.scope["session"] @property def auth(self) -> typing.Any: assert ( "auth" in self.scope ), "AuthenticationMiddleware must be installed to access request.auth" return self.scope["auth"] @property def user(self) -> typing.Any: assert ( "user" in self.scope ), "AuthenticationMiddleware must be installed to access request.user" return self.scope["user"] @property def state(self) -> State: if not hasattr(self, "_state"): # Ensure 'state' has an empty dict if it's not already populated. 
self.scope.setdefault("state", {}) # Create a state instance with a reference to the dict in which it should # store info self._state = State(self.scope["state"]) return self._state def url_for(self, name: str, **path_params: typing.Any) -> str: router: Router = self.scope["router"] url_path = router.url_path_for(name, **path_params) return url_path.make_absolute_url(base_url=self.base_url) async def empty_receive() -> typing.NoReturn: raise RuntimeError("Receive channel has not been made available") async def empty_send(message: Message) -> typing.NoReturn: raise RuntimeError("Send channel has not been made available") class Request(HTTPConnection): _form: typing.Optional[FormData] def __init__( self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send ): super().__init__(scope) assert scope["type"] == "http" self._receive = receive self._send = send self._stream_consumed = False self._is_disconnected = False self._form = None @property def method(self) -> str: return self.scope["method"] @property def receive(self) -> Receive: return self._receive async def stream(self) -> typing.AsyncGenerator[bytes, None]: if hasattr(self, "_body"): yield self._body yield b"" return if self._stream_consumed: raise RuntimeError("Stream consumed") self._stream_consumed = True while True: message = await self._receive() if message["type"] == "http.request": body = message.get("body", b"") if body: yield body if not message.get("more_body", False): break elif message["type"] == "http.disconnect": self._is_disconnected = True raise ClientDisconnect() yield b"" async def body(self) -> bytes: if not hasattr(self, "_body"): chunks: "typing.List[bytes]" = [] async for chunk in self.stream(): chunks.append(chunk) self._body = b"".join(chunks) return self._body async def json(self) -> typing.Any: if not hasattr(self, "_json"): body = await self.body() self._json = json.loads(body) return self._json async def _get_form( self, *, max_files: typing.Union[int, float] = 1000, max_fields: typing.Union[int, float] = 1000, ) -> FormData: if self._form is None: assert ( parse_options_header is not None ), "The `python-multipart` library must be installed to use form parsing." 
content_type_header = self.headers.get("Content-Type") content_type: bytes content_type, _ = parse_options_header(content_type_header) if content_type == b"multipart/form-data": try: multipart_parser = MultiPartParser( self.headers, self.stream(), max_files=max_files, max_fields=max_fields, ) self._form = await multipart_parser.parse() except MultiPartException as exc: if "app" in self.scope: raise HTTPException(status_code=400, detail=exc.message) raise exc elif content_type == b"application/x-www-form-urlencoded": form_parser = FormParser(self.headers, self.stream()) self._form = await form_parser.parse() else: self._form = FormData() return self._form def form( self, *, max_files: typing.Union[int, float] = 1000, max_fields: typing.Union[int, float] = 1000, ) -> AwaitableOrContextManager[FormData]: return AwaitableOrContextManagerWrapper( self._get_form(max_files=max_files, max_fields=max_fields) ) async def close(self) -> None: if self._form is not None: await self._form.close() async def is_disconnected(self) -> bool: if not self._is_disconnected: message: Message = {} # If message isn't immediately available, move on with anyio.CancelScope() as cs: cs.cancel() message = await self._receive() if message.get("type") == "http.disconnect": self._is_disconnected = True return self._is_disconnected async def send_push_promise(self, path: str) -> None: if "http.response.push" in self.scope.get("extensions", {}): raw_headers: "typing.List[typing.Tuple[bytes, bytes]]" = [] for name in SERVER_PUSH_HEADERS_TO_COPY: for value in self.headers.getlist(name): raw_headers.append( (name.encode("latin-1"), value.encode("latin-1")) ) await self._send( {"type": "http.response.push", "path": path, "headers": raw_headers} )
10,745
Python
32.68652
88
0.578595
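A sketch of an endpoint consuming the Request API above; the endpoint itself is illustrative:

from starlette.requests import Request
from starlette.responses import JSONResponse

async def create_item(request: Request) -> JSONResponse:
    data = await request.json()  # body() caches, so later reads reuse the bytes
    client = request.client      # Address namedtuple or None
    return JSONResponse({
        "received": data,
        "client_host": client.host if client else None,
    })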
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/convertors.py
import math
import typing
import uuid

T = typing.TypeVar("T")


class Convertor(typing.Generic[T]):
    regex: typing.ClassVar[str] = ""

    def convert(self, value: str) -> T:
        raise NotImplementedError()  # pragma: no cover

    def to_string(self, value: T) -> str:
        raise NotImplementedError()  # pragma: no cover


class StringConvertor(Convertor):
    regex = "[^/]+"

    def convert(self, value: str) -> str:
        return value

    def to_string(self, value: str) -> str:
        value = str(value)
        assert "/" not in value, "May not contain path separators"
        assert value, "Must not be empty"
        return value


class PathConvertor(Convertor):
    regex = ".*"

    def convert(self, value: str) -> str:
        return str(value)

    def to_string(self, value: str) -> str:
        return str(value)


class IntegerConvertor(Convertor):
    regex = "[0-9]+"

    def convert(self, value: str) -> int:
        return int(value)

    def to_string(self, value: int) -> str:
        value = int(value)
        assert value >= 0, "Negative integers are not supported"
        return str(value)


class FloatConvertor(Convertor):
    regex = r"[0-9]+(\.[0-9]+)?"

    def convert(self, value: str) -> float:
        return float(value)

    def to_string(self, value: float) -> str:
        value = float(value)
        assert value >= 0.0, "Negative floats are not supported"
        assert not math.isnan(value), "NaN values are not supported"
        assert not math.isinf(value), "Infinite values are not supported"
        return ("%0.20f" % value).rstrip("0").rstrip(".")


class UUIDConvertor(Convertor):
    regex = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"

    def convert(self, value: str) -> uuid.UUID:
        return uuid.UUID(value)

    def to_string(self, value: uuid.UUID) -> str:
        return str(value)


CONVERTOR_TYPES = {
    "str": StringConvertor(),
    "path": PathConvertor(),
    "int": IntegerConvertor(),
    "float": FloatConvertor(),
    "uuid": UUIDConvertor(),
}


def register_url_convertor(key: str, convertor: Convertor) -> None:
    CONVERTOR_TYPES[key] = convertor
2,168
Python
23.647727
74
0.60655
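register_url_convertor lets applications add their own path parameter types; a sketch with a hypothetical date convertor:

import datetime
from starlette.convertors import Convertor, register_url_convertor

class DateConvertor(Convertor):
    regex = r"[0-9]{4}-[0-9]{2}-[0-9]{2}"

    def convert(self, value: str) -> datetime.date:
        return datetime.date.fromisoformat(value)

    def to_string(self, value: datetime.date) -> str:
        return value.isoformat()

register_url_convertor("date", DateConvertor())
# After registration, route paths may use e.g. "/events/{day:date}".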
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/status.py
""" HTTP codes See HTTP Status Code Registry: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml And RFC 2324 - https://tools.ietf.org/html/rfc2324 """ import warnings from typing import List __all__ = ( "HTTP_100_CONTINUE", "HTTP_101_SWITCHING_PROTOCOLS", "HTTP_102_PROCESSING", "HTTP_103_EARLY_HINTS", "HTTP_200_OK", "HTTP_201_CREATED", "HTTP_202_ACCEPTED", "HTTP_203_NON_AUTHORITATIVE_INFORMATION", "HTTP_204_NO_CONTENT", "HTTP_205_RESET_CONTENT", "HTTP_206_PARTIAL_CONTENT", "HTTP_207_MULTI_STATUS", "HTTP_208_ALREADY_REPORTED", "HTTP_226_IM_USED", "HTTP_300_MULTIPLE_CHOICES", "HTTP_301_MOVED_PERMANENTLY", "HTTP_302_FOUND", "HTTP_303_SEE_OTHER", "HTTP_304_NOT_MODIFIED", "HTTP_305_USE_PROXY", "HTTP_306_RESERVED", "HTTP_307_TEMPORARY_REDIRECT", "HTTP_308_PERMANENT_REDIRECT", "HTTP_400_BAD_REQUEST", "HTTP_401_UNAUTHORIZED", "HTTP_402_PAYMENT_REQUIRED", "HTTP_403_FORBIDDEN", "HTTP_404_NOT_FOUND", "HTTP_405_METHOD_NOT_ALLOWED", "HTTP_406_NOT_ACCEPTABLE", "HTTP_407_PROXY_AUTHENTICATION_REQUIRED", "HTTP_408_REQUEST_TIMEOUT", "HTTP_409_CONFLICT", "HTTP_410_GONE", "HTTP_411_LENGTH_REQUIRED", "HTTP_412_PRECONDITION_FAILED", "HTTP_413_REQUEST_ENTITY_TOO_LARGE", "HTTP_414_REQUEST_URI_TOO_LONG", "HTTP_415_UNSUPPORTED_MEDIA_TYPE", "HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE", "HTTP_417_EXPECTATION_FAILED", "HTTP_418_IM_A_TEAPOT", "HTTP_421_MISDIRECTED_REQUEST", "HTTP_422_UNPROCESSABLE_ENTITY", "HTTP_423_LOCKED", "HTTP_424_FAILED_DEPENDENCY", "HTTP_425_TOO_EARLY", "HTTP_426_UPGRADE_REQUIRED", "HTTP_428_PRECONDITION_REQUIRED", "HTTP_429_TOO_MANY_REQUESTS", "HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE", "HTTP_451_UNAVAILABLE_FOR_LEGAL_REASONS", "HTTP_500_INTERNAL_SERVER_ERROR", "HTTP_501_NOT_IMPLEMENTED", "HTTP_502_BAD_GATEWAY", "HTTP_503_SERVICE_UNAVAILABLE", "HTTP_504_GATEWAY_TIMEOUT", "HTTP_505_HTTP_VERSION_NOT_SUPPORTED", "HTTP_506_VARIANT_ALSO_NEGOTIATES", "HTTP_507_INSUFFICIENT_STORAGE", "HTTP_508_LOOP_DETECTED", "HTTP_510_NOT_EXTENDED", "HTTP_511_NETWORK_AUTHENTICATION_REQUIRED", "WS_1000_NORMAL_CLOSURE", "WS_1001_GOING_AWAY", "WS_1002_PROTOCOL_ERROR", "WS_1003_UNSUPPORTED_DATA", "WS_1005_NO_STATUS_RCVD", "WS_1006_ABNORMAL_CLOSURE", "WS_1007_INVALID_FRAME_PAYLOAD_DATA", "WS_1008_POLICY_VIOLATION", "WS_1009_MESSAGE_TOO_BIG", "WS_1010_MANDATORY_EXT", "WS_1011_INTERNAL_ERROR", "WS_1012_SERVICE_RESTART", "WS_1013_TRY_AGAIN_LATER", "WS_1014_BAD_GATEWAY", "WS_1015_TLS_HANDSHAKE", ) HTTP_100_CONTINUE = 100 HTTP_101_SWITCHING_PROTOCOLS = 101 HTTP_102_PROCESSING = 102 HTTP_103_EARLY_HINTS = 103 HTTP_200_OK = 200 HTTP_201_CREATED = 201 HTTP_202_ACCEPTED = 202 HTTP_203_NON_AUTHORITATIVE_INFORMATION = 203 HTTP_204_NO_CONTENT = 204 HTTP_205_RESET_CONTENT = 205 HTTP_206_PARTIAL_CONTENT = 206 HTTP_207_MULTI_STATUS = 207 HTTP_208_ALREADY_REPORTED = 208 HTTP_226_IM_USED = 226 HTTP_300_MULTIPLE_CHOICES = 300 HTTP_301_MOVED_PERMANENTLY = 301 HTTP_302_FOUND = 302 HTTP_303_SEE_OTHER = 303 HTTP_304_NOT_MODIFIED = 304 HTTP_305_USE_PROXY = 305 HTTP_306_RESERVED = 306 HTTP_307_TEMPORARY_REDIRECT = 307 HTTP_308_PERMANENT_REDIRECT = 308 HTTP_400_BAD_REQUEST = 400 HTTP_401_UNAUTHORIZED = 401 HTTP_402_PAYMENT_REQUIRED = 402 HTTP_403_FORBIDDEN = 403 HTTP_404_NOT_FOUND = 404 HTTP_405_METHOD_NOT_ALLOWED = 405 HTTP_406_NOT_ACCEPTABLE = 406 HTTP_407_PROXY_AUTHENTICATION_REQUIRED = 407 HTTP_408_REQUEST_TIMEOUT = 408 HTTP_409_CONFLICT = 409 HTTP_410_GONE = 410 HTTP_411_LENGTH_REQUIRED = 411 HTTP_412_PRECONDITION_FAILED = 412 HTTP_413_REQUEST_ENTITY_TOO_LARGE = 413 HTTP_414_REQUEST_URI_TOO_LONG = 414 
HTTP_415_UNSUPPORTED_MEDIA_TYPE = 415 HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416 HTTP_417_EXPECTATION_FAILED = 417 HTTP_418_IM_A_TEAPOT = 418 HTTP_421_MISDIRECTED_REQUEST = 421 HTTP_422_UNPROCESSABLE_ENTITY = 422 HTTP_423_LOCKED = 423 HTTP_424_FAILED_DEPENDENCY = 424 HTTP_425_TOO_EARLY = 425 HTTP_426_UPGRADE_REQUIRED = 426 HTTP_428_PRECONDITION_REQUIRED = 428 HTTP_429_TOO_MANY_REQUESTS = 429 HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431 HTTP_451_UNAVAILABLE_FOR_LEGAL_REASONS = 451 HTTP_500_INTERNAL_SERVER_ERROR = 500 HTTP_501_NOT_IMPLEMENTED = 501 HTTP_502_BAD_GATEWAY = 502 HTTP_503_SERVICE_UNAVAILABLE = 503 HTTP_504_GATEWAY_TIMEOUT = 504 HTTP_505_HTTP_VERSION_NOT_SUPPORTED = 505 HTTP_506_VARIANT_ALSO_NEGOTIATES = 506 HTTP_507_INSUFFICIENT_STORAGE = 507 HTTP_508_LOOP_DETECTED = 508 HTTP_510_NOT_EXTENDED = 510 HTTP_511_NETWORK_AUTHENTICATION_REQUIRED = 511 """ WebSocket codes https://www.iana.org/assignments/websocket/websocket.xml#close-code-number https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent """ WS_1000_NORMAL_CLOSURE = 1000 WS_1001_GOING_AWAY = 1001 WS_1002_PROTOCOL_ERROR = 1002 WS_1003_UNSUPPORTED_DATA = 1003 WS_1005_NO_STATUS_RCVD = 1005 WS_1006_ABNORMAL_CLOSURE = 1006 WS_1007_INVALID_FRAME_PAYLOAD_DATA = 1007 WS_1008_POLICY_VIOLATION = 1008 WS_1009_MESSAGE_TOO_BIG = 1009 WS_1010_MANDATORY_EXT = 1010 WS_1011_INTERNAL_ERROR = 1011 WS_1012_SERVICE_RESTART = 1012 WS_1013_TRY_AGAIN_LATER = 1013 WS_1014_BAD_GATEWAY = 1014 WS_1015_TLS_HANDSHAKE = 1015 __deprecated__ = {"WS_1004_NO_STATUS_RCVD": 1004, "WS_1005_ABNORMAL_CLOSURE": 1005} def __getattr__(name: str) -> int: deprecation_changes = { "WS_1004_NO_STATUS_RCVD": "WS_1005_NO_STATUS_RCVD", "WS_1005_ABNORMAL_CLOSURE": "WS_1006_ABNORMAL_CLOSURE", } deprecated = __deprecated__.get(name) if deprecated: warnings.warn( f"'{name}' is deprecated. Use '{deprecation_changes[name]}' instead.", category=DeprecationWarning, stacklevel=3, ) return deprecated raise AttributeError(f"module '{__name__}' has no attribute '{name}'") def __dir__() -> List[str]: return sorted(list(__all__) + list(__deprecated__.keys())) # pragma: no cover
6,086
Python
29.435
83
0.698324
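The constants are plain integers, so they drop in anywhere a status code is expected; for example:

from starlette import status
from starlette.responses import PlainTextResponse

assert status.HTTP_404_NOT_FOUND == 404
response = PlainTextResponse("created", status_code=status.HTTP_201_CREATED)
assert response.status_code == 201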
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/applications.py
import typing
import warnings

from starlette.datastructures import State, URLPath
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.middleware.errors import ServerErrorMiddleware
from starlette.middleware.exceptions import ExceptionMiddleware
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import BaseRoute, Router
from starlette.types import ASGIApp, Receive, Scope, Send


class Starlette:
    """
    Creates an application instance.

    **Parameters:**

    * **debug** - Boolean indicating if debug tracebacks should be returned on errors.
    * **routes** - A list of routes to serve incoming HTTP and WebSocket requests.
    * **middleware** - A list of middleware to run for every request. A Starlette
    application will always automatically include two middleware classes.
    `ServerErrorMiddleware` is added as the very outermost middleware, to handle
    any uncaught errors occurring anywhere in the entire stack.
    `ExceptionMiddleware` is added as the very innermost middleware, to deal
    with handled exception cases occurring in the routing or endpoints.
    * **exception_handlers** - A mapping of either integer status codes,
    or exception class types onto callables which handle the exceptions.
    Exception handler callables should be of the form
    `handler(request, exc) -> response` and may be either standard functions, or
    async functions.
    * **on_startup** - A list of callables to run on application startup.
    Startup handler callables do not take any arguments, and may be either
    standard functions, or async functions.
    * **on_shutdown** - A list of callables to run on application shutdown.
    Shutdown handler callables do not take any arguments, and may be either
    standard functions, or async functions.
    """

    def __init__(
        self,
        debug: bool = False,
        routes: typing.Optional[typing.Sequence[BaseRoute]] = None,
        middleware: typing.Optional[typing.Sequence[Middleware]] = None,
        exception_handlers: typing.Optional[
            typing.Mapping[
                typing.Any,
                typing.Callable[
                    [Request, Exception],
                    typing.Union[Response, typing.Awaitable[Response]],
                ],
            ]
        ] = None,
        on_startup: typing.Optional[typing.Sequence[typing.Callable]] = None,
        on_shutdown: typing.Optional[typing.Sequence[typing.Callable]] = None,
        lifespan: typing.Optional[
            typing.Callable[["Starlette"], typing.AsyncContextManager]
        ] = None,
    ) -> None:
        # The lifespan context function is a newer style that replaces
        # on_startup / on_shutdown handlers. Use one or the other, not both.
        assert lifespan is None or (
            on_startup is None and on_shutdown is None
        ), "Use either 'lifespan' or 'on_startup'/'on_shutdown', not both."

        self.debug = debug
        self.state = State()
        self.router = Router(
            routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan
        )
        self.exception_handlers = (
            {} if exception_handlers is None else dict(exception_handlers)
        )
        self.user_middleware = [] if middleware is None else list(middleware)
        self.middleware_stack: typing.Optional[ASGIApp] = None

    def build_middleware_stack(self) -> ASGIApp:
        debug = self.debug
        error_handler = None
        exception_handlers: typing.Dict[
            typing.Any, typing.Callable[[Request, Exception], Response]
        ] = {}

        for key, value in self.exception_handlers.items():
            if key in (500, Exception):
                error_handler = value
            else:
                exception_handlers[key] = value

        middleware = (
            [Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)]
            + self.user_middleware
            + [
                Middleware(
                    ExceptionMiddleware, handlers=exception_handlers, debug=debug
                )
            ]
        )

        app = self.router
        for cls, options in reversed(middleware):
            app = cls(app=app, **options)
        return app

    @property
    def routes(self) -> typing.List[BaseRoute]:
        return self.router.routes

    def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:
        return self.router.url_path_for(name, **path_params)

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        scope["app"] = self
        if self.middleware_stack is None:
            self.middleware_stack = self.build_middleware_stack()
        await self.middleware_stack(scope, receive, send)

    def on_event(self, event_type: str) -> typing.Callable:  # pragma: nocover
        return self.router.on_event(event_type)

    def mount(
        self, path: str, app: ASGIApp, name: typing.Optional[str] = None
    ) -> None:  # pragma: nocover
        self.router.mount(path, app=app, name=name)

    def host(
        self, host: str, app: ASGIApp, name: typing.Optional[str] = None
    ) -> None:  # pragma: no cover
        self.router.host(host, app=app, name=name)

    def add_middleware(self, middleware_class: type, **options: typing.Any) -> None:
        if self.middleware_stack is not None:  # pragma: no cover
            raise RuntimeError("Cannot add middleware after an application has started")
        self.user_middleware.insert(0, Middleware(middleware_class, **options))

    def add_exception_handler(
        self,
        exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],
        handler: typing.Callable,
    ) -> None:  # pragma: no cover
        self.exception_handlers[exc_class_or_status_code] = handler

    def add_event_handler(
        self, event_type: str, func: typing.Callable
    ) -> None:  # pragma: no cover
        self.router.add_event_handler(event_type, func)

    def add_route(
        self,
        path: str,
        route: typing.Callable,
        methods: typing.Optional[typing.List[str]] = None,
        name: typing.Optional[str] = None,
        include_in_schema: bool = True,
    ) -> None:  # pragma: no cover
        self.router.add_route(
            path, route, methods=methods, name=name, include_in_schema=include_in_schema
        )

    def add_websocket_route(
        self, path: str, route: typing.Callable, name: typing.Optional[str] = None
    ) -> None:  # pragma: no cover
        self.router.add_websocket_route(path, route, name=name)

    def exception_handler(
        self, exc_class_or_status_code: typing.Union[int, typing.Type[Exception]]
    ) -> typing.Callable:
        warnings.warn(
            "The `exception_handler` decorator is deprecated, and will be removed in version 1.0.0. "  # noqa: E501
            "Refer to https://www.starlette.io/exceptions/ for the recommended approach.",  # noqa: E501
            DeprecationWarning,
        )

        def decorator(func: typing.Callable) -> typing.Callable:
            self.add_exception_handler(exc_class_or_status_code, func)
            return func

        return decorator

    def route(
        self,
        path: str,
        methods: typing.Optional[typing.List[str]] = None,
        name: typing.Optional[str] = None,
        include_in_schema: bool = True,
    ) -> typing.Callable:
        """
        We no longer document this decorator style API, and its usage is discouraged.
        Instead you should use the following approach:

        >>> routes = [Route(path, endpoint=...), ...]
        >>> app = Starlette(routes=routes)
        """
        warnings.warn(
            "The `route` decorator is deprecated, and will be removed in version 1.0.0. "  # noqa: E501
            "Refer to https://www.starlette.io/routing/ for the recommended approach.",  # noqa: E501
            DeprecationWarning,
        )

        def decorator(func: typing.Callable) -> typing.Callable:
            self.router.add_route(
                path,
                func,
                methods=methods,
                name=name,
                include_in_schema=include_in_schema,
            )
            return func

        return decorator

    def websocket_route(
        self, path: str, name: typing.Optional[str] = None
    ) -> typing.Callable:
        """
        We no longer document this decorator style API, and its usage is discouraged.
        Instead you should use the following approach:

        >>> routes = [WebSocketRoute(path, endpoint=...), ...]
        >>> app = Starlette(routes=routes)
        """
        warnings.warn(
            "The `websocket_route` decorator is deprecated, and will be removed in version 1.0.0. "  # noqa: E501
            "Refer to https://www.starlette.io/routing/#websocket-routing for the recommended approach.",  # noqa: E501
            DeprecationWarning,
        )

        def decorator(func: typing.Callable) -> typing.Callable:
            self.router.add_websocket_route(path, func, name=name)
            return func

        return decorator

    def middleware(self, middleware_type: str) -> typing.Callable:
        """
        We no longer document this decorator style API, and its usage is discouraged.
        Instead you should use the following approach:

        >>> middleware = [Middleware(...), ...]
        >>> app = Starlette(middleware=middleware)
        """
        warnings.warn(
            "The `middleware` decorator is deprecated, and will be removed in version 1.0.0. "  # noqa: E501
            "Refer to https://www.starlette.io/middleware/#using-middleware for recommended approach.",  # noqa: E501
            DeprecationWarning,
        )
        assert (
            middleware_type == "http"
        ), 'Currently only middleware("http") is supported.'

        def decorator(func: typing.Callable) -> typing.Callable:
            self.add_middleware(BaseHTTPMiddleware, dispatch=func)
            return func

        return decorator
10,174
Python
38.437984
119
0.625713
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/schemas.py
import inspect import re import typing from starlette.requests import Request from starlette.responses import Response from starlette.routing import BaseRoute, Mount, Route try: import yaml except ImportError: # pragma: nocover yaml = None # type: ignore[assignment] class OpenAPIResponse(Response): media_type = "application/vnd.oai.openapi" def render(self, content: typing.Any) -> bytes: assert yaml is not None, "`pyyaml` must be installed to use OpenAPIResponse." assert isinstance( content, dict ), "The schema passed to OpenAPIResponse should be a dictionary." return yaml.dump(content, default_flow_style=False).encode("utf-8") class EndpointInfo(typing.NamedTuple): path: str http_method: str func: typing.Callable class BaseSchemaGenerator: def get_schema(self, routes: typing.List[BaseRoute]) -> dict: raise NotImplementedError() # pragma: no cover def get_endpoints( self, routes: typing.List[BaseRoute] ) -> typing.List[EndpointInfo]: """ Given the routes, yields the following information: - path eg: /users/ - http_method one of 'get', 'post', 'put', 'patch', 'delete', 'options' - func method ready to extract the docstring """ endpoints_info: list = [] for route in routes: if isinstance(route, Mount): path = self._remove_converter(route.path) routes = route.routes or [] sub_endpoints = [ EndpointInfo( path="".join((path, sub_endpoint.path)), http_method=sub_endpoint.http_method, func=sub_endpoint.func, ) for sub_endpoint in self.get_endpoints(routes) ] endpoints_info.extend(sub_endpoints) elif not isinstance(route, Route) or not route.include_in_schema: continue elif inspect.isfunction(route.endpoint) or inspect.ismethod(route.endpoint): path = self._remove_converter(route.path) for method in route.methods or ["GET"]: if method == "HEAD": continue endpoints_info.append( EndpointInfo(path, method.lower(), route.endpoint) ) else: path = self._remove_converter(route.path) for method in ["get", "post", "put", "patch", "delete", "options"]: if not hasattr(route.endpoint, method): continue func = getattr(route.endpoint, method) endpoints_info.append(EndpointInfo(path, method.lower(), func)) return endpoints_info def _remove_converter(self, path: str) -> str: """ Remove the converter from the path. For example, a route like this: Route("/users/{id:int}", endpoint=get_user, methods=["GET"]) Should be represented as `/users/{id}` in the OpenAPI schema. """ return re.sub(r":\w+}", "}", path) def parse_docstring(self, func_or_method: typing.Callable) -> dict: """ Given a function, parse the docstring as YAML and return a dictionary of info. """ docstring = func_or_method.__doc__ if not docstring: return {} assert yaml is not None, "`pyyaml` must be installed to use parse_docstring." # We support having regular docstrings before the schema # definition. Here we return just the schema part from # the docstring. docstring = docstring.split("---")[-1] parsed = yaml.safe_load(docstring) if not isinstance(parsed, dict): # A regular docstring (not yaml formatted) can return # a simple string here, which wouldn't follow the schema. 
return {} return parsed def OpenAPIResponse(self, request: Request) -> Response: routes = request.app.routes schema = self.get_schema(routes=routes) return OpenAPIResponse(schema) class SchemaGenerator(BaseSchemaGenerator): def __init__(self, base_schema: dict) -> None: self.base_schema = base_schema def get_schema(self, routes: typing.List[BaseRoute]) -> dict: schema = dict(self.base_schema) schema.setdefault("paths", {}) endpoints_info = self.get_endpoints(routes) for endpoint in endpoints_info: parsed = self.parse_docstring(endpoint.func) if not parsed: continue if endpoint.path not in schema["paths"]: schema["paths"][endpoint.path] = {} schema["paths"][endpoint.path][endpoint.http_method] = parsed return schema
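# A minimal sketch of the docstring-as-YAML flow implemented above; the endpoint and
# schema contents are hypothetical, and `pyyaml` must be installed for parse_docstring.
from starlette.routing import Route
from starlette.schemas import SchemaGenerator

def list_users(request):
    """
    responses:
      200:
        description: A list of users.
    """

schemas = SchemaGenerator(
    {"openapi": "3.0.0", "info": {"title": "Example API", "version": "1.0"}}
)
routes = [Route("/users", endpoint=list_users, methods=["GET"])]
# get_schema() walks the routes, parses each endpoint docstring as YAML,
# and merges the result under "paths".
print(schemas.get_schema(routes=routes))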
4,952
Python
32.693877
88
0.576131
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/wsgi.py
import io import math import sys import typing import warnings import anyio from starlette.types import Receive, Scope, Send warnings.warn( "starlette.middleware.wsgi is deprecated and will be removed in a future release. " "Please refer to https://github.com/abersheeran/a2wsgi as a replacement.", DeprecationWarning, ) def build_environ(scope: Scope, body: bytes) -> dict: """ Builds a scope and request body into a WSGI environ object. """ environ = { "REQUEST_METHOD": scope["method"], "SCRIPT_NAME": scope.get("root_path", "").encode("utf8").decode("latin1"), "PATH_INFO": scope["path"].encode("utf8").decode("latin1"), "QUERY_STRING": scope["query_string"].decode("ascii"), "SERVER_PROTOCOL": f"HTTP/{scope['http_version']}", "wsgi.version": (1, 0), "wsgi.url_scheme": scope.get("scheme", "http"), "wsgi.input": io.BytesIO(body), "wsgi.errors": sys.stdout, "wsgi.multithread": True, "wsgi.multiprocess": True, "wsgi.run_once": False, } # Get server name and port - required in WSGI, not in ASGI server = scope.get("server") or ("localhost", 80) environ["SERVER_NAME"] = server[0] environ["SERVER_PORT"] = server[1] # Get client IP address if scope.get("client"): environ["REMOTE_ADDR"] = scope["client"][0] # Go through headers and make them into environ entries for name, value in scope.get("headers", []): name = name.decode("latin1") if name == "content-length": corrected_name = "CONTENT_LENGTH" elif name == "content-type": corrected_name = "CONTENT_TYPE" else: corrected_name = f"HTTP_{name}".upper().replace("-", "_") # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in # case value = value.decode("latin1") if corrected_name in environ: value = environ[corrected_name] + "," + value environ[corrected_name] = value return environ class WSGIMiddleware: def __init__(self, app: typing.Callable) -> None: self.app = app async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: assert scope["type"] == "http" responder = WSGIResponder(self.app, scope) await responder(receive, send) class WSGIResponder: def __init__(self, app: typing.Callable, scope: Scope) -> None: self.app = app self.scope = scope self.status = None self.response_headers = None self.stream_send, self.stream_receive = anyio.create_memory_object_stream( math.inf ) self.response_started = False self.exc_info: typing.Any = None async def __call__(self, receive: Receive, send: Send) -> None: body = b"" more_body = True while more_body: message = await receive() body += message.get("body", b"") more_body = message.get("more_body", False) environ = build_environ(self.scope, body) async with anyio.create_task_group() as task_group: task_group.start_soon(self.sender, send) async with self.stream_send: await anyio.to_thread.run_sync(self.wsgi, environ, self.start_response) if self.exc_info is not None: raise self.exc_info[0].with_traceback(self.exc_info[1], self.exc_info[2]) async def sender(self, send: Send) -> None: async with self.stream_receive: async for message in self.stream_receive: await send(message) def start_response( self, status: str, response_headers: typing.List[typing.Tuple[str, str]], exc_info: typing.Any = None, ) -> None: self.exc_info = exc_info if not self.response_started: self.response_started = True status_code_string, _ = status.split(" ", 1) status_code = int(status_code_string) headers = [ (name.strip().encode("ascii").lower(), value.strip().encode("ascii")) for name, value in response_headers ] anyio.from_thread.run( self.stream_send.send, { "type": "http.response.start", "status": 
status_code, "headers": headers, }, ) def wsgi(self, environ: dict, start_response: typing.Callable) -> None: for chunk in self.app(environ, start_response): anyio.from_thread.run( self.stream_send.send, {"type": "http.response.body", "body": chunk, "more_body": True}, ) anyio.from_thread.run( self.stream_send.send, {"type": "http.response.body", "body": b""} )
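# A small sketch of wrapping a plain WSGI callable so an ASGI server can run it.
# The WSGI app below is hypothetical; per the deprecation warning above, a2wsgi is
# the suggested long-term replacement.
from starlette.middleware.wsgi import WSGIMiddleware

def wsgi_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"Hello from WSGI"]

asgi_app = WSGIMiddleware(wsgi_app)  # serve asgi_app with uvicorn, hypercorn, etc.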
4,906
Python
33.801418
87
0.572564
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/gzip.py
import gzip import io import typing from starlette.datastructures import Headers, MutableHeaders from starlette.types import ASGIApp, Message, Receive, Scope, Send class GZipMiddleware: def __init__( self, app: ASGIApp, minimum_size: int = 500, compresslevel: int = 9 ) -> None: self.app = app self.minimum_size = minimum_size self.compresslevel = compresslevel async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] == "http": headers = Headers(scope=scope) if "gzip" in headers.get("Accept-Encoding", ""): responder = GZipResponder( self.app, self.minimum_size, compresslevel=self.compresslevel ) await responder(scope, receive, send) return await self.app(scope, receive, send) class GZipResponder: def __init__(self, app: ASGIApp, minimum_size: int, compresslevel: int = 9) -> None: self.app = app self.minimum_size = minimum_size self.send: Send = unattached_send self.initial_message: Message = {} self.started = False self.content_encoding_set = False self.gzip_buffer = io.BytesIO() self.gzip_file = gzip.GzipFile( mode="wb", fileobj=self.gzip_buffer, compresslevel=compresslevel ) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: self.send = send await self.app(scope, receive, self.send_with_gzip) async def send_with_gzip(self, message: Message) -> None: message_type = message["type"] if message_type == "http.response.start": # Don't send the initial message until we've determined how to # modify the outgoing headers correctly. self.initial_message = message headers = Headers(raw=self.initial_message["headers"]) self.content_encoding_set = "content-encoding" in headers elif message_type == "http.response.body" and self.content_encoding_set: if not self.started: self.started = True await self.send(self.initial_message) await self.send(message) elif message_type == "http.response.body" and not self.started: self.started = True body = message.get("body", b"") more_body = message.get("more_body", False) if len(body) < self.minimum_size and not more_body: # Don't apply GZip to small outgoing responses. await self.send(self.initial_message) await self.send(message) elif not more_body: # Standard GZip response. self.gzip_file.write(body) self.gzip_file.close() body = self.gzip_buffer.getvalue() headers = MutableHeaders(raw=self.initial_message["headers"]) headers["Content-Encoding"] = "gzip" headers["Content-Length"] = str(len(body)) headers.add_vary_header("Accept-Encoding") message["body"] = body await self.send(self.initial_message) await self.send(message) else: # Initial body in streaming GZip response. headers = MutableHeaders(raw=self.initial_message["headers"]) headers["Content-Encoding"] = "gzip" headers.add_vary_header("Accept-Encoding") del headers["Content-Length"] self.gzip_file.write(body) message["body"] = self.gzip_buffer.getvalue() self.gzip_buffer.seek(0) self.gzip_buffer.truncate() await self.send(self.initial_message) await self.send(message) elif message_type == "http.response.body": # Remaining body in streaming GZip response. body = message.get("body", b"") more_body = message.get("more_body", False) self.gzip_file.write(body) if not more_body: self.gzip_file.close() message["body"] = self.gzip_buffer.getvalue() self.gzip_buffer.seek(0) self.gzip_buffer.truncate() await self.send(message) async def unattached_send(message: Message) -> typing.NoReturn: raise RuntimeError("send awaitable not set") # pragma: no cover
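# A usage sketch, assuming the standard Starlette middleware wiring: responses smaller
# than minimum_size pass through untouched; larger ones are gzip-compressed when the
# client sent "Accept-Encoding: gzip". The values below are illustrative.
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware

app = Starlette(
    middleware=[Middleware(GZipMiddleware, minimum_size=1000, compresslevel=6)]
)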
4,507
Python
38.543859
88
0.576437
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/exceptions.py
import typing from starlette._utils import is_async_callable from starlette.concurrency import run_in_threadpool from starlette.exceptions import HTTPException, WebSocketException from starlette.requests import Request from starlette.responses import PlainTextResponse, Response from starlette.types import ASGIApp, Message, Receive, Scope, Send from starlette.websockets import WebSocket class ExceptionMiddleware: def __init__( self, app: ASGIApp, handlers: typing.Optional[ typing.Mapping[typing.Any, typing.Callable[[Request, Exception], Response]] ] = None, debug: bool = False, ) -> None: self.app = app self.debug = debug # TODO: We ought to handle 404 cases if debug is set. self._status_handlers: typing.Dict[int, typing.Callable] = {} self._exception_handlers: typing.Dict[ typing.Type[Exception], typing.Callable ] = { HTTPException: self.http_exception, WebSocketException: self.websocket_exception, } if handlers is not None: for key, value in handlers.items(): self.add_exception_handler(key, value) def add_exception_handler( self, exc_class_or_status_code: typing.Union[int, typing.Type[Exception]], handler: typing.Callable[[Request, Exception], Response], ) -> None: if isinstance(exc_class_or_status_code, int): self._status_handlers[exc_class_or_status_code] = handler else: assert issubclass(exc_class_or_status_code, Exception) self._exception_handlers[exc_class_or_status_code] = handler def _lookup_exception_handler( self, exc: Exception ) -> typing.Optional[typing.Callable]: for cls in type(exc).__mro__: if cls in self._exception_handlers: return self._exception_handlers[cls] return None async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] not in ("http", "websocket"): await self.app(scope, receive, send) return response_started = False async def sender(message: Message) -> None: nonlocal response_started if message["type"] == "http.response.start": response_started = True await send(message) try: await self.app(scope, receive, sender) except Exception as exc: handler = None if isinstance(exc, HTTPException): handler = self._status_handlers.get(exc.status_code) if handler is None: handler = self._lookup_exception_handler(exc) if handler is None: raise exc if response_started: msg = "Caught handled exception, but response already started." raise RuntimeError(msg) from exc if scope["type"] == "http": request = Request(scope, receive=receive) if is_async_callable(handler): response = await handler(request, exc) else: response = await run_in_threadpool(handler, request, exc) await response(scope, receive, sender) elif scope["type"] == "websocket": websocket = WebSocket(scope, receive=receive, send=send) if is_async_callable(handler): await handler(websocket, exc) else: await run_in_threadpool(handler, websocket, exc) def http_exception(self, request: Request, exc: HTTPException) -> Response: if exc.status_code in {204, 304}: return Response(status_code=exc.status_code, headers=exc.headers) return PlainTextResponse( exc.detail, status_code=exc.status_code, headers=exc.headers ) async def websocket_exception( self, websocket: WebSocket, exc: WebSocketException ) -> None: await websocket.close(code=exc.code, reason=exc.reason)
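# A sketch of how handlers typically reach this middleware: Starlette passes its
# exception_handlers mapping through, and _lookup_exception_handler above walks the
# exception's MRO to find the closest match. The handler body is illustrative.
from starlette.applications import Starlette
from starlette.exceptions import HTTPException
from starlette.responses import JSONResponse

async def http_exception(request, exc):
    return JSONResponse({"detail": exc.detail}, status_code=exc.status_code)

app = Starlette(exception_handlers={HTTPException: http_exception})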
4,128
Python
36.536363
87
0.601502
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/httpsredirect.py
from starlette.datastructures import URL from starlette.responses import RedirectResponse from starlette.types import ASGIApp, Receive, Scope, Send class HTTPSRedirectMiddleware: def __init__(self, app: ASGIApp) -> None: self.app = app async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] in ("http", "websocket") and scope["scheme"] in ("http", "ws"): url = URL(scope=scope) redirect_scheme = {"http": "https", "ws": "wss"}[url.scheme] netloc = url.hostname if url.port in (80, 443) else url.netloc url = url.replace(scheme=redirect_scheme, netloc=netloc) response = RedirectResponse(url, status_code=307) await response(scope, receive, send) else: await self.app(scope, receive, send)
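# A usage sketch: with this middleware installed, any "http"/"ws" request receives a
# 307 redirect to its "https"/"wss" counterpart, as implemented above.
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.httpsredirect import HTTPSRedirectMiddleware

app = Starlette(middleware=[Middleware(HTTPSRedirectMiddleware)])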
848
Python
41.449998
88
0.630896
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/base.py
import typing import anyio from starlette.background import BackgroundTask from starlette.requests import Request from starlette.responses import ContentStream, Response, StreamingResponse from starlette.types import ASGIApp, Message, Receive, Scope, Send RequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]] DispatchFunction = typing.Callable[ [Request, RequestResponseEndpoint], typing.Awaitable[Response] ] T = typing.TypeVar("T") class BaseHTTPMiddleware: def __init__( self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None ) -> None: self.app = app self.dispatch_func = self.dispatch if dispatch is None else dispatch async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] != "http": await self.app(scope, receive, send) return response_sent = anyio.Event() async def call_next(request: Request) -> Response: app_exc: typing.Optional[Exception] = None send_stream, recv_stream = anyio.create_memory_object_stream() async def receive_or_disconnect() -> Message: if response_sent.is_set(): return {"type": "http.disconnect"} async with anyio.create_task_group() as task_group: async def wrap(func: typing.Callable[[], typing.Awaitable[T]]) -> T: result = await func() task_group.cancel_scope.cancel() return result task_group.start_soon(wrap, response_sent.wait) message = await wrap(request.receive) if response_sent.is_set(): return {"type": "http.disconnect"} return message async def close_recv_stream_on_response_sent() -> None: await response_sent.wait() recv_stream.close() async def send_no_error(message: Message) -> None: try: await send_stream.send(message) except anyio.BrokenResourceError: # recv_stream has been closed, i.e. response_sent has been set. return async def coro() -> None: nonlocal app_exc async with send_stream: try: await self.app(scope, receive_or_disconnect, send_no_error) except Exception as exc: app_exc = exc task_group.start_soon(close_recv_stream_on_response_sent) task_group.start_soon(coro) try: message = await recv_stream.receive() info = message.get("info", None) if message["type"] == "http.response.debug" and info is not None: message = await recv_stream.receive() except anyio.EndOfStream: if app_exc is not None: raise app_exc raise RuntimeError("No response returned.") assert message["type"] == "http.response.start" async def body_stream() -> typing.AsyncGenerator[bytes, None]: async with recv_stream: async for message in recv_stream: assert message["type"] == "http.response.body" body = message.get("body", b"") if body: yield body if app_exc is not None: raise app_exc response = _StreamingResponse( status_code=message["status"], content=body_stream(), info=info ) response.raw_headers = message["headers"] return response async with anyio.create_task_group() as task_group: request = Request(scope, receive=receive) response = await self.dispatch_func(request, call_next) await response(scope, receive, send) response_sent.set() async def dispatch( self, request: Request, call_next: RequestResponseEndpoint ) -> Response: raise NotImplementedError() # pragma: no cover class _StreamingResponse(StreamingResponse): def __init__( self, content: ContentStream, status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, info: typing.Optional[typing.Mapping[str, typing.Any]] = None, ) -> None: self._info = info super().__init__(content, status_code, headers, media_type, background) async def 
stream_response(self, send: Send) -> None: if self._info: await send({"type": "http.response.debug", "info": self._info}) return await super().stream_response(send)
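# A minimal subclass sketch: dispatch() receives the request plus a call_next callable
# that drives the rest of the stack through the streams set up above. The header name
# is hypothetical.
import time

from starlette.middleware.base import BaseHTTPMiddleware

class TimingMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request, call_next):
        start = time.perf_counter()
        response = await call_next(request)
        response.headers["X-Process-Time"] = f"{time.perf_counter() - start:.4f}"
        return response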
5,010
Python
36.118518
88
0.561876
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/cors.py
import functools import re import typing from starlette.datastructures import Headers, MutableHeaders from starlette.responses import PlainTextResponse, Response from starlette.types import ASGIApp, Message, Receive, Scope, Send ALL_METHODS = ("DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT") SAFELISTED_HEADERS = {"Accept", "Accept-Language", "Content-Language", "Content-Type"} class CORSMiddleware: def __init__( self, app: ASGIApp, allow_origins: typing.Sequence[str] = (), allow_methods: typing.Sequence[str] = ("GET",), allow_headers: typing.Sequence[str] = (), allow_credentials: bool = False, allow_origin_regex: typing.Optional[str] = None, expose_headers: typing.Sequence[str] = (), max_age: int = 600, ) -> None: if "*" in allow_methods: allow_methods = ALL_METHODS compiled_allow_origin_regex = None if allow_origin_regex is not None: compiled_allow_origin_regex = re.compile(allow_origin_regex) allow_all_origins = "*" in allow_origins allow_all_headers = "*" in allow_headers preflight_explicit_allow_origin = not allow_all_origins or allow_credentials simple_headers = {} if allow_all_origins: simple_headers["Access-Control-Allow-Origin"] = "*" if allow_credentials: simple_headers["Access-Control-Allow-Credentials"] = "true" if expose_headers: simple_headers["Access-Control-Expose-Headers"] = ", ".join(expose_headers) preflight_headers = {} if preflight_explicit_allow_origin: # The origin value will be set in preflight_response() if it is allowed. preflight_headers["Vary"] = "Origin" else: preflight_headers["Access-Control-Allow-Origin"] = "*" preflight_headers.update( { "Access-Control-Allow-Methods": ", ".join(allow_methods), "Access-Control-Max-Age": str(max_age), } ) allow_headers = sorted(SAFELISTED_HEADERS | set(allow_headers)) if allow_headers and not allow_all_headers: preflight_headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers) if allow_credentials: preflight_headers["Access-Control-Allow-Credentials"] = "true" self.app = app self.allow_origins = allow_origins self.allow_methods = allow_methods self.allow_headers = [h.lower() for h in allow_headers] self.allow_all_origins = allow_all_origins self.allow_all_headers = allow_all_headers self.preflight_explicit_allow_origin = preflight_explicit_allow_origin self.allow_origin_regex = compiled_allow_origin_regex self.simple_headers = simple_headers self.preflight_headers = preflight_headers async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] != "http": # pragma: no cover await self.app(scope, receive, send) return method = scope["method"] headers = Headers(scope=scope) origin = headers.get("origin") if origin is None: await self.app(scope, receive, send) return if method == "OPTIONS" and "access-control-request-method" in headers: response = self.preflight_response(request_headers=headers) await response(scope, receive, send) return await self.simple_response(scope, receive, send, request_headers=headers) def is_allowed_origin(self, origin: str) -> bool: if self.allow_all_origins: return True if self.allow_origin_regex is not None and self.allow_origin_regex.fullmatch( origin ): return True return origin in self.allow_origins def preflight_response(self, request_headers: Headers) -> Response: requested_origin = request_headers["origin"] requested_method = request_headers["access-control-request-method"] requested_headers = request_headers.get("access-control-request-headers") headers = dict(self.preflight_headers) failures = [] if 
self.is_allowed_origin(origin=requested_origin):
            if self.preflight_explicit_allow_origin:
                # The "else" case is already accounted for in self.preflight_headers
                # and the value would be "*".
                headers["Access-Control-Allow-Origin"] = requested_origin
        else:
            failures.append("origin")

        if requested_method not in self.allow_methods:
            failures.append("method")

        # If we allow all headers, then we have to mirror back any requested
        # headers in the response.
        if self.allow_all_headers and requested_headers is not None:
            headers["Access-Control-Allow-Headers"] = requested_headers
        elif requested_headers is not None:
            for header in [h.lower() for h in requested_headers.split(",")]:
                if header.strip() not in self.allow_headers:
                    failures.append("headers")
                    break

        # We don't strictly need to use 400 responses here, since it's up to
        # the browser to enforce the CORS policy, but it's more informative
        # if we do.
        if failures:
            failure_text = "Disallowed CORS " + ", ".join(failures)
            return PlainTextResponse(failure_text, status_code=400, headers=headers)

        return PlainTextResponse("OK", status_code=200, headers=headers)

    async def simple_response(
        self, scope: Scope, receive: Receive, send: Send, request_headers: Headers
    ) -> None:
        send = functools.partial(self.send, send=send, request_headers=request_headers)
        await self.app(scope, receive, send)

    async def send(
        self, message: Message, send: Send, request_headers: Headers
    ) -> None:
        if message["type"] != "http.response.start":
            await send(message)
            return

        message.setdefault("headers", [])
        headers = MutableHeaders(scope=message)
        headers.update(self.simple_headers)
        origin = request_headers["Origin"]
        has_cookie = "cookie" in request_headers

        # If request includes any cookie headers, then we must respond
        # with the specific origin instead of '*'.
        if self.allow_all_origins and has_cookie:
            self.allow_explicit_origin(headers, origin)

        # If we only allow specific origins, then we have to mirror back
        # the Origin header in the response.
        elif not self.allow_all_origins and self.is_allowed_origin(origin=origin):
            self.allow_explicit_origin(headers, origin)

        await send(message)

    @staticmethod
    def allow_explicit_origin(headers: MutableHeaders, origin: str) -> None:
        headers["Access-Control-Allow-Origin"] = origin
        headers.add_vary_header("Origin")
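# A configuration sketch; the origin, methods, and credentials flag below are
# illustrative. Note that allow_credentials=True forces explicit-origin preflight
# responses, per preflight_explicit_allow_origin above.
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware

app = Starlette(
    middleware=[
        Middleware(
            CORSMiddleware,
            allow_origins=["https://example.com"],
            allow_methods=["GET", "POST"],
            allow_credentials=True,
        )
    ]
)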
7,076
Python
38.758427
88
0.623657
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/__init__.py
import typing class Middleware: def __init__(self, cls: type, **options: typing.Any) -> None: self.cls = cls self.options = options def __iter__(self) -> typing.Iterator: as_tuple = (self.cls, self.options) return iter(as_tuple) def __repr__(self) -> str: class_name = self.__class__.__name__ option_strings = [f"{key}={value!r}" for key, value in self.options.items()] args_repr = ", ".join([self.cls.__name__] + option_strings) return f"{class_name}({args_repr})"
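# A small sketch of the container's contract: __iter__ yields (cls, options), which is
# exactly how Starlette's build_middleware_stack unpacks and applies each entry.
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware

m = Middleware(GZipMiddleware, minimum_size=1000)
cls, options = m
assert cls is GZipMiddleware and options == {"minimum_size": 1000}
print(repr(m))  # Middleware(GZipMiddleware, minimum_size=1000)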
546
Python
29.388887
84
0.564103
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/errors.py
import html import inspect import traceback import typing from starlette._utils import is_async_callable from starlette.concurrency import run_in_threadpool from starlette.requests import Request from starlette.responses import HTMLResponse, PlainTextResponse, Response from starlette.types import ASGIApp, Message, Receive, Scope, Send STYLES = """ p { color: #211c1c; } .traceback-container { border: 1px solid #038BB8; } .traceback-title { background-color: #038BB8; color: lemonchiffon; padding: 12px; font-size: 20px; margin-top: 0px; } .frame-line { padding-left: 10px; font-family: monospace; } .frame-filename { font-family: monospace; } .center-line { background-color: #038BB8; color: #f9f6e1; padding: 5px 0px 5px 5px; } .lineno { margin-right: 5px; } .frame-title { font-weight: unset; padding: 10px 10px 10px 10px; background-color: #E4F4FD; margin-right: 10px; color: #191f21; font-size: 17px; border: 1px solid #c7dce8; } .collapse-btn { float: right; padding: 0px 5px 1px 5px; border: solid 1px #96aebb; cursor: pointer; } .collapsed { display: none; } .source-code { font-family: courier; font-size: small; padding-bottom: 10px; } """ JS = """ <script type="text/javascript"> function collapse(element){ const frameId = element.getAttribute("data-frame-id"); const frame = document.getElementById(frameId); if (frame.classList.contains("collapsed")){ element.innerHTML = "&#8210;"; frame.classList.remove("collapsed"); } else { element.innerHTML = "+"; frame.classList.add("collapsed"); } } </script> """ TEMPLATE = """ <html> <head> <style type='text/css'> {styles} </style> <title>Starlette Debugger</title> </head> <body> <h1>500 Server Error</h1> <h2>{error}</h2> <div class="traceback-container"> <p class="traceback-title">Traceback</p> <div>{exc_html}</div> </div> {js} </body> </html> """ FRAME_TEMPLATE = """ <div> <p class="frame-title">File <span class="frame-filename">{frame_filename}</span>, line <i>{frame_lineno}</i>, in <b>{frame_name}</b> <span class="collapse-btn" data-frame-id="{frame_filename}-{frame_lineno}" onclick="collapse(this)">{collapse_button}</span> </p> <div id="{frame_filename}-{frame_lineno}" class="source-code {collapsed}">{code_context}</div> </div> """ # noqa: E501 LINE = """ <p><span class="frame-line"> <span class="lineno">{lineno}.</span> {line}</span></p> """ CENTER_LINE = """ <p class="center-line"><span class="frame-line center-line"> <span class="lineno">{lineno}.</span> {line}</span></p> """ class ServerErrorMiddleware: """ Handles returning 500 responses when a server error occurs. If 'debug' is set, then traceback responses will be returned, otherwise the designated 'handler' will be called. This middleware class should generally be used to wrap *everything* else up, so that unhandled exceptions anywhere in the stack always result in an appropriate 500 response. """ def __init__( self, app: ASGIApp, handler: typing.Optional[typing.Callable] = None, debug: bool = False, ) -> None: self.app = app self.handler = handler self.debug = debug async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] != "http": await self.app(scope, receive, send) return response_started = False async def _send(message: Message) -> None: nonlocal response_started, send if message["type"] == "http.response.start": response_started = True await send(message) try: await self.app(scope, receive, _send) except Exception as exc: request = Request(scope) if self.debug: # In debug mode, return traceback responses. 
response = self.debug_response(request, exc) elif self.handler is None: # Use our default 500 error handler. response = self.error_response(request, exc) else: # Use an installed 500 error handler. if is_async_callable(self.handler): response = await self.handler(request, exc) else: response = await run_in_threadpool(self.handler, request, exc) if not response_started: await response(scope, receive, send) # We always continue to raise the exception. # This allows servers to log the error, or allows test clients # to optionally raise the error within the test case. raise exc def format_line( self, index: int, line: str, frame_lineno: int, frame_index: int ) -> str: values = { # HTML escape - line could contain < or > "line": html.escape(line).replace(" ", "&nbsp"), "lineno": (frame_lineno - frame_index) + index, } if index != frame_index: return LINE.format(**values) return CENTER_LINE.format(**values) def generate_frame_html(self, frame: inspect.FrameInfo, is_collapsed: bool) -> str: code_context = "".join( self.format_line( index, line, frame.lineno, frame.index # type: ignore[arg-type] ) for index, line in enumerate(frame.code_context or []) ) values = { # HTML escape - filename could contain < or >, especially if it's a virtual # file e.g. <stdin> in the REPL "frame_filename": html.escape(frame.filename), "frame_lineno": frame.lineno, # HTML escape - if you try very hard it's possible to name a function with < # or > "frame_name": html.escape(frame.function), "code_context": code_context, "collapsed": "collapsed" if is_collapsed else "", "collapse_button": "+" if is_collapsed else "&#8210;", } return FRAME_TEMPLATE.format(**values) def generate_html(self, exc: Exception, limit: int = 7) -> str: traceback_obj = traceback.TracebackException.from_exception( exc, capture_locals=True ) exc_html = "" is_collapsed = False exc_traceback = exc.__traceback__ if exc_traceback is not None: frames = inspect.getinnerframes(exc_traceback, limit) for frame in reversed(frames): exc_html += self.generate_frame_html(frame, is_collapsed) is_collapsed = True # escape error class and text error = ( f"{html.escape(traceback_obj.exc_type.__name__)}: " f"{html.escape(str(traceback_obj))}" ) return TEMPLATE.format(styles=STYLES, js=JS, error=error, exc_html=exc_html) def generate_plain_text(self, exc: Exception) -> str: return "".join(traceback.format_exception(type(exc), exc, exc.__traceback__)) def debug_response(self, request: Request, exc: Exception) -> Response: accept = request.headers.get("accept", "") if "text/html" in accept: content = self.generate_html(exc) return HTMLResponse(content, status_code=500) content = self.generate_plain_text(exc) return PlainTextResponse(content, status_code=500) def error_response(self, request: Request, exc: Exception) -> Response: return PlainTextResponse("Internal Server Error", status_code=500)
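# A sketch of the debug path: with debug=True the HTML traceback page above is
# returned, and the exception is still re-raised so the server can log it. The
# failing app is hypothetical.
from starlette.middleware.errors import ServerErrorMiddleware

async def failing_app(scope, receive, send):
    raise RuntimeError("boom")

app = ServerErrorMiddleware(failing_app, debug=True)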
7,833
Python
29.48249
128
0.591217
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/authentication.py
import typing from starlette.authentication import ( AuthCredentials, AuthenticationBackend, AuthenticationError, UnauthenticatedUser, ) from starlette.requests import HTTPConnection from starlette.responses import PlainTextResponse, Response from starlette.types import ASGIApp, Receive, Scope, Send class AuthenticationMiddleware: def __init__( self, app: ASGIApp, backend: AuthenticationBackend, on_error: typing.Optional[ typing.Callable[[HTTPConnection, AuthenticationError], Response] ] = None, ) -> None: self.app = app self.backend = backend self.on_error: typing.Callable[ [HTTPConnection, AuthenticationError], Response ] = (on_error if on_error is not None else self.default_on_error) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] not in ["http", "websocket"]: await self.app(scope, receive, send) return conn = HTTPConnection(scope) try: auth_result = await self.backend.authenticate(conn) except AuthenticationError as exc: response = self.on_error(conn, exc) if scope["type"] == "websocket": await send({"type": "websocket.close", "code": 1000}) else: await response(scope, receive, send) return if auth_result is None: auth_result = AuthCredentials(), UnauthenticatedUser() scope["auth"], scope["user"] = auth_result await self.app(scope, receive, send) @staticmethod def default_on_error(conn: HTTPConnection, exc: Exception) -> Response: return PlainTextResponse(str(exc), status_code=400)
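# A sketch of a backend this middleware could drive, loosely following the HTTP Basic
# example from the Starlette docs; the credential handling here is illustrative only.
import base64
import binascii

from starlette.authentication import (
    AuthCredentials,
    AuthenticationBackend,
    AuthenticationError,
    SimpleUser,
)

class BasicAuthBackend(AuthenticationBackend):
    async def authenticate(self, conn):
        if "Authorization" not in conn.headers:
            return None  # request proceeds as UnauthenticatedUser
        try:
            scheme, credentials = conn.headers["Authorization"].split()
            decoded = base64.b64decode(credentials).decode("ascii")
        except (ValueError, UnicodeDecodeError, binascii.Error):
            raise AuthenticationError("Invalid basic auth credentials")
        username, _, _ = decoded.partition(":")
        return AuthCredentials(["authenticated"]), SimpleUser(username)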
1,787
Python
32.735848
81
0.628428
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/trustedhost.py
import typing from starlette.datastructures import URL, Headers from starlette.responses import PlainTextResponse, RedirectResponse, Response from starlette.types import ASGIApp, Receive, Scope, Send ENFORCE_DOMAIN_WILDCARD = "Domain wildcard patterns must be like '*.example.com'." class TrustedHostMiddleware: def __init__( self, app: ASGIApp, allowed_hosts: typing.Optional[typing.Sequence[str]] = None, www_redirect: bool = True, ) -> None: if allowed_hosts is None: allowed_hosts = ["*"] for pattern in allowed_hosts: assert "*" not in pattern[1:], ENFORCE_DOMAIN_WILDCARD if pattern.startswith("*") and pattern != "*": assert pattern.startswith("*."), ENFORCE_DOMAIN_WILDCARD self.app = app self.allowed_hosts = list(allowed_hosts) self.allow_any = "*" in allowed_hosts self.www_redirect = www_redirect async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if self.allow_any or scope["type"] not in ( "http", "websocket", ): # pragma: no cover await self.app(scope, receive, send) return headers = Headers(scope=scope) host = headers.get("host", "").split(":")[0] is_valid_host = False found_www_redirect = False for pattern in self.allowed_hosts: if host == pattern or ( pattern.startswith("*") and host.endswith(pattern[1:]) ): is_valid_host = True break elif "www." + host == pattern: found_www_redirect = True if is_valid_host: await self.app(scope, receive, send) else: response: Response if found_www_redirect and self.www_redirect: url = URL(scope=scope) redirect_url = url.replace(netloc="www." + url.netloc) response = RedirectResponse(url=str(redirect_url)) else: response = PlainTextResponse("Invalid host header", status_code=400) await response(scope, receive, send)
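# A usage sketch; per the wildcard rule asserted above, "*.example.com" matches any
# subdomain but not the bare domain, so both patterns are listed. Hosts illustrative.
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.trustedhost import TrustedHostMiddleware

app = Starlette(
    middleware=[
        Middleware(
            TrustedHostMiddleware, allowed_hosts=["example.com", "*.example.com"]
        )
    ]
)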
2,207
Python
35.196721
84
0.57227
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/middleware/sessions.py
import json import sys import typing from base64 import b64decode, b64encode import itsdangerous from itsdangerous.exc import BadSignature from starlette.datastructures import MutableHeaders, Secret from starlette.requests import HTTPConnection from starlette.types import ASGIApp, Message, Receive, Scope, Send if sys.version_info >= (3, 8): # pragma: no cover from typing import Literal else: # pragma: no cover from typing_extensions import Literal class SessionMiddleware: def __init__( self, app: ASGIApp, secret_key: typing.Union[str, Secret], session_cookie: str = "session", max_age: typing.Optional[int] = 14 * 24 * 60 * 60, # 14 days, in seconds path: str = "/", same_site: Literal["lax", "strict", "none"] = "lax", https_only: bool = False, ) -> None: self.app = app self.signer = itsdangerous.TimestampSigner(str(secret_key)) self.session_cookie = session_cookie self.max_age = max_age self.path = path self.security_flags = "httponly; samesite=" + same_site if https_only: # Secure flag can be used with HTTPS only self.security_flags += "; secure" async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] not in ("http", "websocket"): # pragma: no cover await self.app(scope, receive, send) return connection = HTTPConnection(scope) initial_session_was_empty = True if self.session_cookie in connection.cookies: data = connection.cookies[self.session_cookie].encode("utf-8") try: data = self.signer.unsign(data, max_age=self.max_age) scope["session"] = json.loads(b64decode(data)) initial_session_was_empty = False except BadSignature: scope["session"] = {} else: scope["session"] = {} async def send_wrapper(message: Message) -> None: if message["type"] == "http.response.start": if scope["session"]: # We have session data to persist. data = b64encode(json.dumps(scope["session"]).encode("utf-8")) data = self.signer.sign(data) headers = MutableHeaders(scope=message) header_value = "{session_cookie}={data}; path={path}; {max_age}{security_flags}".format( # noqa E501 session_cookie=self.session_cookie, data=data.decode("utf-8"), path=self.path, max_age=f"Max-Age={self.max_age}; " if self.max_age else "", security_flags=self.security_flags, ) headers.append("Set-Cookie", header_value) elif not initial_session_was_empty: # The session has been cleared. headers = MutableHeaders(scope=message) header_value = "{session_cookie}={data}; path={path}; {expires}{security_flags}".format( # noqa E501 session_cookie=self.session_cookie, data="null", path=self.path, expires="expires=Thu, 01 Jan 1970 00:00:00 GMT; ", security_flags=self.security_flags, ) headers.append("Set-Cookie", header_value) await send(message) await self.app(scope, receive, send_wrapper)
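# A round-trip sketch: request.session is the plain dict this middleware loads from
# (and persists to) the signed cookie. The secret key and endpoint are illustrative.
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import JSONResponse
from starlette.routing import Route

async def counter(request):
    request.session["hits"] = request.session.get("hits", 0) + 1
    return JSONResponse({"hits": request.session["hits"]})

app = Starlette(
    routes=[Route("/", counter)],
    middleware=[Middleware(SessionMiddleware, secret_key="CHANGE-ME", https_only=True)],
)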
3,612
Python
40.528735
121
0.555925
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jmespath-0.10.0.dist-info/DESCRIPTION.rst
JMESPath
========


.. image:: https://badges.gitter.im/Join Chat.svg
   :target: https://gitter.im/jmespath/chat

.. image:: https://travis-ci.org/jmespath/jmespath.py.svg?branch=develop
   :target: https://travis-ci.org/jmespath/jmespath.py

.. image:: https://codecov.io/github/jmespath/jmespath.py/coverage.svg?branch=develop
   :target: https://codecov.io/github/jmespath/jmespath.py?branch=develop


JMESPath (pronounced "james path") allows you to declaratively specify how to
extract elements from a JSON document.

For example, given this document::

    {"foo": {"bar": "baz"}}

The jmespath expression ``foo.bar`` will return "baz".

JMESPath also supports:

Referencing elements in a list.  Given the data::

    {"foo": {"bar": ["one", "two"]}}

The expression: ``foo.bar[0]`` will return "one".
You can also reference all the items in a list using the ``*`` syntax::

    {"foo": {"bar": [{"name": "one"}, {"name": "two"}]}}

The expression: ``foo.bar[*].name`` will return ["one", "two"].
Negative indexing is also supported (-1 refers to the last element
in the list).  Given the data above, the expression
``foo.bar[-1].name`` will return "two".

The ``*`` can also be used for hash types::

    {"foo": {"bar": {"name": "one"}, "baz": {"name": "two"}}}

The expression: ``foo.*.name`` will return ["one", "two"].


Installation
============

You can install JMESPath from pypi with:

.. code:: bash

    pip install jmespath


API
===

The ``jmespath.py`` library has two functions that operate on python data
structures.

You can use ``search`` and give it the jmespath expression and the data:

.. code:: python

    >>> import jmespath
    >>> jmespath.search('foo.bar', {'foo': {'bar': 'baz'}})
    'baz'

Similar to the ``re`` module, you can use the ``compile`` function to compile
the JMESPath expression and use this parsed expression to perform repeated
searches:

.. code:: python

    >>> import jmespath
    >>> expression = jmespath.compile('foo.bar')
    >>> expression.search({'foo': {'bar': 'baz'}})
    'baz'
    >>> expression.search({'foo': {'bar': 'other'}})
    'other'

This is useful if you're going to use the same jmespath expression to search
multiple documents.  This avoids having to reparse the JMESPath expression
each time you search a new document.

Options
-------

You can provide an instance of ``jmespath.Options`` to control how a JMESPath
expression is evaluated.  The most common scenario for using an ``Options``
instance is if you want to have ordered output of your dict keys.  To do this
you can use either of these options:

.. code:: python

    >>> import jmespath
    >>> jmespath.search('{a: a, b: b}',
    ...                 mydata,
    ...                 jmespath.Options(dict_cls=collections.OrderedDict))

    >>> import jmespath
    >>> parsed = jmespath.compile('{a: a, b: b}')
    >>> parsed.search(mydata,
    ...               jmespath.Options(dict_cls=collections.OrderedDict))

Custom Functions
~~~~~~~~~~~~~~~~

The JMESPath language has numerous
`built-in functions
<http://jmespath.org/specification.html#built-in-functions>`__, but it is
also possible to add your own custom functions.  Keep in mind that custom
function support in jmespath.py is experimental and the API may change based
on feedback.

**If you have a custom function that you've found useful, consider submitting
it to jmespath.site and propose that it be added to the JMESPath language.**
You can submit proposals
`here <https://github.com/jmespath/jmespath.site/issues>`__.

To create custom functions:

* Create a subclass of ``jmespath.functions.Functions``.
* Create a method with the name ``_func_<your function name>``.
* Apply the ``jmespath.functions.signature`` decorator that indicates the expected types of the function arguments. * Provide an instance of your subclass in a ``jmespath.Options`` object. Below are a few examples: .. code:: python import jmespath from jmespath import functions # 1. Create a subclass of functions.Functions. # The function.Functions base class has logic # that introspects all of its methods and automatically # registers your custom functions in its function table. class CustomFunctions(functions.Functions): # 2 and 3. Create a function that starts with _func_ # and decorate it with @signature which indicates its # expected types. # In this example, we're creating a jmespath function # called "unique_letters" that accepts a single argument # with an expected type "string". @functions.signature({'types': ['string']}) def _func_unique_letters(self, s): # Given a string s, return a sorted # string of unique letters: 'ccbbadd' -> 'abcd' return ''.join(sorted(set(s))) # Here's another example. This is creating # a jmespath function called "my_add" that expects # two arguments, both of which should be of type number. @functions.signature({'types': ['number']}, {'types': ['number']}) def _func_my_add(self, x, y): return x + y # 4. Provide an instance of your subclass in a Options object. options = jmespath.Options(custom_functions=CustomFunctions()) # Provide this value to jmespath.search: # This will print 3 print( jmespath.search( 'my_add(`1`, `2`)', {}, options=options) ) # This will print "abcd" print( jmespath.search( 'foo.bar | unique_letters(@)', {'foo': {'bar': 'ccbbadd'}}, options=options) ) Again, if you come up with useful functions that you think make sense in the JMESPath language (and make sense to implement in all JMESPath libraries, not just python), please let us know at `jmespath.site <https://github.com/jmespath/jmespath.site/issues>`__. Specification ============= If you'd like to learn more about the JMESPath language, you can check out the `JMESPath tutorial <http://jmespath.org/tutorial.html>`__. Also check out the `JMESPath examples page <http://jmespath.org/examples.html>`__ for examples of more complex jmespath queries. The grammar is specified using ABNF, as described in `RFC4234 <http://www.ietf.org/rfc/rfc4234.txt>`_. You can find the most up to date `grammar for JMESPath here <http://jmespath.org/specification.html#grammar>`__. You can read the full `JMESPath specification here <http://jmespath.org/specification.html>`__. Testing ======= In addition to the unit tests for the jmespath modules, there is a ``tests/compliance`` directory that contains .json files with test cases. This allows other implementations to verify they are producing the correct output. Each json file is grouped by feature. Discuss ======= Join us on our `Gitter channel <https://gitter.im/jmespath/chat>`__ if you want to chat or if you have any questions.
6,925
reStructuredText
29.782222
85
0.674513
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/attrs-20.1.0.dist-info/AUTHORS.rst
Credits
=======

``attrs`` is written and maintained by `Hynek Schlawack <https://hynek.me/>`_.

The development is kindly supported by `Variomedia AG <https://www.variomedia.de/>`_.

A full list of contributors can be found in `GitHub's overview <https://github.com/python-attrs/attrs/graphs/contributors>`_.

It’s the spiritual successor of `characteristic <https://characteristic.readthedocs.io/>`_ and aspires to fix some of its clunkiness and unfortunate decisions.
Both were inspired by Twisted’s `FancyEqMixin <https://twistedmatrix.com/documents/current/api/twisted.python.util.FancyEqMixin.html>`_ but both are implemented using class decorators because `subclassing is bad for you <https://www.youtube.com/watch?v=3MNVP9-hglc>`_, m’kay?
746
reStructuredText
61.249995
275
0.766756
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/filepost.py
from __future__ import absolute_import import binascii import codecs import os from io import BytesIO from .fields import RequestField from .packages import six from .packages.six import b writer = codecs.lookup("utf-8")[3] def choose_boundary(): """ Our embarrassingly-simple replacement for mimetools.choose_boundary. """ boundary = binascii.hexlify(os.urandom(16)) if not six.PY2: boundary = boundary.decode("ascii") return boundary def iter_field_objects(fields): """ Iterate over fields. Supports list of (k, v) tuples and dicts, and lists of :class:`~urllib3.fields.RequestField`. """ if isinstance(fields, dict): i = six.iteritems(fields) else: i = iter(fields) for field in i: if isinstance(field, RequestField): yield field else: yield RequestField.from_tuples(*field) def iter_fields(fields): """ .. deprecated:: 1.6 Iterate over fields. The addition of :class:`~urllib3.fields.RequestField` makes this function obsolete. Instead, use :func:`iter_field_objects`, which returns :class:`~urllib3.fields.RequestField` objects. Supports list of (k, v) tuples and dicts. """ if isinstance(fields, dict): return ((k, v) for k, v in six.iteritems(fields)) return ((k, v) for k, v in fields) def encode_multipart_formdata(fields, boundary=None): """ Encode a dictionary of ``fields`` using the multipart/form-data MIME format. :param fields: Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). :param boundary: If not specified, then a random boundary will be generated using :func:`urllib3.filepost.choose_boundary`. """ body = BytesIO() if boundary is None: boundary = choose_boundary() for field in iter_field_objects(fields): body.write(b("--%s\r\n" % (boundary))) writer(body).write(field.render_headers()) data = field.data if isinstance(data, int): data = str(data) # Backwards compatibility if isinstance(data, six.text_type): writer(body).write(data) else: body.write(data) body.write(b"\r\n") body.write(b("--%s--\r\n" % (boundary))) content_type = str("multipart/form-data; boundary=%s" % boundary) return body.getvalue(), content_type
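# A quick sketch of the main entry point: dict fields may be plain strings or
# (filename, data[, mimetype]) tuples, as handled by RequestField.from_tuples above.
from urllib3.filepost import encode_multipart_formdata

body, content_type = encode_multipart_formdata(
    {
        "field": "value",
        "attachment": ("report.txt", b"file contents", "text/plain"),
    }
)
# content_type is "multipart/form-data; boundary=<random hex boundary>"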
2,440
Python
23.656565
85
0.631557
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/_version.py
# This file is protected via CODEOWNERS __version__ = "1.26.16"
64
Python
20.66666
39
0.6875
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/connection.py
from __future__ import absolute_import import datetime import logging import os import re import socket import warnings from socket import error as SocketError from socket import timeout as SocketTimeout from .packages import six from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection from .packages.six.moves.http_client import HTTPException # noqa: F401 from .util.proxy import create_proxy_ssl_context try: # Compiled with SSL? import ssl BaseSSLError = ssl.SSLError except (ImportError, AttributeError): # Platform-specific: No SSL. ssl = None class BaseSSLError(BaseException): pass try: # Python 3: not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError except NameError: # Python 2 class ConnectionError(Exception): pass try: # Python 3: # Not a no-op, we're adding this to the namespace so it can be imported. BrokenPipeError = BrokenPipeError except NameError: # Python 2: class BrokenPipeError(Exception): pass from ._collections import HTTPHeaderDict # noqa (historical, removed in v2) from ._version import __version__ from .exceptions import ( ConnectTimeoutError, NewConnectionError, SubjectAltNameWarning, SystemTimeWarning, ) from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection from .util.ssl_ import ( assert_fingerprint, create_urllib3_context, is_ipaddress, resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, ) from .util.ssl_match_hostname import CertificateError, match_hostname log = logging.getLogger(__name__) port_by_scheme = {"http": 80, "https": 443} # When it comes time to update this value as a part of regular maintenance # (ie test_recent_date is failing) update it to ~6 months before the current date. RECENT_DATE = datetime.date(2022, 1, 1) _CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") class HTTPConnection(_HTTPConnection, object): """ Based on :class:`http.client.HTTPConnection` but provides an extra constructor backwards-compatibility layer between older and newer Pythons. Additional keyword parameters are used to configure attributes of the connection. Accepted parameters include: - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool` - ``source_address``: Set the source address for the current connection. - ``socket_options``: Set specific options on the underlying socket. If not specified, then defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. For example, if you wish to enable TCP Keep Alive in addition to the defaults, you might pass: .. code-block:: python HTTPConnection.default_socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ] Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ default_port = port_by_scheme["http"] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] #: Whether this connection verifies the host's certificate. is_verified = False #: Whether this proxy connection (if used) verifies the proxy host's #: certificate. proxy_is_verified = None def __init__(self, *args, **kw): if not six.PY2: kw.pop("strict", None) # Pre-set source_address. self.source_address = kw.get("source_address") #: The socket options provided by the user. If no options are #: provided, we use the default options. 
self.socket_options = kw.pop("socket_options", self.default_socket_options) # Proxy options provided by the user. self.proxy = kw.pop("proxy", None) self.proxy_config = kw.pop("proxy_config", None) _HTTPConnection.__init__(self, *args, **kw) @property def host(self): """ Getter method to remove any trailing dots that indicate the hostname is an FQDN. In general, SSL certificates don't include the trailing dot indicating a fully-qualified domain name, and thus, they don't validate properly when checked against a domain name that includes the dot. In addition, some servers may not expect to receive the trailing dot when provided. However, the hostname with trailing dot is critical to DNS resolution; doing a lookup with the trailing dot will properly only resolve the appropriate FQDN, whereas a lookup without a trailing dot will search the system's search domain list. Thus, it's important to keep the original host around for use only in those cases where it's appropriate (i.e., when doing DNS lookup to establish the actual TCP connection across which we're going to send HTTP requests). """ return self._dns_host.rstrip(".") @host.setter def host(self, value): """ Setter for the `host` property. We assume that only urllib3 uses the _dns_host attribute; httplib itself only uses `host`, and it seems reasonable that other libraries follow suit. """ self._dns_host = value def _new_conn(self): """Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ extra_kw = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: conn = connection.create_connection( (self._dns_host, self.port), self.timeout, **extra_kw ) except SocketTimeout: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout), ) except SocketError as e: raise NewConnectionError( self, "Failed to establish a new connection: %s" % e ) return conn def _is_using_tunnel(self): # Google App Engine's httplib does not define _tunnel_host return getattr(self, "_tunnel_host", None) def _prepare_conn(self, conn): self.sock = conn if self._is_using_tunnel(): # TODO: Fix tunnel so it doesn't depend on self.sock state. self._tunnel() # Mark this connection as not reusable self.auto_open = 0 def connect(self): conn = self._new_conn() self._prepare_conn(conn) def putrequest(self, method, url, *args, **kwargs): """ """ # Empty docstring because the indentation of CPython's implementation # is broken but we don't want this method in our documentation. match = _CONTAINS_CONTROL_CHAR_RE.search(method) if match: raise ValueError( "Method cannot contain non-token characters %r (found at least %r)" % (method, match.group()) ) return _HTTPConnection.putrequest(self, method, url, *args, **kwargs) def putheader(self, header, *values): """ """ if not any(isinstance(v, str) and v == SKIP_HEADER for v in values): _HTTPConnection.putheader(self, header, *values) elif six.ensure_str(header.lower()) not in SKIPPABLE_HEADERS: raise ValueError( "urllib3.util.SKIP_HEADER only supports '%s'" % ("', '".join(map(str.title, sorted(SKIPPABLE_HEADERS))),) ) def request(self, method, url, body=None, headers=None): # Update the inner socket's timeout value to send the request. # This only triggers if the connection is re-used. 
if getattr(self, "sock", None) is not None: self.sock.settimeout(self.timeout) if headers is None: headers = {} else: # Avoid modifying the headers passed into .request() headers = headers.copy() if "user-agent" not in (six.ensure_str(k.lower()) for k in headers): headers["User-Agent"] = _get_default_user_agent() super(HTTPConnection, self).request(method, url, body=body, headers=headers) def request_chunked(self, method, url, body=None, headers=None): """ Alternative to the common request method, which sends the body with chunked encoding and not as one block """ headers = headers or {} header_keys = set([six.ensure_str(k.lower()) for k in headers]) skip_accept_encoding = "accept-encoding" in header_keys skip_host = "host" in header_keys self.putrequest( method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) if "user-agent" not in header_keys: self.putheader("User-Agent", _get_default_user_agent()) for header, value in headers.items(): self.putheader(header, value) if "transfer-encoding" not in header_keys: self.putheader("Transfer-Encoding", "chunked") self.endheaders() if body is not None: stringish_types = six.string_types + (bytes,) if isinstance(body, stringish_types): body = (body,) for chunk in body: if not chunk: continue if not isinstance(chunk, bytes): chunk = chunk.encode("utf8") len_str = hex(len(chunk))[2:] to_send = bytearray(len_str.encode()) to_send += b"\r\n" to_send += chunk to_send += b"\r\n" self.send(to_send) # After the if clause, to always have a closed body self.send(b"0\r\n\r\n") class HTTPSConnection(HTTPConnection): """ Many of the parameters to this constructor are passed to the underlying SSL socket by means of :py:func:`urllib3.util.ssl_wrap_socket`. """ default_port = port_by_scheme["https"] cert_reqs = None ca_certs = None ca_cert_dir = None ca_cert_data = None ssl_version = None assert_fingerprint = None tls_in_tls_required = False def __init__( self, host, port=None, key_file=None, cert_file=None, key_password=None, strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, ssl_context=None, server_hostname=None, **kw ): HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw) self.key_file = key_file self.cert_file = cert_file self.key_password = key_password self.ssl_context = ssl_context self.server_hostname = server_hostname # Required property for Google AppEngine 1.9.0 which otherwise causes # HTTPS requests to go out as HTTP. (See Issue #356) self._protocol = "https" def set_cert( self, key_file=None, cert_file=None, cert_reqs=None, key_password=None, ca_certs=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None, ca_cert_data=None, ): """ This method should only be called once, before the connection is used. """ # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also # have an SSLContext object in which case we'll use its verify_mode. 
if cert_reqs is None: if self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode else: cert_reqs = resolve_cert_reqs(None) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.key_password = key_password self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) self.ca_cert_data = ca_cert_data def connect(self): # Add certificate verification self.sock = conn = self._new_conn() hostname = self.host tls_in_tls = False if self._is_using_tunnel(): if self.tls_in_tls_required: self.sock = conn = self._connect_tls_proxy(hostname, conn) tls_in_tls = True # Calls self._set_hostport(), so self.host is # self._tunnel_host below. self._tunnel() # Mark this connection as not reusable self.auto_open = 0 # Override the host with the one we're requesting data from. hostname = self._tunnel_host server_hostname = hostname if self.server_hostname is not None: server_hostname = self.server_hostname is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: warnings.warn( ( "System time is way off (before {0}). This will probably " "lead to SSL verification errors" ).format(RECENT_DATE), SystemTimeWarning, ) # Wrap socket using verification with the root certs in # trusted_root_certs default_ssl_context = False if self.ssl_context is None: default_ssl_context = True self.ssl_context = create_urllib3_context( ssl_version=resolve_ssl_version(self.ssl_version), cert_reqs=resolve_cert_reqs(self.cert_reqs), ) context = self.ssl_context context.verify_mode = resolve_cert_reqs(self.cert_reqs) # Try to load OS default certs if none are given. # Works well on Windows (requires Python3.4+) if ( not self.ca_certs and not self.ca_cert_dir and not self.ca_cert_data and default_ssl_context and hasattr(context, "load_default_certs") ): context.load_default_certs() self.sock = ssl_wrap_socket( sock=conn, keyfile=self.key_file, certfile=self.cert_file, key_password=self.key_password, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, server_hostname=server_hostname, ssl_context=context, tls_in_tls=tls_in_tls, ) # If we're using all defaults and the connection # is TLSv1 or TLSv1.1 we throw a DeprecationWarning # for the host. if ( default_ssl_context and self.ssl_version is None and hasattr(self.sock, "version") and self.sock.version() in {"TLSv1", "TLSv1.1"} ): warnings.warn( "Negotiating TLSv1/TLSv1.1 by default is deprecated " "and will be disabled in urllib3 v2.0.0. Connecting to " "'%s' with '%s' can be enabled by explicitly opting-in " "with 'ssl_version'" % (self.host, self.sock.version()), DeprecationWarning, ) if self.assert_fingerprint: assert_fingerprint( self.sock.getpeercert(binary_form=True), self.assert_fingerprint ) elif ( context.verify_mode != ssl.CERT_NONE and not getattr(context, "check_hostname", False) and self.assert_hostname is not False ): # While urllib3 attempts to always turn off hostname matching from # the TLS library, this cannot always be done. So we check whether # the TLS Library still thinks it's matching hostnames. cert = self.sock.getpeercert() if not cert.get("subjectAltName", ()): warnings.warn( ( "Certificate for {0} has no `subjectAltName`, falling back to check for a " "`commonName` for now. This feature is being removed by major browsers and " "deprecated by RFC 2818. 
(See https://github.com/urllib3/urllib3/issues/497 " "for details.)".format(hostname) ), SubjectAltNameWarning, ) _match_hostname(cert, self.assert_hostname or server_hostname) self.is_verified = ( context.verify_mode == ssl.CERT_REQUIRED or self.assert_fingerprint is not None ) def _connect_tls_proxy(self, hostname, conn): """ Establish a TLS connection to the proxy using the provided SSL context. """ proxy_config = self.proxy_config ssl_context = proxy_config.ssl_context if ssl_context: # If the user provided a proxy context, we assume CA and client # certificates have already been set return ssl_wrap_socket( sock=conn, server_hostname=hostname, ssl_context=ssl_context, ) ssl_context = create_proxy_ssl_context( self.ssl_version, self.cert_reqs, self.ca_certs, self.ca_cert_dir, self.ca_cert_data, ) # If no cert was provided, use only the default options for server # certificate validation socket = ssl_wrap_socket( sock=conn, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, server_hostname=hostname, ssl_context=ssl_context, ) if ssl_context.verify_mode != ssl.CERT_NONE and not getattr( ssl_context, "check_hostname", False ): # While urllib3 attempts to always turn off hostname matching from # the TLS library, this cannot always be done. So we check whether # the TLS Library still thinks it's matching hostnames. cert = socket.getpeercert() if not cert.get("subjectAltName", ()): warnings.warn( ( "Certificate for {0} has no `subjectAltName`, falling back to check for a " "`commonName` for now. This feature is being removed by major browsers and " "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 " "for details.)".format(hostname) ), SubjectAltNameWarning, ) _match_hostname(cert, hostname) self.proxy_is_verified = ssl_context.verify_mode == ssl.CERT_REQUIRED return socket def _match_hostname(cert, asserted_hostname): # Our upstream implementation of ssl.match_hostname() # only applies this normalization to IP addresses so it doesn't # match DNS SANs so we do the same thing! stripped_hostname = asserted_hostname.strip("u[]") if is_ipaddress(stripped_hostname): asserted_hostname = stripped_hostname try: match_hostname(cert, asserted_hostname) except CertificateError as e: log.warning( "Certificate did not match expected hostname: %s. Certificate: %s", asserted_hostname, cert, ) # Add cert to exception and reraise so client code can inspect # the cert when catching the exception, if they want to e._peer_cert = cert raise def _get_default_user_agent(): return "python-urllib3/%s" % __version__ class DummyConnection(object): """Used to detect a failed ConnectionCls import.""" pass if not ssl: HTTPSConnection = DummyConnection # noqa: F811 VerifiedHTTPSConnection = HTTPSConnection
20,300
Python
34.429319
101
0.597291
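A minimal usage sketch for the connection classes above. This is illustrative only: the host, port, and timeout are placeholder values, and it exercises nothing beyond what this file defines (``default_socket_options``, the ``socket_options`` keyword, the ``request()`` override) plus the response cycle inherited from ``http.client``.

import socket

from urllib3.connection import HTTPConnection

# Keep the library default (TCP_NODELAY) and additionally enable TCP
# keep-alive, as the class docstring suggests. Placeholder host and port.
opts = HTTPConnection.default_socket_options + [
    (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]

conn = HTTPConnection("example.com", 80, timeout=5.0, socket_options=opts)
conn.request("GET", "/")   # urllib3's request() fills in a User-Agent header
resp = conn.getresponse()  # a plain http.client.HTTPResponse
print(resp.status)
conn.close()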
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/response.py
from __future__ import absolute_import import io import logging import sys import warnings import zlib from contextlib import contextmanager from socket import error as SocketError from socket import timeout as SocketTimeout try: try: import brotlicffi as brotli except ImportError: import brotli except ImportError: brotli = None from . import util from ._collections import HTTPHeaderDict from .connection import BaseSSLError, HTTPException from .exceptions import ( BodyNotHttplibCompatible, DecodeError, HTTPError, IncompleteRead, InvalidChunkLength, InvalidHeader, ProtocolError, ReadTimeoutError, ResponseNotChunked, SSLError, ) from .packages import six from .util.response import is_fp_closed, is_response_to_head log = logging.getLogger(__name__) class DeflateDecoder(object): def __init__(self): self._first_try = True self._data = b"" self._obj = zlib.decompressobj() def __getattr__(self, name): return getattr(self._obj, name) def decompress(self, data): if not data: return data if not self._first_try: return self._obj.decompress(data) self._data += data try: decompressed = self._obj.decompress(data) if decompressed: self._first_try = False self._data = None return decompressed except zlib.error: self._first_try = False self._obj = zlib.decompressobj(-zlib.MAX_WBITS) try: return self.decompress(self._data) finally: self._data = None class GzipDecoderState(object): FIRST_MEMBER = 0 OTHER_MEMBERS = 1 SWALLOW_DATA = 2 class GzipDecoder(object): def __init__(self): self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) self._state = GzipDecoderState.FIRST_MEMBER def __getattr__(self, name): return getattr(self._obj, name) def decompress(self, data): ret = bytearray() if self._state == GzipDecoderState.SWALLOW_DATA or not data: return bytes(ret) while True: try: ret += self._obj.decompress(data) except zlib.error: previous_state = self._state # Ignore data after the first error self._state = GzipDecoderState.SWALLOW_DATA if previous_state == GzipDecoderState.OTHER_MEMBERS: # Allow trailing garbage acceptable in other gzip clients return bytes(ret) raise data = self._obj.unused_data if not data: return bytes(ret) self._state = GzipDecoderState.OTHER_MEMBERS self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) if brotli is not None: class BrotliDecoder(object): # Supports both 'brotlipy' and 'Brotli' packages # since they share an import name. The top branches # are for 'brotlipy' and bottom branches for 'Brotli' def __init__(self): self._obj = brotli.Decompressor() if hasattr(self._obj, "decompress"): self.decompress = self._obj.decompress else: self.decompress = self._obj.process def flush(self): if hasattr(self._obj, "flush"): return self._obj.flush() return b"" class MultiDecoder(object): """ From RFC7231: If one or more encodings have been applied to a representation, the sender that applied the encodings MUST generate a Content-Encoding header field that lists the content codings in the order in which they were applied. """ def __init__(self, modes): self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")] def flush(self): return self._decoders[0].flush() def decompress(self, data): for d in reversed(self._decoders): data = d.decompress(data) return data def _get_decoder(mode): if "," in mode: return MultiDecoder(mode) if mode == "gzip": return GzipDecoder() if brotli is not None and mode == "br": return BrotliDecoder() return DeflateDecoder() class HTTPResponse(io.IOBase): """ HTTP Response container. 
Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is loaded and decoded on-demand when the ``data`` property is accessed. This class is also compatible with the Python standard library's :mod:`io` module, and can hence be treated as a readable object in the context of that framework. Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`: :param preload_content: If True, the response's body will be preloaded during construction. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. :param original_response: When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse` object, it's convenient to include the original for debug purposes. It's otherwise unused. :param retries: The retries contains the last :class:`~urllib3.util.retry.Retry` that was used during the request. :param enforce_content_length: Enforce content length checking. Body returned by server must match value of Content-Length header, if present. Otherwise, raise error. """ CONTENT_DECODERS = ["gzip", "deflate"] if brotli is not None: CONTENT_DECODERS += ["br"] REDIRECT_STATUSES = [301, 302, 303, 307, 308] def __init__( self, body="", headers=None, status=0, version=0, reason=None, strict=0, preload_content=True, decode_content=True, original_response=None, pool=None, connection=None, msg=None, retries=None, enforce_content_length=False, request_method=None, request_url=None, auto_close=True, ): if isinstance(headers, HTTPHeaderDict): self.headers = headers else: self.headers = HTTPHeaderDict(headers) self.status = status self.version = version self.reason = reason self.strict = strict self.decode_content = decode_content self.retries = retries self.enforce_content_length = enforce_content_length self.auto_close = auto_close self._decoder = None self._body = None self._fp = None self._original_response = original_response self._fp_bytes_read = 0 self.msg = msg self._request_url = request_url if body and isinstance(body, (six.string_types, bytes)): self._body = body self._pool = pool self._connection = connection if hasattr(body, "read"): self._fp = body # Are we using the chunked-style of transfer encoding? self.chunked = False self.chunk_left = None tr_enc = self.headers.get("transfer-encoding", "").lower() # Don't incur the penalty of creating a list and then discarding it encodings = (enc.strip() for enc in tr_enc.split(",")) if "chunked" in encodings: self.chunked = True # Determine length of response self.length_remaining = self._init_length(request_method) # If requested, preload the body. if preload_content and not self._body: self._body = self.read(decode_content=decode_content) def get_redirect_location(self): """ Should we redirect and where to? :returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code. """ if self.status in self.REDIRECT_STATUSES: return self.headers.get("location") return False def release_conn(self): if not self._pool or not self._connection: return self._pool._put_conn(self._connection) self._connection = None def drain_conn(self): """ Read and discard any remaining HTTP response data in the response connection. Unread data in the HTTPResponse connection blocks the connection from being released back to the pool. 
""" try: self.read() except (HTTPError, SocketError, BaseSSLError, HTTPException): pass @property def data(self): # For backwards-compat with earlier urllib3 0.4 and earlier. if self._body: return self._body if self._fp: return self.read(cache_content=True) @property def connection(self): return self._connection def isclosed(self): return is_fp_closed(self._fp) def tell(self): """ Obtain the number of bytes pulled over the wire so far. May differ from the amount of content returned by :meth:``urllib3.response.HTTPResponse.read`` if bytes are encoded on the wire (e.g, compressed). """ return self._fp_bytes_read def _init_length(self, request_method): """ Set initial length value for Response content if available. """ length = self.headers.get("content-length") if length is not None: if self.chunked: # This Response will fail with an IncompleteRead if it can't be # received as chunked. This method falls back to attempt reading # the response before raising an exception. log.warning( "Received response with both Content-Length and " "Transfer-Encoding set. This is expressly forbidden " "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " "attempting to process response as Transfer-Encoding: " "chunked." ) return None try: # RFC 7230 section 3.3.2 specifies multiple content lengths can # be sent in a single Content-Length header # (e.g. Content-Length: 42, 42). This line ensures the values # are all valid ints and that as long as the `set` length is 1, # all values are the same. Otherwise, the header is invalid. lengths = set([int(val) for val in length.split(",")]) if len(lengths) > 1: raise InvalidHeader( "Content-Length contained multiple " "unmatching values (%s)" % length ) length = lengths.pop() except ValueError: length = None else: if length < 0: length = None # Convert status to int for comparison # In some cases, httplib returns a status of "_UNKNOWN" try: status = int(self.status) except ValueError: status = 0 # Check for responses that shouldn't include a body if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD": length = 0 return length def _init_decoder(self): """ Set-up the _decoder attribute if necessary. """ # Note: content-encoding value should be case-insensitive, per RFC 7230 # Section 3.2 content_encoding = self.headers.get("content-encoding", "").lower() if self._decoder is None: if content_encoding in self.CONTENT_DECODERS: self._decoder = _get_decoder(content_encoding) elif "," in content_encoding: encodings = [ e.strip() for e in content_encoding.split(",") if e.strip() in self.CONTENT_DECODERS ] if len(encodings): self._decoder = _get_decoder(content_encoding) DECODER_ERROR_CLASSES = (IOError, zlib.error) if brotli is not None: DECODER_ERROR_CLASSES += (brotli.error,) def _decode(self, data, decode_content, flush_decoder): """ Decode the data passed in and potentially flush the decoder. """ if not decode_content: return data try: if self._decoder: data = self._decoder.decompress(data) except self.DECODER_ERROR_CLASSES as e: content_encoding = self.headers.get("content-encoding", "").lower() raise DecodeError( "Received response with content-encoding: %s, but " "failed to decode it." % content_encoding, e, ) if flush_decoder: data += self._flush_decoder() return data def _flush_decoder(self): """ Flushes the decoder. Should only be called if the decoder is actually being used. 
""" if self._decoder: buf = self._decoder.decompress(b"") return buf + self._decoder.flush() return b"" @contextmanager def _error_catcher(self): """ Catch low-level python exceptions, instead re-raising urllib3 variants, so that low-level exceptions are not leaked in the high-level api. On exit, release the connection back to the pool. """ clean_exit = False try: try: yield except SocketTimeout: # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but # there is yet no clean way to get at it from this context. raise ReadTimeoutError(self._pool, None, "Read timed out.") except BaseSSLError as e: # FIXME: Is there a better way to differentiate between SSLErrors? if "read operation timed out" not in str(e): # SSL errors related to framing/MAC get wrapped and reraised here raise SSLError(e) raise ReadTimeoutError(self._pool, None, "Read timed out.") except (HTTPException, SocketError) as e: # This includes IncompleteRead. raise ProtocolError("Connection broken: %r" % e, e) # If no exception is thrown, we should avoid cleaning up # unnecessarily. clean_exit = True finally: # If we didn't terminate cleanly, we need to throw away our # connection. if not clean_exit: # The response may not be closed but we're not going to use it # anymore so close it now to ensure that the connection is # released back to the pool. if self._original_response: self._original_response.close() # Closing the response may not actually be sufficient to close # everything, so if we have a hold of the connection close that # too. if self._connection: self._connection.close() # If we hold the original response but it's closed now, we should # return the connection back to the pool. if self._original_response and self._original_response.isclosed(): self.release_conn() def _fp_read(self, amt): """ Read a response with the thought that reading the number of bytes larger than can fit in a 32-bit int at a time via SSL in some known cases leads to an overflow error that has to be prevented if `amt` or `self.length_remaining` indicate that a problem may happen. The known cases: * 3.8 <= CPython < 3.9.7 because of a bug https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900. * urllib3 injected with pyOpenSSL-backed SSL-support. * CPython < 3.10 only when `amt` does not fit 32-bit int. """ assert self._fp c_int_max = 2 ** 31 - 1 if ( ( (amt and amt > c_int_max) or (self.length_remaining and self.length_remaining > c_int_max) ) and not util.IS_SECURETRANSPORT and (util.IS_PYOPENSSL or sys.version_info < (3, 10)) ): buffer = io.BytesIO() # Besides `max_chunk_amt` being a maximum chunk size, it # affects memory overhead of reading a response by this # method in CPython. # `c_int_max` equal to 2 GiB - 1 byte is the actual maximum # chunk size that does not lead to an overflow error, but # 256 MiB is a compromise. max_chunk_amt = 2 ** 28 while amt is None or amt != 0: if amt is not None: chunk_amt = min(amt, max_chunk_amt) amt -= chunk_amt else: chunk_amt = max_chunk_amt data = self._fp.read(chunk_amt) if not data: break buffer.write(data) del data # to reduce peak memory usage by `max_chunk_amt`. return buffer.getvalue() else: # StringIO doesn't like amt=None return self._fp.read(amt) if amt is not None else self._fp.read() def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`http.client.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``. :param amt: How much of the content to read. 
If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content

        if self._fp is None:
            return

        flush_decoder = False
        fp_closed = getattr(self._fp, "closed", False)

        with self._error_catcher():
            data = self._fp_read(amt) if not fp_closed else b""

            if amt is None:
                flush_decoder = True
            else:
                cache_content = False
                if (
                    amt != 0 and not data
                ):  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True
                    if self.enforce_content_length and self.length_remaining not in (
                        0,
                        None,
                    ):
                        # This is an edge case that httplib failed to cover due
                        # to concerns of backward compatibility. We're
                        # addressing it here to make sure IncompleteRead is
                        # raised during streaming, so all calls with incorrect
                        # Content-Length are caught.
                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)

        if data:
            self._fp_bytes_read += len(data)
            if self.length_remaining is not None:
                self.length_remaining -= len(data)

            data = self._decode(data, decode_content, flush_decoder)

            if cache_content:
                self._body = data

        return data

    def stream(self, amt=2 ** 16, decode_content=None):
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.

        :param amt:
            How much of the content to read. The generator will return up to
            this much data per iteration, but may return less. This is
            particularly likely when using compressed data. However, the empty
            string will never be returned.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        if self.chunked and self.supports_chunked_reads():
            for line in self.read_chunked(amt, decode_content=decode_content):
                yield line
        else:
            while not is_fp_closed(self._fp):
                data = self.read(amt=amt, decode_content=decode_content)

                if data:
                    yield data

    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`http.client.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.

        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
""" headers = r.msg if not isinstance(headers, HTTPHeaderDict): if six.PY2: # Python 2.7 headers = HTTPHeaderDict.from_httplib(headers) else: headers = HTTPHeaderDict(headers.items()) # HTTPResponse objects in Python 3 don't have a .strict attribute strict = getattr(r, "strict", 0) resp = ResponseCls( body=r, headers=headers, status=r.status, version=r.version, reason=r.reason, strict=strict, original_response=r, **response_kw ) return resp # Backwards-compatibility methods for http.client.HTTPResponse def getheaders(self): warnings.warn( "HTTPResponse.getheaders() is deprecated and will be removed " "in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.", category=DeprecationWarning, stacklevel=2, ) return self.headers def getheader(self, name, default=None): warnings.warn( "HTTPResponse.getheader() is deprecated and will be removed " "in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).", category=DeprecationWarning, stacklevel=2, ) return self.headers.get(name, default) # Backwards compatibility for http.cookiejar def info(self): return self.headers # Overrides from io.IOBase def close(self): if not self.closed: self._fp.close() if self._connection: self._connection.close() if not self.auto_close: io.IOBase.close(self) @property def closed(self): if not self.auto_close: return io.IOBase.closed.__get__(self) elif self._fp is None: return True elif hasattr(self._fp, "isclosed"): return self._fp.isclosed() elif hasattr(self._fp, "closed"): return self._fp.closed else: return True def fileno(self): if self._fp is None: raise IOError("HTTPResponse has no file to get a fileno from") elif hasattr(self._fp, "fileno"): return self._fp.fileno() else: raise IOError( "The file-like object this HTTPResponse is wrapped " "around has no file descriptor" ) def flush(self): if ( self._fp is not None and hasattr(self._fp, "flush") and not getattr(self._fp, "closed", False) ): return self._fp.flush() def readable(self): # This method is required for `io` module compatibility. return True def readinto(self, b): # This method is required for `io` module compatibility. temp = self.read(len(b)) if len(temp) == 0: return 0 else: b[: len(temp)] = temp return len(temp) def supports_chunked_reads(self): """ Checks if the underlying file-like object looks like a :class:`http.client.HTTPResponse` object. We do this by testing for the fp attribute. If it is present we assume it returns raw chunks as processed by read_chunked(). """ return hasattr(self._fp, "fp") def _update_chunk_length(self): # First, we'll figure out length of a chunk and then # we'll try to read it from socket. if self.chunk_left is not None: return line = self._fp.fp.readline() line = line.split(b";", 1)[0] try: self.chunk_left = int(line, 16) except ValueError: # Invalid chunked protocol response, abort. self.close() raise InvalidChunkLength(self, line) def _handle_chunk(self, amt): returned_chunk = None if amt is None: chunk = self._fp._safe_read(self.chunk_left) returned_chunk = chunk self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. self.chunk_left = None elif amt < self.chunk_left: value = self._fp._safe_read(amt) self.chunk_left = self.chunk_left - amt returned_chunk = value elif amt == self.chunk_left: value = self._fp._safe_read(amt) self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. self.chunk_left = None returned_chunk = value else: # amt > self.chunk_left returned_chunk = self._fp._safe_read(self.chunk_left) self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. 
self.chunk_left = None

        return returned_chunk

    def read_chunked(self, amt=None, decode_content=None):
        """
        Similar to :meth:`HTTPResponse.read`, but with an additional
        parameter: ``decode_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with better structured logic.
        if not self.chunked:
            raise ResponseNotChunked(
                "Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing."
            )
        if not self.supports_chunked_reads():
            raise BodyNotHttplibCompatible(
                "Body should be http.client.HTTPResponse like. "
                "It should have an fp attribute which returns raw chunks."
            )

        with self._error_catcher():
            # Don't bother reading the body of a HEAD request.
            if self._original_response and is_response_to_head(self._original_response):
                self._original_response.close()
                return

            # If a response is already read and closed
            # then return immediately.
            if self._fp.fp is None:
                return

            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(
                    chunk, decode_content=decode_content, flush_decoder=False
                )
                if decoded:
                    yield decoded

            if decode_content:
                # On CPython and PyPy, we should never need to flush the
                # decoder. However, on Jython we *might* need to, so
                # let's defensively do it anyway.
                decoded = self._flush_decoder()
                if decoded:  # Platform-specific: Jython.
                    yield decoded

            # Chunk content ends with \r\n: discard it.
            while True:
                line = self._fp.fp.readline()
                if not line:
                    # Some sites may not end with '\r\n'.
                    break
                if line == b"\r\n":
                    break

            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()

    def geturl(self):
        """
        Returns the URL that was the source of this response.
        If the request that generated this response redirected, this method
        will return the final redirect location.
        """
        if self.retries is not None and len(self.retries.history):
            return self.retries.history[-1].redirect_location
        else:
            return self._request_url

    def __iter__(self):
        buffer = []
        for chunk in self.stream(decode_content=True):
            if b"\n" in chunk:
                chunk = chunk.split(b"\n")
                yield b"".join(buffer) + chunk[0] + b"\n"
                for x in chunk[1:-1]:
                    yield x + b"\n"
                if chunk[-1]:
                    buffer = [chunk[-1]]
                else:
                    buffer = []
            else:
                buffer.append(chunk)
        if buffer:
            yield b"".join(buffer)
30,761
Python
33.72009
110
0.558694
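To make the deferred-read behaviour above concrete, here is a small sketch. The in-memory payload is hypothetical and simply stands in for a socket body; with ``preload_content=False`` nothing is consumed until ``read()`` or ``stream()`` is called.

import io

from urllib3.response import HTTPResponse

raw = io.BytesIO(b"hello world")  # hypothetical body; anything with .read() works
resp = HTTPResponse(body=raw, status=200, preload_content=False)

for chunk in resp.stream(amt=4):
    print(chunk)      # b'hell', b'o wo', b'rld'

print(resp.tell())    # 11 -- bytes pulled "over the wire" so far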
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/exceptions.py
from __future__ import absolute_import from .packages.six.moves.http_client import IncompleteRead as httplib_IncompleteRead # Base Exceptions class HTTPError(Exception): """Base exception used by this module.""" pass class HTTPWarning(Warning): """Base warning used by this module.""" pass class PoolError(HTTPError): """Base exception for errors caused within a pool.""" def __init__(self, pool, message): self.pool = pool HTTPError.__init__(self, "%s: %s" % (pool, message)) def __reduce__(self): # For pickling purposes. return self.__class__, (None, None) class RequestError(PoolError): """Base exception for PoolErrors that have associated URLs.""" def __init__(self, pool, url, message): self.url = url PoolError.__init__(self, pool, message) def __reduce__(self): # For pickling purposes. return self.__class__, (None, self.url, None) class SSLError(HTTPError): """Raised when SSL certificate fails in an HTTPS connection.""" pass class ProxyError(HTTPError): """Raised when the connection to a proxy fails.""" def __init__(self, message, error, *args): super(ProxyError, self).__init__(message, error, *args) self.original_error = error class DecodeError(HTTPError): """Raised when automatic decoding based on Content-Type fails.""" pass class ProtocolError(HTTPError): """Raised when something unexpected happens mid-request/response.""" pass #: Renamed to ProtocolError but aliased for backwards compatibility. ConnectionError = ProtocolError # Leaf Exceptions class MaxRetryError(RequestError): """Raised when the maximum number of retries is exceeded. :param pool: The connection pool :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool` :param string url: The requested Url :param exceptions.Exception reason: The underlying error """ def __init__(self, pool, url, reason=None): self.reason = reason message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason) RequestError.__init__(self, pool, url, message) class HostChangedError(RequestError): """Raised when an existing pool gets a request for a foreign host.""" def __init__(self, pool, url, retries=3): message = "Tried to open a foreign host with url: %s" % url RequestError.__init__(self, pool, url, message) self.retries = retries class TimeoutStateError(HTTPError): """Raised when passing an invalid state to a timeout""" pass class TimeoutError(HTTPError): """Raised when a socket timeout error occurs. Catching this error will catch both :exc:`ReadTimeoutErrors <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`. """ pass class ReadTimeoutError(TimeoutError, RequestError): """Raised when a socket timeout occurs while receiving data from a server""" pass # This timeout error does not have a URL attached and needs to inherit from the # base HTTPError class ConnectTimeoutError(TimeoutError): """Raised when a socket timeout occurs while connecting to a server""" pass class NewConnectionError(ConnectTimeoutError, PoolError): """Raised when we fail to establish a new connection. 
    Usually ECONNREFUSED."""

    pass


class EmptyPoolError(PoolError):
    """Raised when a pool runs out of connections and no more are allowed."""

    pass


class ClosedPoolError(PoolError):
    """Raised when a request enters a pool after the pool has been closed."""

    pass


class LocationValueError(ValueError, HTTPError):
    """Raised when there is something wrong with a given URL input."""

    pass


class LocationParseError(LocationValueError):
    """Raised when get_host or similar fails to parse the URL input."""

    def __init__(self, location):
        message = "Failed to parse: %s" % location
        HTTPError.__init__(self, message)

        self.location = location


class URLSchemeUnknown(LocationValueError):
    """Raised when a URL input has an unsupported scheme."""

    def __init__(self, scheme):
        message = "Not supported URL scheme %s" % scheme
        super(URLSchemeUnknown, self).__init__(message)

        self.scheme = scheme


class ResponseError(HTTPError):
    """Used as a container for an error reason supplied in a MaxRetryError."""

    GENERIC_ERROR = "too many error responses"
    SPECIFIC_ERROR = "too many {status_code} error responses"


class SecurityWarning(HTTPWarning):
    """Warned when performing security-reducing actions"""

    pass


class SubjectAltNameWarning(SecurityWarning):
    """Warned when connecting to a host with a certificate missing a SAN."""

    pass


class InsecureRequestWarning(SecurityWarning):
    """Warned when making an unverified HTTPS request."""

    pass


class SystemTimeWarning(SecurityWarning):
    """Warned when system time is suspected to be wrong"""

    pass


class InsecurePlatformWarning(SecurityWarning):
    """Warned when certain TLS/SSL configuration is not available on a platform."""

    pass


class SNIMissingWarning(HTTPWarning):
    """Warned when making an HTTPS request without SNI available."""

    pass


class DependencyWarning(HTTPWarning):
    """
    Warned when an attempt is made to import a module with missing optional
    dependencies.
    """

    pass


class ResponseNotChunked(ProtocolError, ValueError):
    """Response needs to be chunked in order to read it as chunks."""

    pass


class BodyNotHttplibCompatible(HTTPError):
    """
    Body should be :class:`http.client.HTTPResponse` like
    (have an fp attribute which returns raw chunks) for read_chunked().
    """

    pass


class IncompleteRead(HTTPError, httplib_IncompleteRead):
    """
    Response length doesn't match expected Content-Length

    Subclass of :class:`http.client.IncompleteRead` to allow int value
    for ``partial`` to avoid creating large objects on streamed reads.
    """

    def __init__(self, partial, expected):
        super(IncompleteRead, self).__init__(partial, expected)

    def __repr__(self):
        return "IncompleteRead(%i bytes read, %i more expected)" % (
            self.partial,
            self.expected,
        )


class InvalidChunkLength(HTTPError, httplib_IncompleteRead):
    """Invalid chunk length in a chunked response."""

    def __init__(self, response, length):
        super(InvalidChunkLength, self).__init__(
            response.tell(), response.length_remaining
        )
        self.response = response
        self.length = length

    def __repr__(self):
        return "InvalidChunkLength(got length %r, %i bytes read)" % (
            self.length,
            self.partial,
        )


class InvalidHeader(HTTPError):
    """The header provided was somehow invalid."""

    pass


class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):
    """ProxyManager does not support the supplied scheme"""

    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.

    def __init__(self, scheme):
        # 'localhost' is here because our URL parser parses
        # localhost:8080 -> scheme=localhost, remove if we fix this.
if scheme == "localhost": scheme = None if scheme is None: message = "Proxy URL had no scheme, should start with http:// or https://" else: message = ( "Proxy URL had unsupported scheme %s, should use http:// or https://" % scheme ) super(ProxySchemeUnknown, self).__init__(message) class ProxySchemeUnsupported(ValueError): """Fetching HTTPS resources through HTTPS proxies is unsupported""" pass class HeaderParsingError(HTTPError): """Raised by assert_header_parsing, but we convert it to a log.warning statement.""" def __init__(self, defects, unparsed_data): message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data) super(HeaderParsingError, self).__init__(message) class UnrewindableBodyError(HTTPError): """urllib3 encountered an error when trying to rewind a body""" pass
8,217
Python
24.364197
88
0.670439
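Since every exception above ultimately derives from ``HTTPError`` (or ``HTTPWarning``), callers can catch narrowly or broadly. A hedged sketch follows; the unroutable address and the retry/timeout numbers are placeholders chosen only to provoke a quick connection failure.

import urllib3
from urllib3.exceptions import HTTPError, MaxRetryError

http = urllib3.PoolManager()
try:
    # 127.0.0.1:9 ("discard") is a placeholder that should refuse connections.
    http.request("GET", "http://127.0.0.1:9/", retries=1, timeout=0.5)
except MaxRetryError as e:
    # The underlying cause (e.g. NewConnectionError) is preserved on .reason
    print("gave up:", type(e.reason).__name__, e.reason)
except HTTPError as e:
    print("other urllib3 error:", e)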
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/poolmanager.py
from __future__ import absolute_import import collections import functools import logging from ._collections import RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, ProxySchemeUnsupported, URLSchemeUnknown, ) from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.url import parse_url __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. 
socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. #: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None proxy_config = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ("scheme", "host", "port"): request_context.pop(key, None) if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. 
""" self.pools.clear() def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. """ if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port: port = port_by_scheme.get(request_context["scheme"].lower(), 80) request_context["port"] = port request_context["host"] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme.get(scheme) if not pool_key_constructor: raise URLSchemeUnknown(scheme) pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. 
""" if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. """ if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) # RFC 7231, Section 6.4.4 if response.status == 303: method = "GET" retries = kw.get("retries") if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect) # Strip headers marked as unsafe to forward to the redirected location. # Check remove_headers_on_redirect to avoid a potential network call within # conn.is_same_host() which may use socket.gethostbyname() in the future. if retries.remove_headers_on_redirect and not conn.is_same_host( redirect_location ): headers = list(six.iterkeys(kw["headers"])) for header in headers: if header.lower() in retries.remove_headers_on_redirect: kw["headers"].pop(header, None) try: retries = retries.increment(method, url, response=response, _pool=conn) except MaxRetryError: if retries.raise_on_redirect: response.drain_conn() raise return response kw["retries"] = retries kw["redirect"] = redirect log.info("Redirecting %s -> %s", url, redirect_location) response.drain_conn() return self.urlopen(method, redirect_location, **kw) class ProxyManager(PoolManager): """ Behaves just like :class:`PoolManager`, but sends all requests through the defined proxy, using the CONNECT method for HTTPS URLs. :param proxy_url: The URL of the proxy to be used. :param proxy_headers: A dictionary containing headers that will be sent to the proxy. In case of HTTP they are being sent with each request, while in the HTTPS/CONNECT case they are sent only once. Could be used for proxy authentication. :param proxy_ssl_context: The proxy SSL context is used to establish the TLS connection to the proxy when using HTTPS proxies. :param use_forwarding_for_https: (Defaults to False) If set to True will forward requests to the HTTPS proxy to be made on behalf of the client instead of creating a TLS tunnel via the CONNECT method. **Enabling this flag means that request and response headers and content will be visible from the HTTPS proxy** whereas tunneling keeps request and response headers and content private. 
IP address, target hostname, SNI, and port are always visible to an HTTPS proxy even when this flag is disabled. Example: >>> proxy = urllib3.ProxyManager('http://localhost:3128/') >>> r1 = proxy.request('GET', 'http://google.com/') >>> r2 = proxy.request('GET', 'http://httpbin.org/') >>> len(proxy.pools) 1 >>> r3 = proxy.request('GET', 'https://httpbin.org/') >>> r4 = proxy.request('GET', 'https://twitter.com/') >>> len(proxy.pools) 3 """ def __init__( self, proxy_url, num_pools=10, headers=None, proxy_headers=None, proxy_ssl_context=None, use_forwarding_for_https=False, **connection_pool_kw ): if isinstance(proxy_url, HTTPConnectionPool): proxy_url = "%s://%s:%i" % ( proxy_url.scheme, proxy_url.host, proxy_url.port, ) proxy = parse_url(proxy_url) if proxy.scheme not in ("http", "https"): raise ProxySchemeUnknown(proxy.scheme) if not proxy.port: port = port_by_scheme.get(proxy.scheme, 80) proxy = proxy._replace(port=port) self.proxy = proxy self.proxy_headers = proxy_headers or {} self.proxy_ssl_context = proxy_ssl_context self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https) connection_pool_kw["_proxy"] = self.proxy connection_pool_kw["_proxy_headers"] = self.proxy_headers connection_pool_kw["_proxy_config"] = self.proxy_config super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw) def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): if scheme == "https": return super(ProxyManager, self).connection_from_host( host, port, scheme, pool_kwargs=pool_kwargs ) return super(ProxyManager, self).connection_from_host( self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs ) def _set_proxy_headers(self, url, headers=None): """ Sets headers needed by proxies: specifically, the Accept and Host headers. Only sets headers not provided by the user. """ headers_ = {"Accept": "*/*"} netloc = parse_url(url).netloc if netloc: headers_["Host"] = netloc if headers: headers_.update(headers) return headers_ def urlopen(self, method, url, redirect=True, **kw): "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." u = parse_url(url) if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme): # For connections using HTTP CONNECT, httplib sets the necessary # headers on the CONNECT to the proxy. If we're not using CONNECT, # we'll definitely need to set 'Host' at the very least. headers = kw.get("headers", self.headers) kw["headers"] = self._set_proxy_headers(url, headers) return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) def proxy_from_url(url, **kw): return ProxyManager(proxy_url=url, **kw)
19,752
Python
35.715613
100
0.626519
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/fields.py
from __future__ import absolute_import

import email.utils
import mimetypes
import re

from .packages import six


def guess_content_type(filename, default="application/octet-stream"):
    """
    Guess the "Content-Type" of a file.

    :param filename:
        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
    :param default:
        If no "Content-Type" can be guessed, default to ``default``.
    """
    if filename:
        return mimetypes.guess_type(filename)[0] or default

    return default


def format_header_param_rfc2231(name, value):
    """
    Helper function to format and quote a single header parameter using the
    strategy defined in RFC 2231.

    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. This follows
    `RFC 2388 Section 4.4 <https://tools.ietf.org/html/rfc2388#section-4.4>`_.

    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as ``bytes`` or ``str``.
    :returns:
        An RFC-2231-formatted unicode string.
    """
    if isinstance(value, six.binary_type):
        value = value.decode("utf-8")

    if not any(ch in value for ch in '"\\\r\n'):
        result = u'%s="%s"' % (name, value)
        try:
            result.encode("ascii")
        except (UnicodeEncodeError, UnicodeDecodeError):
            pass
        else:
            return result

    if six.PY2:  # Python 2:
        value = value.encode("utf-8")

    # encode_rfc2231 accepts an encoded string and returns an ascii-encoded
    # string in Python 2 but accepts and returns unicode strings in Python 3
    value = email.utils.encode_rfc2231(value, "utf-8")
    value = "%s*=%s" % (name, value)

    if six.PY2:  # Python 2:
        value = value.decode("utf-8")

    return value


_HTML5_REPLACEMENTS = {
    u"\u0022": u"%22",
    # Replace "\" with "\\".
    u"\u005C": u"\u005C\u005C",
}

# All control characters from 0x00 to 0x1F *except* 0x1B.
_HTML5_REPLACEMENTS.update(
    {
        six.unichr(cc): u"%{:02X}".format(cc)
        for cc in range(0x00, 0x1F + 1)
        if cc not in (0x1B,)
    }
)


def _replace_multiple(value, needles_and_replacements):
    def replacer(match):
        return needles_and_replacements[match.group(0)]

    pattern = re.compile(
        r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()])
    )

    result = pattern.sub(replacer, value)

    return result


def format_header_param_html5(name, value):
    """
    Helper function to format and quote a single header parameter using the
    HTML5 strategy.

    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. This follows the `HTML5 Working Draft
    Section 4.10.22.7`_ and matches the behavior of curl and modern browsers.

    .. _HTML5 Working Draft Section 4.10.22.7:
        https://w3c.github.io/html/sec-forms.html#multipart-form-data

    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as ``bytes`` or ``str``.
    :returns:
        A unicode string, stripped of troublesome characters.
    """
    if isinstance(value, six.binary_type):
        value = value.decode("utf-8")

    value = _replace_multiple(value, _HTML5_REPLACEMENTS)

    return u'%s="%s"' % (name, value)


# For backwards-compatibility.
format_header_param = format_header_param_html5


class RequestField(object):
    """
    A data container for request body parameters.

    :param name:
        The name of this request field. Must be unicode.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field. Must be unicode.
    :param headers:
        An optional dict-like object of headers to initially use for the field.
    :param header_formatter:
        An optional callable that is used to encode and format the headers. By
        default, this is :func:`format_header_param_html5`.
""" def __init__( self, name, data, filename=None, headers=None, header_formatter=format_header_param_html5, ): self._name = name self._filename = filename self.data = data self.headers = {} if headers: self.headers = dict(headers) self.header_formatter = header_formatter @classmethod def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5): """ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. Supports constructing :class:`~urllib3.fields.RequestField` from parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type) tuple where the MIME type is optional. For example:: 'foo': 'bar', 'fakefile': ('foofile.txt', 'contents of foofile'), 'realfile': ('barfile.txt', open('realfile').read()), 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), 'nonamefile': 'contents of nonamefile field', Field names and filenames must be unicode. """ if isinstance(value, tuple): if len(value) == 3: filename, data, content_type = value else: filename, data = value content_type = guess_content_type(filename) else: filename = None content_type = None data = value request_param = cls( fieldname, data, filename=filename, header_formatter=header_formatter ) request_param.make_multipart(content_type=content_type) return request_param def _render_part(self, name, value): """ Overridable helper function to format a single header parameter. By default, this calls ``self.header_formatter``. :param name: The name of the parameter, a string expected to be ASCII only. :param value: The value of the parameter, provided as a unicode string. """ return self.header_formatter(name, value) def _render_parts(self, header_parts): """ Helper function to format and quote a single header. Useful for single headers that are composed of multiple items. E.g., 'Content-Disposition' fields. :param header_parts: A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format as `k1="v1"; k2="v2"; ...`. """ parts = [] iterable = header_parts if isinstance(header_parts, dict): iterable = header_parts.items() for name, value in iterable: if value is not None: parts.append(self._render_part(name, value)) return u"; ".join(parts) def render_headers(self): """ Renders the headers for this request field. """ lines = [] sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"] for sort_key in sort_keys: if self.headers.get(sort_key, False): lines.append(u"%s: %s" % (sort_key, self.headers[sort_key])) for header_name, header_value in self.headers.items(): if header_name not in sort_keys: if header_value: lines.append(u"%s: %s" % (header_name, header_value)) lines.append(u"\r\n") return u"\r\n".join(lines) def make_multipart( self, content_disposition=None, content_type=None, content_location=None ): """ Makes this request field into a multipart request field. This method overrides "Content-Disposition", "Content-Type" and "Content-Location" headers to the request parameter. :param content_type: The 'Content-Type' of the request body. :param content_location: The 'Content-Location' of the request body. """ self.headers["Content-Disposition"] = content_disposition or u"form-data" self.headers["Content-Disposition"] += u"; ".join( [ u"", self._render_parts( ((u"name", self._name), (u"filename", self._filename)) ), ] ) self.headers["Content-Type"] = content_type self.headers["Content-Location"] = content_location
8,579
Python
30.2
88
0.602984
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/_collections.py
from __future__ import absolute_import

try:
    from collections.abc import Mapping, MutableMapping
except ImportError:
    from collections import Mapping, MutableMapping

try:
    from threading import RLock
except ImportError:  # Platform-specific: No threads available

    class RLock:
        def __enter__(self):
            pass

        def __exit__(self, exc_type, exc_value, traceback):
            pass


from collections import OrderedDict

from .exceptions import InvalidHeader
from .packages import six
from .packages.six import iterkeys, itervalues

__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]


_Null = object()


class RecentlyUsedContainer(MutableMapping):
    """
    Provides a thread-safe dict-like container which maintains up to
    ``maxsize`` keys while throwing away the least-recently-used keys beyond
    ``maxsize``.

    :param maxsize:
        Maximum number of recent elements to retain.

    :param dispose_func:
        Callback which will get called every time an item is evicted from the
        container; it is invoked as ``dispose_func(value)`` with the evicted
        value.
    """

    ContainerCls = OrderedDict

    def __init__(self, maxsize=10, dispose_func=None):
        self._maxsize = maxsize
        self.dispose_func = dispose_func

        self._container = self.ContainerCls()
        self.lock = RLock()

    def __getitem__(self, key):
        # Re-insert the item, moving it to the end of the eviction line.
        with self.lock:
            item = self._container.pop(key)
            self._container[key] = item
            return item

    def __setitem__(self, key, value):
        evicted_value = _Null
        with self.lock:
            # Possibly evict the existing value of 'key'
            evicted_value = self._container.get(key, _Null)
            self._container[key] = value

            # If we didn't evict an existing value, we might have to evict the
            # least recently used item from the beginning of the container.
            if len(self._container) > self._maxsize:
                _key, evicted_value = self._container.popitem(last=False)

        if self.dispose_func and evicted_value is not _Null:
            self.dispose_func(evicted_value)

    def __delitem__(self, key):
        with self.lock:
            value = self._container.pop(key)

        if self.dispose_func:
            self.dispose_func(value)

    def __len__(self):
        with self.lock:
            return len(self._container)

    def __iter__(self):
        raise NotImplementedError(
            "Iteration over this class is unlikely to be threadsafe."
        )

    def clear(self):
        with self.lock:
            # Copy pointers to all values, then wipe the mapping
            values = list(itervalues(self._container))
            self._container.clear()

        if self.dispose_func:
            for value in values:
                self.dispose_func(value)

    def keys(self):
        with self.lock:
            return list(iterkeys(self._container))


class HTTPHeaderDict(MutableMapping):
    """
    A ``dict``-like container for storing HTTP Headers.

    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.

    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.

    Field names are stored and compared case-insensitively in compliance with
    RFC 7230. Iteration provides the first case-sensitive key seen for each
    case-insensitive pair.

    Using ``__setitem__`` syntax overwrites fields that compare equal
    case-insensitively in order to maintain ``dict``'s API. For fields that
    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
    in a loop.

    If multiple fields that are equal case-insensitively are passed to the
    constructor or ``.update``, the behavior is undefined and some will be
    lost.
>>> headers = HTTPHeaderDict() >>> headers.add('Set-Cookie', 'foo=bar') >>> headers.add('set-cookie', 'baz=quxx') >>> headers['content-length'] = '7' >>> headers['SET-cookie'] 'foo=bar, baz=quxx' >>> headers['Content-Length'] '7' """ def __init__(self, headers=None, **kwargs): super(HTTPHeaderDict, self).__init__() self._container = OrderedDict() if headers is not None: if isinstance(headers, HTTPHeaderDict): self._copy_from(headers) else: self.extend(headers) if kwargs: self.extend(kwargs) def __setitem__(self, key, val): self._container[key.lower()] = [key, val] return self._container[key.lower()] def __getitem__(self, key): val = self._container[key.lower()] return ", ".join(val[1:]) def __delitem__(self, key): del self._container[key.lower()] def __contains__(self, key): return key.lower() in self._container def __eq__(self, other): if not isinstance(other, Mapping) and not hasattr(other, "keys"): return False if not isinstance(other, type(self)): other = type(self)(other) return dict((k.lower(), v) for k, v in self.itermerged()) == dict( (k.lower(), v) for k, v in other.itermerged() ) def __ne__(self, other): return not self.__eq__(other) if six.PY2: # Python 2 iterkeys = MutableMapping.iterkeys itervalues = MutableMapping.itervalues __marker = object() def __len__(self): return len(self._container) def __iter__(self): # Only provide the originally cased names for vals in self._container.values(): yield vals[0] def pop(self, key, default=__marker): """D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. """ # Using the MutableMapping function directly fails due to the private marker. # Using ordinary dict.pop would expose the internal structures. # So let's reinvent the wheel. try: value = self[key] except KeyError: if default is self.__marker: raise return default else: del self[key] return value def discard(self, key): try: del self[key] except KeyError: pass def add(self, key, val): """Adds a (name, value) pair, doesn't overwrite the value if it already exists. >>> headers = HTTPHeaderDict(foo='bar') >>> headers.add('Foo', 'baz') >>> headers['foo'] 'bar, baz' """ key_lower = key.lower() new_vals = [key, val] # Keep the common case aka no item present as fast as possible vals = self._container.setdefault(key_lower, new_vals) if new_vals is not vals: vals.append(val) def extend(self, *args, **kwargs): """Generic import function for any type of header-like object. Adapted version of MutableMapping.update in order to insert items with self.add instead of self.__setitem__ """ if len(args) > 1: raise TypeError( "extend() takes at most 1 positional " "arguments ({0} given)".format(len(args)) ) other = args[0] if len(args) >= 1 else () if isinstance(other, HTTPHeaderDict): for key, val in other.iteritems(): self.add(key, val) elif isinstance(other, Mapping): for key in other: self.add(key, other[key]) elif hasattr(other, "keys"): for key in other.keys(): self.add(key, other[key]) else: for key, value in other: self.add(key, value) for key, value in kwargs.items(): self.add(key, value) def getlist(self, key, default=__marker): """Returns a list of all the values for the named field. 
Returns an empty list if the key doesn't exist.""" try: vals = self._container[key.lower()] except KeyError: if default is self.__marker: return [] return default else: return vals[1:] # Backwards compatibility for httplib getheaders = getlist getallmatchingheaders = getlist iget = getlist # Backwards compatibility for http.cookiejar get_all = getlist def __repr__(self): return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) def _copy_from(self, other): for key in other: val = other.getlist(key) if isinstance(val, list): # Don't need to convert tuples val = list(val) self._container[key.lower()] = [key] + val def copy(self): clone = type(self)() clone._copy_from(self) return clone def iteritems(self): """Iterate over all header lines, including duplicate ones.""" for key in self: vals = self._container[key.lower()] for val in vals[1:]: yield vals[0], val def itermerged(self): """Iterate over all headers, merging duplicate ones together.""" for key in self: val = self._container[key.lower()] yield val[0], ", ".join(val[1:]) def items(self): return list(self.iteritems()) @classmethod def from_httplib(cls, message): # Python 2 """Read headers from a Python 2 httplib message object.""" # python2.7 does not expose a proper API for exporting multiheaders # efficiently. This function re-reads raw lines from the message # object and extracts the multiheaders properly. obs_fold_continued_leaders = (" ", "\t") headers = [] for line in message.headers: if line.startswith(obs_fold_continued_leaders): if not headers: # We received a header line that starts with OWS as described # in RFC-7230 S3.2.4. This indicates a multiline header, but # there exists no previous header to which we can attach it. raise InvalidHeader( "Header continuation with no previous header: %s" % line ) else: key, value = headers[-1] headers[-1] = (key, value + " " + line.strip()) continue key, value = line.split(":", 1) headers.append((key, value.strip())) return cls(headers)
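

# A short sketch exercising both containers defined above; the keys and
# values are arbitrary. Guarded so importing this module has no side
# effects.
if __name__ == "__main__":
    # HTTPHeaderDict: case-insensitive names, duplicate fields preserved
    # and joined with ", " on access (mirrors the class docstring).
    headers = HTTPHeaderDict()
    headers.add("Set-Cookie", "foo=bar")
    headers.add("set-cookie", "baz=quxx")
    print(headers["SET-COOKIE"])          # foo=bar, baz=quxx
    print(headers.getlist("set-cookie"))  # ['foo=bar', 'baz=quxx']

    # RecentlyUsedContainer: LRU eviction once maxsize is exceeded; the
    # dispose_func callback observes each evicted value.
    lru = RecentlyUsedContainer(maxsize=2, dispose_func=lambda v: print("evicted:", v))
    lru["a"] = 1
    lru["b"] = 2
    lru["a"]           # touch "a" so "b" becomes least recently used
    lru["c"] = 3       # evicts "b" -> prints "evicted: 2"
    print(lru.keys())  # ['a', 'c']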
10,811
Python
30.988166
86
0.575895
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/request.py
from __future__ import absolute_import from .filepost import encode_multipart_formdata from .packages.six.moves.urllib.parse import urlencode __all__ = ["RequestMethods"] class RequestMethods(object): """ Convenience mixin for classes who implement a :meth:`urlopen` method, such as :class:`urllib3.HTTPConnectionPool` and :class:`urllib3.PoolManager`. Provides behavior for making common types of HTTP request methods and decides which type of request field encoding to use. Specifically, :meth:`.request_encode_url` is for sending requests whose fields are encoded in the URL (such as GET, HEAD, DELETE). :meth:`.request_encode_body` is for sending requests whose fields are encoded in the *body* of the request using multipart or www-form-urlencoded (such as for POST, PUT, PATCH). :meth:`.request` is for making any kind of request, it will look up the appropriate encoding format and use one of the above two methods to make the request. Initializer parameters: :param headers: Headers to include with all requests, unless other headers are given explicitly. """ _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"} def __init__(self, headers=None): self.headers = headers or {} def urlopen( self, method, url, body=None, headers=None, encode_multipart=True, multipart_boundary=None, **kw ): # Abstract raise NotImplementedError( "Classes extending RequestMethods must implement " "their own ``urlopen`` method." ) def request(self, method, url, fields=None, headers=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the appropriate encoding of ``fields`` based on the ``method`` used. This is a convenience method that requires the least amount of manual effort. It can be used in most situations, while still having the option to drop down to more specific methods when necessary, such as :meth:`request_encode_url`, :meth:`request_encode_body`, or even the lowest level :meth:`urlopen`. """ method = method.upper() urlopen_kw["request_url"] = url if method in self._encode_url_methods: return self.request_encode_url( method, url, fields=fields, headers=headers, **urlopen_kw ) else: return self.request_encode_body( method, url, fields=fields, headers=headers, **urlopen_kw ) def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the url. This is useful for request methods like GET, HEAD, DELETE, etc. """ if headers is None: headers = self.headers extra_kw = {"headers": headers} extra_kw.update(urlopen_kw) if fields: url += "?" + urlencode(fields) return self.urlopen(method, url, **extra_kw) def request_encode_body( self, method, url, fields=None, headers=None, encode_multipart=True, multipart_boundary=None, **urlopen_kw ): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the body. This is useful for request methods like POST, PUT, PATCH, etc. When ``encode_multipart=True`` (default), then :func:`urllib3.encode_multipart_formdata` is used to encode the payload with the appropriate content type. Otherwise :func:`urllib.parse.urlencode` is used with the 'application/x-www-form-urlencoded' content type. Multipart encoding must be used when posting files, and it's reasonably safe to use it in other times too. However, it may break request signing, such as with OAuth. Supports an optional ``fields`` parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type) tuple where the MIME type is optional. 
For example:: fields = { 'foo': 'bar', 'fakefile': ('foofile.txt', 'contents of foofile'), 'realfile': ('barfile.txt', open('realfile').read()), 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), 'nonamefile': 'contents of nonamefile field', } When uploading a file, providing a filename (the first parameter of the tuple) is optional but recommended to best mimic behavior of browsers. Note that if ``headers`` are supplied, the 'Content-Type' header will be overwritten because it depends on the dynamic random boundary string which is used to compose the body of the request. The random boundary string can be explicitly set with the ``multipart_boundary`` parameter. """ if headers is None: headers = self.headers extra_kw = {"headers": {}} if fields: if "body" in urlopen_kw: raise TypeError( "request got values for both 'fields' and 'body', can only specify one." ) if encode_multipart: body, content_type = encode_multipart_formdata( fields, boundary=multipart_boundary ) else: body, content_type = ( urlencode(fields), "application/x-www-form-urlencoded", ) extra_kw["body"] = body extra_kw["headers"] = {"Content-Type": content_type} extra_kw["headers"].update(headers) extra_kw.update(urlopen_kw) return self.urlopen(method, url, **extra_kw)
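

# An illustrative sketch of how request() dispatches: GET-style methods
# go through request_encode_url(), POST-style methods through
# request_encode_body(). The _Echo stub below is hypothetical and exists
# only to make the dispatch visible; real code gets this behavior via
# PoolManager, which mixes in RequestMethods.
if __name__ == "__main__":

    class _Echo(RequestMethods):
        def urlopen(self, method, url, body=None, headers=None, **kw):
            # Just report what would have been sent.
            return (method, url, body, headers)

    http = _Echo()
    # Fields are appended to the URL as a query string ...
    print(http.request("GET", "http://example.com/", fields={"q": "urllib3"}))
    # ... or urlencoded into the body with the matching Content-Type.
    print(
        http.request(
            "POST",
            "http://example.com/",
            fields={"q": "urllib3"},
            encode_multipart=False,
        )
    )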
5,985
Python
34.005848
92
0.600334
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/__init__.py
""" Python HTTP library with thread-safe connection pooling, file post support, user friendly, and more """ from __future__ import absolute_import # Set default logging handler to avoid "No handler found" warnings. import logging import warnings from logging import NullHandler from . import exceptions from ._version import __version__ from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url from .filepost import encode_multipart_formdata from .poolmanager import PoolManager, ProxyManager, proxy_from_url from .response import HTTPResponse from .util.request import make_headers from .util.retry import Retry from .util.timeout import Timeout from .util.url import get_host # === NOTE TO REPACKAGERS AND VENDORS === # Please delete this block, this logic is only # for urllib3 being distributed via PyPI. # See: https://github.com/urllib3/urllib3/issues/2680 try: import urllib3_secure_extra # type: ignore # noqa: F401 except ImportError: pass else: warnings.warn( "'urllib3[secure]' extra is deprecated and will be removed " "in a future release of urllib3 2.x. Read more in this issue: " "https://github.com/urllib3/urllib3/issues/2680", category=DeprecationWarning, stacklevel=2, ) __author__ = "Andrey Petrov ([email protected])" __license__ = "MIT" __version__ = __version__ __all__ = ( "HTTPConnectionPool", "HTTPSConnectionPool", "PoolManager", "ProxyManager", "HTTPResponse", "Retry", "Timeout", "add_stderr_logger", "connection_from_url", "disable_warnings", "encode_multipart_formdata", "get_host", "make_headers", "proxy_from_url", ) logging.getLogger(__name__).addHandler(NullHandler()) def add_stderr_logger(level=logging.DEBUG): """ Helper for quickly adding a StreamHandler to the logger. Useful for debugging. Returns the handler after adding it. """ # This method needs to be in this __init__.py to get the __name__ correct # even if urllib3 is vendored within another package. logger = logging.getLogger(__name__) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s")) logger.addHandler(handler) logger.setLevel(level) logger.debug("Added a stderr logging handler to logger: %s", __name__) return handler # ... Clean up. del NullHandler # All warning filters *must* be appended unless you're really certain that they # shouldn't be: otherwise, it's very hard for users to use most Python # mechanisms to silence them. # SecurityWarning's always go off by default. warnings.simplefilter("always", exceptions.SecurityWarning, append=True) # SubjectAltNameWarning's should go off once per host warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True) # InsecurePlatformWarning's don't vary between requests, so we keep it default. warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True) # SNIMissingWarnings should go off only once. warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True) def disable_warnings(category=exceptions.HTTPWarning): """ Helper for quickly disabling all urllib3 warnings. """ warnings.simplefilter("ignore", category)
3,333
Python
31.368932
99
0.727273
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/connectionpool.py
from __future__ import absolute_import import errno import logging import re import socket import sys import warnings from socket import error as SocketError from socket import timeout as SocketTimeout from .connection import ( BaseSSLError, BrokenPipeError, DummyConnection, HTTPConnection, HTTPException, HTTPSConnection, VerifiedHTTPSConnection, port_by_scheme, ) from .exceptions import ( ClosedPoolError, EmptyPoolError, HeaderParsingError, HostChangedError, InsecureRequestWarning, LocationValueError, MaxRetryError, NewConnectionError, ProtocolError, ProxyError, ReadTimeoutError, SSLError, TimeoutError, ) from .packages import six from .packages.six.moves import queue from .request import RequestMethods from .response import HTTPResponse from .util.connection import is_connection_dropped from .util.proxy import connection_requires_http_tunnel from .util.queue import LifoQueue from .util.request import set_file_position from .util.response import assert_header_parsing from .util.retry import Retry from .util.ssl_match_hostname import CertificateError from .util.timeout import Timeout from .util.url import Url, _encode_target from .util.url import _normalize_host as normalize_host from .util.url import get_host, parse_url try: # Platform-specific: Python 3 import weakref weakref_finalize = weakref.finalize except AttributeError: # Platform-specific: Python 2 from .packages.backports.weakref_finalize import weakref_finalize xrange = six.moves.xrange log = logging.getLogger(__name__) _Default = object() # Pool objects class ConnectionPool(object): """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme = None QueueCls = LifoQueue def __init__(self, host, port=None): if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self._proxy_host = host.lower() self.port = port def __str__(self): return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() # Return False to re-raise any potential exceptions return False def close(self): """ Close all pooled connections and disable the pool. """ pass # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`http.client.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`http.client.HTTPConnection`. :param strict: Causes BadStatusLine to be raised if the status line can't be parsed as a valid HTTP/1.0 or 1.1 status line, passed into :class:`http.client.HTTPConnection`. .. note:: Only works in Python 2. This parameter is ignored in Python 3. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. 
:param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. """ scheme = "http" ConnectionCls = HTTPConnection ResponseCls = HTTPResponse def __init__( self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, _proxy_config=None, **conn_kw ): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) self.strict = strict if not isinstance(timeout, Timeout): timeout = Timeout.from_float(timeout) if retries is None: retries = Retry.DEFAULT self.timeout = timeout self.retries = retries self.pool = self.QueueCls(maxsize) self.block = block self.proxy = _proxy self.proxy_headers = _proxy_headers or {} self.proxy_config = _proxy_config # Fill the queue up so that doing get() on it will block properly for _ in xrange(maxsize): self.pool.put(None) # These are mostly for testing and debugging purposes. self.num_connections = 0 self.num_requests = 0 self.conn_kw = conn_kw if self.proxy: # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. # We cannot know if the user has added default socket options, so we cannot replace the # list. self.conn_kw.setdefault("socket_options", []) self.conn_kw["proxy"] = self.proxy self.conn_kw["proxy_config"] = self.proxy_config # Do not pass 'self' as callback to 'finalize'. # Then the 'finalize' would keep an endless living (leak) to self. # By just passing a reference to the pool allows the garbage collector # to free self if nobody else has a reference to it. pool = self.pool # Close all the HTTPConnections in the pool before the # HTTPConnectionPool object is garbage collected. weakref_finalize(self, _close_pool_connections, pool) def _new_conn(self): """ Return a fresh :class:`HTTPConnection`. """ self.num_connections += 1 log.debug( "Starting new HTTP connection (%d): %s:%s", self.num_connections, self.host, self.port or "80", ) conn = self.ConnectionCls( host=self.host, port=self.port, timeout=self.timeout.connect_timeout, strict=self.strict, **self.conn_kw ) return conn def _get_conn(self, timeout=None): """ Get a connection. Will return a pooled connection if one is available. If no connections are available and :prop:`.block` is ``False``, then a fresh connection is returned. :param timeout: Seconds to wait before giving up and raising :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and :prop:`.block` is ``True``. 
""" conn = None try: conn = self.pool.get(block=self.block, timeout=timeout) except AttributeError: # self.pool is None raise ClosedPoolError(self, "Pool is closed.") except queue.Empty: if self.block: raise EmptyPoolError( self, "Pool reached maximum size and no more connections are allowed.", ) pass # Oh well, we'll create a new connection then # If this is a persistent connection, check if it got disconnected if conn and is_connection_dropped(conn): log.debug("Resetting dropped connection: %s", self.host) conn.close() if getattr(conn, "auto_open", 1) == 0: # This is a proxied connection that has been mutated by # http.client._tunnel() and cannot be reused (since it would # attempt to bypass the proxy) conn = None return conn or self._new_conn() def _put_conn(self, conn): """ Put a connection back into the pool. :param conn: Connection object for the current host and port as returned by :meth:`._new_conn` or :meth:`._get_conn`. If the pool is already full, the connection is closed and discarded because we exceeded maxsize. If connections are discarded frequently, then maxsize should be increased. If the pool is closed, then the connection will be closed and discarded. """ try: self.pool.put(conn, block=False) return # Everything is dandy, done. except AttributeError: # self.pool is None. pass except queue.Full: # This should never happen if self.block == True log.warning( "Connection pool is full, discarding connection: %s. Connection pool size: %s", self.host, self.pool.qsize(), ) # Connection never got put back into the pool, close it. if conn: conn.close() def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ pass def _prepare_proxy(self, conn): # Nothing to do for HTTP connections. pass def _get_timeout(self, timeout): """Helper that always returns a :class:`urllib3.util.Timeout`""" if timeout is _Default: return self.timeout.clone() if isinstance(timeout, Timeout): return timeout.clone() else: # User passed us an int/float. This is for backwards compatibility, # can be removed later return Timeout.from_float(timeout) def _raise_timeout(self, err, url, timeout_value): """Is the error actually a timeout? Will raise a ReadTimeout or pass""" if isinstance(err, SocketTimeout): raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % timeout_value ) # See the above comment about EAGAIN in Python 3. In Python 2 we have # to specifically catch it and throw the timeout error if hasattr(err, "errno") and err.errno in _blocking_errnos: raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % timeout_value ) # Catch possible read timeouts thrown as SSL errors. If not the # case, rethrow the original. We need to do this because of: # http://bugs.python.org/issue10272 if "timed out" in str(err) or "did not complete (read)" in str( err ): # Python < 2.7.4 raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % timeout_value ) def _make_request( self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw ): """ Perform a request on a given urllib connection object taken from our pool. :param conn: a connection from one of our connection pools :param timeout: Socket timeout in seconds for the request. This can be a float or integer, which will set the same timeout value for the socket connect and the socket read, or an instance of :class:`urllib3.util.Timeout`, which gives you more fine-grained control over your timeouts. 
""" self.num_requests += 1 timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout) # Trigger any extra validation we need to do. try: self._validate_conn(conn) except (SocketTimeout, BaseSSLError) as e: # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) raise # conn.request() calls http.client.*.request, not the method in # urllib3.request. It also calls makefile (recv) on the socket. try: if chunked: conn.request_chunked(method, url, **httplib_request_kw) else: conn.request(method, url, **httplib_request_kw) # We are swallowing BrokenPipeError (errno.EPIPE) since the server is # legitimately able to close the connection after sending a valid response. # With this behaviour, the received response is still readable. except BrokenPipeError: # Python 3 pass except IOError as e: # Python 2 and macOS/Linux # EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/ if e.errno not in { errno.EPIPE, errno.ESHUTDOWN, errno.EPROTOTYPE, }: raise # Reset the timeout for the recv() on the socket read_timeout = timeout_obj.read_timeout # App Engine doesn't have a sock attr if getattr(conn, "sock", None): # In Python 3 socket.py will catch EAGAIN and return None when you # try and read into the file pointer created by http.client, which # instead raises a BadStatusLine exception. Instead of catching # the exception and assuming all BadStatusLine exceptions are read # timeouts, check for a zero timeout before making the request. if read_timeout == 0: raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % read_timeout ) if read_timeout is Timeout.DEFAULT_TIMEOUT: conn.sock.settimeout(socket.getdefaulttimeout()) else: # None or a value conn.sock.settimeout(read_timeout) # Receive the response from the server try: try: # Python 2.7, use buffering of HTTP responses httplib_response = conn.getresponse(buffering=True) except TypeError: # Python 3 try: httplib_response = conn.getresponse() except BaseException as e: # Remove the TypeError from the exception chain in # Python 3 (including for exceptions like SystemExit). # Otherwise it looks like a bug in the code. six.raise_from(e, None) except (SocketTimeout, BaseSSLError, SocketError) as e: self._raise_timeout(err=e, url=url, timeout_value=read_timeout) raise # AppEngine doesn't have a version attr. http_version = getattr(conn, "_http_vsn_str", "HTTP/?") log.debug( '%s://%s:%s "%s %s %s" %s %s', self.scheme, self.host, self.port, method, url, http_version, httplib_response.status, httplib_response.length, ) try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 log.warning( "Failed to parse headers (url=%s): %s", self._absolute_url(url), hpe, exc_info=True, ) return httplib_response def _absolute_url(self, path): return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url def close(self): """ Close all pooled connections and disable the pool. """ if self.pool is None: return # Disable access to the pool old_pool, self.pool = self.pool, None # Close all the HTTPConnections in the pool. _close_pool_connections(old_pool) def is_same_host(self, url): """ Check if the given ``url`` is a member of the same host as this connection pool. 
""" if url.startswith("/"): return True # TODO: Add optional support for socket.gethostbyname checking. scheme, host, port = get_host(url) if host is not None: host = _normalize_host(host, scheme=scheme) # Use explicit default port for comparison when none is given if self.port and not port: port = port_by_scheme.get(scheme) elif not self.port and port == port_by_scheme.get(scheme): port = None return (scheme, host, port) == (self.scheme, self.host, self.port) def urlopen( self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, chunked=False, body_pos=None, **response_kw ): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param url: The URL to perform the request on. :param body: Data to send in the request body, either :class:`str`, :class:`bytes`, an iterable of :class:`str`/:class:`bytes`, or a file-like object. :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. :param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When ``False``, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. 
:param chunked: If True, urllib3 will send the body using chunked transfer encoding. Otherwise, urllib3 will send the body using the standard content-length form. Defaults to False. :param int body_pos: Position to seek to in file-like body in the event of a retry or redirect. Typically this won't need to be set because urllib3 will auto-populate the value when needed. :param \\**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ parsed_url = parse_url(url) destination_scheme = parsed_url.scheme if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get("preload_content", True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) # Ensure that the URL we're connecting to is properly encoded if url.startswith("/"): url = six.ensure_str(_encode_target(url)) else: url = six.ensure_str(parsed_url.url) conn = None # Track whether `conn` needs to be released before # returning/raising/recursing. Update this variable if necessary, and # leave `release_conn` constant throughout the function. That way, if # the function recurses, the original value of `release_conn` will be # passed down into the recursive call, and its value will be respected. # # See issue #651 [1] for details. # # [1] <https://github.com/urllib3/urllib3/issues/651> release_this_conn = release_conn http_tunnel_required = connection_requires_http_tunnel( self.proxy, self.proxy_config, destination_scheme ) # Merge the proxy headers. Only done when not using HTTP CONNECT. We # have to copy the headers dict so we can safely change it without those # changes being reflected in anyone else's copy. if not http_tunnel_required: headers = headers.copy() headers.update(self.proxy_headers) # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. err = None # Keep track of whether we cleanly exited the except block. This # ensures we do proper cleanup in finally. clean_exit = False # Rewind body position, if needed. Record current position # for future rewinds in the event of a redirect/retry. body_pos = set_file_position(body, body_pos) try: # Request a connection from the queue. timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr( conn, "sock", None ) if is_new_proxy_conn and http_tunnel_required: self._prepare_proxy(conn) # Make the request on the httplib connection object. httplib_response = self._make_request( conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked, ) # If we're going to release the connection in ``finally:``, then # the response doesn't need to know about the connection. Otherwise # it will also try to release it and we'll have a double-release # mess. response_conn = conn if not release_conn else None # Pass method to Response for length checking response_kw["request_method"] = method # Import httplib's response into our own wrapper object response = self.ResponseCls.from_httplib( httplib_response, pool=self, connection=response_conn, retries=retries, **response_kw ) # Everything went great! 
clean_exit = True except EmptyPoolError: # Didn't get a connection from the pool, no need to clean up clean_exit = True release_this_conn = False raise except ( TimeoutError, HTTPException, SocketError, ProtocolError, BaseSSLError, SSLError, CertificateError, ) as e: # Discard the connection for these exceptions. It will be # replaced during the next _get_conn() call. clean_exit = False def _is_ssl_error_message_from_http_proxy(ssl_error): # We're trying to detect the message 'WRONG_VERSION_NUMBER' but # SSLErrors are kinda all over the place when it comes to the message, # so we try to cover our bases here! message = " ".join(re.split("[^a-z]", str(ssl_error).lower())) return ( "wrong version number" in message or "unknown protocol" in message ) # Try to detect a common user error with proxies which is to # set an HTTP proxy to be HTTPS when it should be 'http://' # (ie {'http': 'http://proxy', 'https': 'https://proxy'}) # Instead we add a nice error message and point to a URL. if ( isinstance(e, BaseSSLError) and self.proxy and _is_ssl_error_message_from_http_proxy(e) and conn.proxy and conn.proxy.scheme == "https" ): e = ProxyError( "Your proxy appears to only use HTTP and not HTTPS, " "try changing your proxy URL to be HTTP. See: " "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#https-proxy-error-http-proxy", SSLError(e), ) elif isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError("Cannot connect to proxy.", e) elif isinstance(e, (SocketError, HTTPException)): e = ProtocolError("Connection aborted.", e) retries = retries.increment( method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] ) retries.sleep() # Keep track of the error for the retry warning. err = e finally: if not clean_exit: # We hit some kind of exception, handled or otherwise. We need # to throw the connection away unless explicitly told not to. # Close the connection, set the variable to None, and make sure # we put the None back in the pool to avoid leaking it. conn = conn and conn.close() release_this_conn = True if release_this_conn: # Put the connection back to be reused. If the connection is # expired then it will be None, which will get replaced with a # fresh connection during _get_conn. self._put_conn(conn) if not conn: # Try again log.warning( "Retrying (%r) after connection broken by '%r': %s", retries, err, url ) return self.urlopen( method, url, body, headers, retries, redirect, assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, chunked=chunked, body_pos=body_pos, **response_kw ) # Handle redirect? redirect_location = redirect and response.get_redirect_location() if redirect_location: if response.status == 303: method = "GET" try: retries = retries.increment(method, url, response=response, _pool=self) except MaxRetryError: if retries.raise_on_redirect: response.drain_conn() raise return response response.drain_conn() retries.sleep_for_retry(response) log.debug("Redirecting %s -> %s", url, redirect_location) return self.urlopen( method, redirect_location, body, headers, retries=retries, redirect=redirect, assert_same_host=assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, chunked=chunked, body_pos=body_pos, **response_kw ) # Check if we should retry the HTTP response. 
has_retry_after = bool(response.headers.get("Retry-After")) if retries.is_retry(method, response.status, has_retry_after): try: retries = retries.increment(method, url, response=response, _pool=self) except MaxRetryError: if retries.raise_on_status: response.drain_conn() raise return response response.drain_conn() retries.sleep(response) log.debug("Retry: %s", url) return self.urlopen( method, url, body, headers, retries=retries, redirect=redirect, assert_same_host=assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, chunked=chunked, body_pos=body_pos, **response_kw ) return response class HTTPSConnectionPool(HTTPConnectionPool): """ Same as :class:`.HTTPConnectionPool`, but HTTPS. :class:`.HTTPSConnection` uses one of ``assert_fingerprint``, ``assert_hostname`` and ``host`` in this order to verify connections. If ``assert_hostname`` is False, no verification is done. The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl` is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket. """ scheme = "https" ConnectionCls = HTTPSConnection def __init__( self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, key_file=None, cert_file=None, cert_reqs=None, key_password=None, ca_certs=None, ssl_version=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None, **conn_kw ): HTTPConnectionPool.__init__( self, host, port, strict, timeout, maxsize, block, headers, retries, _proxy, _proxy_headers, **conn_kw ) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.key_password = key_password self.ca_certs = ca_certs self.ca_cert_dir = ca_cert_dir self.ssl_version = ssl_version self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint def _prepare_conn(self, conn): """ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` and establish the tunnel if proxy is used. """ if isinstance(conn, VerifiedHTTPSConnection): conn.set_cert( key_file=self.key_file, key_password=self.key_password, cert_file=self.cert_file, cert_reqs=self.cert_reqs, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint, ) conn.ssl_version = self.ssl_version return conn def _prepare_proxy(self, conn): """ Establishes a tunnel connection through HTTP CONNECT. Tunnel connection is established early because otherwise httplib would improperly set Host: header to proxy's IP:port. """ conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers) if self.proxy.scheme == "https": conn.tls_in_tls_required = True conn.connect() def _new_conn(self): """ Return a fresh :class:`http.client.HTTPSConnection`. """ self.num_connections += 1 log.debug( "Starting new HTTPS connection (%d): %s:%s", self.num_connections, self.host, self.port or "443", ) if not self.ConnectionCls or self.ConnectionCls is DummyConnection: raise SSLError( "Can't connect to HTTPS URL because the SSL module is not available." 
) actual_host = self.host actual_port = self.port if self.proxy is not None: actual_host = self.proxy.host actual_port = self.proxy.port conn = self.ConnectionCls( host=actual_host, port=actual_port, timeout=self.timeout.connect_timeout, strict=self.strict, cert_file=self.cert_file, key_file=self.key_file, key_password=self.key_password, **self.conn_kw ) return self._prepare_conn(conn) def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. if not getattr(conn, "sock", None): # AppEngine might not have `.sock` conn.connect() if not conn.is_verified: warnings.warn( ( "Unverified HTTPS request is being made to host '%s'. " "Adding certificate verification is strongly advised. See: " "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#ssl-warnings" % conn.host ), InsecureRequestWarning, ) if getattr(conn, "proxy_is_verified", None) is False: warnings.warn( ( "Unverified HTTPS connection done to an HTTPS proxy. " "Adding certificate verification is strongly advised. See: " "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#ssl-warnings" ), InsecureRequestWarning, ) def connection_from_url(url, **kw): """ Given a url, return an :class:`.ConnectionPool` instance of its host. This is a shortcut for not having to parse out the scheme, host, and port of the url before creating an :class:`.ConnectionPool` instance. :param url: Absolute URL string that must include the scheme. Port is optional. :param \\**kw: Passes additional parameters to the constructor of the appropriate :class:`.ConnectionPool`. Useful for specifying things like timeout, maxsize, headers, etc. Example:: >>> conn = connection_from_url('http://google.com/') >>> r = conn.request('GET', '/') """ scheme, host, port = get_host(url) port = port or port_by_scheme.get(scheme, 80) if scheme == "https": return HTTPSConnectionPool(host, port=port, **kw) else: return HTTPConnectionPool(host, port=port, **kw) def _normalize_host(host, scheme): """ Normalize hosts for comparisons and use with sockets. """ host = normalize_host(host, scheme) # httplib doesn't like it when we include brackets in IPv6 addresses # Specifically, if we include brackets but also pass the port then # httplib crazily doubles up the square brackets on the Host header. # Instead, we need to make sure we never pass ``None`` as the port. # However, for backward compatibility reasons we can't actually # *assert* that. See http://bugs.python.org/issue28539 if host.startswith("[") and host.endswith("]"): host = host[1:-1] return host def _close_pool_connections(pool): """Drains a queue of connections and closes each one.""" try: while True: conn = pool.get(block=False) if conn: conn.close() except queue.Empty: pass # Done.
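

# An illustrative sketch of driving a pool directly, without a
# PoolManager. The host, timeout, and retry values are assumptions for
# demonstration; guarded so importing this module has no side effects.
if __name__ == "__main__":
    pool = connection_from_url(
        "https://example.com/",
        maxsize=4,
        block=True,
        timeout=Timeout(connect=2.0, read=5.0),
        retries=Retry(total=2),
    )
    # request() comes from the RequestMethods mixin; the path is relative
    # because the pool is already bound to one host.
    r = pool.request("GET", "/")
    print(r.status, pool.num_connections, pool.num_requests)
    pool.close()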
39,990
Python
34.296558
106
0.58052
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/appengine.py
""" This module provides a pool manager that uses Google App Engine's `URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_. Example usage:: from urllib3 import PoolManager from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox if is_appengine_sandbox(): # AppEngineManager uses AppEngine's URLFetch API behind the scenes http = AppEngineManager() else: # PoolManager uses a socket-level API behind the scenes http = PoolManager() r = http.request('GET', 'https://google.com/') There are `limitations <https://cloud.google.com/appengine/docs/python/\ urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be the best choice for your application. There are three options for using urllib3 on Google App Engine: 1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is cost-effective in many circumstances as long as your usage is within the limitations. 2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets. Sockets also have `limitations and restrictions <https://cloud.google.com/appengine/docs/python/sockets/\ #limitations-and-restrictions>`_ and have a lower free quota than URLFetch. To use sockets, be sure to specify the following in your ``app.yaml``:: env_variables: GAE_USE_SOCKETS_HTTPLIB : 'true' 3. If you are using `App Engine Flexible <https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard :class:`PoolManager` without any configuration or special environment variables. """ from __future__ import absolute_import import io import logging import warnings from ..exceptions import ( HTTPError, HTTPWarning, MaxRetryError, ProtocolError, SSLError, TimeoutError, ) from ..packages.six.moves.urllib.parse import urljoin from ..request import RequestMethods from ..response import HTTPResponse from ..util.retry import Retry from ..util.timeout import Timeout from . import _appengine_environ try: from google.appengine.api import urlfetch except ImportError: urlfetch = None log = logging.getLogger(__name__) class AppEnginePlatformWarning(HTTPWarning): pass class AppEnginePlatformError(HTTPError): pass class AppEngineManager(RequestMethods): """ Connection manager for Google App Engine sandbox applications. This manager uses the URLFetch service directly instead of using the emulated httplib, and is subject to URLFetch limitations as described in the App Engine documentation `here <https://cloud.google.com/appengine/docs/python/urlfetch>`_. Notably it will raise an :class:`AppEnginePlatformError` if: * URLFetch is not available. * If you attempt to use this on App Engine Flexible, as full socket support is available. * If a request size is more than 10 megabytes. * If a response size is more than 32 megabytes. * If you use an unsupported request method such as OPTIONS. Beyond those cases, it will raise normal urllib3 errors. """ def __init__( self, headers=None, retries=None, validate_certificate=True, urlfetch_retries=True, ): if not urlfetch: raise AppEnginePlatformError( "URLFetch is not available in this environment." ) warnings.warn( "urllib3 is using URLFetch on Google App Engine sandbox instead " "of sockets. 
To use sockets directly instead of URLFetch see "
            "https://urllib3.readthedocs.io/en/1.26.x/reference/urllib3.contrib.html.",
            AppEnginePlatformWarning,
        )

        RequestMethods.__init__(self, headers)
        self.validate_certificate = validate_certificate
        self.urlfetch_retries = urlfetch_retries

        self.retries = retries or Retry.DEFAULT

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Return False to re-raise any potential exceptions
        return False

    def urlopen(
        self,
        method,
        url,
        body=None,
        headers=None,
        retries=None,
        redirect=True,
        timeout=Timeout.DEFAULT_TIMEOUT,
        **response_kw
    ):

        retries = self._get_retries(retries, redirect)

        try:
            follow_redirects = redirect and retries.redirect != 0 and retries.total
            response = urlfetch.fetch(
                url,
                payload=body,
                method=method,
                headers=headers or {},
                allow_truncated=False,
                follow_redirects=self.urlfetch_retries and follow_redirects,
                deadline=self._get_absolute_timeout(timeout),
                validate_certificate=self.validate_certificate,
            )
        except urlfetch.DeadlineExceededError as e:
            raise TimeoutError(self, e)

        except urlfetch.InvalidURLError as e:
            if "too large" in str(e):
                raise AppEnginePlatformError(
                    "URLFetch request too large, URLFetch only "
                    "supports requests up to 10mb in size.",
                    e,
                )
            raise ProtocolError(e)

        except urlfetch.DownloadError as e:
            if "Too many redirects" in str(e):
                raise MaxRetryError(self, url, reason=e)
            raise ProtocolError(e)

        except urlfetch.ResponseTooLargeError as e:
            raise AppEnginePlatformError(
                "URLFetch response too large, URLFetch only supports "
                "responses up to 32mb in size.",
                e,
            )

        except urlfetch.SSLCertificateError as e:
            raise SSLError(e)

        except urlfetch.InvalidMethodError as e:
            raise AppEnginePlatformError(
                "URLFetch does not support method: %s" % method, e
            )

        http_response = self._urlfetch_response_to_http_response(
            response, retries=retries, **response_kw
        )

        # Handle any redirects.
        redirect_location = redirect and http_response.get_redirect_location()
        if redirect_location:
            # Check for redirect response
            if self.urlfetch_retries and retries.raise_on_redirect:
                raise MaxRetryError(self, url, "too many redirects")
            else:
                if http_response.status == 303:
                    method = "GET"

                try:
                    retries = retries.increment(
                        method, url, response=http_response, _pool=self
                    )
                except MaxRetryError:
                    if retries.raise_on_redirect:
                        raise MaxRetryError(self, url, "too many redirects")
                    return http_response

                retries.sleep_for_retry(http_response)
                log.debug("Redirecting %s -> %s", url, redirect_location)
                redirect_url = urljoin(url, redirect_location)
                return self.urlopen(
                    method,
                    redirect_url,
                    body,
                    headers,
                    retries=retries,
                    redirect=redirect,
                    timeout=timeout,
                    **response_kw
                )

        # Check if we should retry the HTTP response.
        has_retry_after = bool(http_response.headers.get("Retry-After"))
        if retries.is_retry(method, http_response.status, has_retry_after):
            retries = retries.increment(method, url, response=http_response, _pool=self)
            log.debug("Retry: %s", url)
            retries.sleep(http_response)
            return self.urlopen(
                method,
                url,
                body=body,
                headers=headers,
                retries=retries,
                redirect=redirect,
                timeout=timeout,
                **response_kw
            )

        return http_response

    def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):

        if is_prod_appengine():
            # Production GAE handles deflate encoding automatically, but does
            # not remove the encoding header.
content_encoding = urlfetch_resp.headers.get("content-encoding") if content_encoding == "deflate": del urlfetch_resp.headers["content-encoding"] transfer_encoding = urlfetch_resp.headers.get("transfer-encoding") # We have a full response's content, # so let's make sure we don't report ourselves as chunked data. if transfer_encoding == "chunked": encodings = transfer_encoding.split(",") encodings.remove("chunked") urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings) original_response = HTTPResponse( # In order for decoding to work, we must present the content as # a file-like object. body=io.BytesIO(urlfetch_resp.content), msg=urlfetch_resp.header_msg, headers=urlfetch_resp.headers, status=urlfetch_resp.status_code, **response_kw ) return HTTPResponse( body=io.BytesIO(urlfetch_resp.content), headers=urlfetch_resp.headers, status=urlfetch_resp.status_code, original_response=original_response, **response_kw ) def _get_absolute_timeout(self, timeout): if timeout is Timeout.DEFAULT_TIMEOUT: return None # Defer to URLFetch's default. if isinstance(timeout, Timeout): if timeout._read is not None or timeout._connect is not None: warnings.warn( "URLFetch does not support granular timeout settings, " "reverting to total or default URLFetch timeout.", AppEnginePlatformWarning, ) return timeout.total return timeout def _get_retries(self, retries, redirect): if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if retries.connect or retries.read or retries.redirect: warnings.warn( "URLFetch only supports total retries and does not " "recognize connect, read, or redirect retry parameters.", AppEnginePlatformWarning, ) return retries # Alias methods from _appengine_environ to maintain public API interface. is_appengine = _appengine_environ.is_appengine is_appengine_sandbox = _appengine_environ.is_appengine_sandbox is_local_appengine = _appengine_environ.is_local_appengine is_prod_appengine = _appengine_environ.is_prod_appengine is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
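# Hedged usage sketch (editorial addition): inside a first-generation App
# Engine sandbox, AppEngineManager exposes the same RequestMethods interface
# as a PoolManager, so it can be dropped in directly. The URL is a
# placeholder; the Retry settings are illustrative only.
if __name__ == "__main__":  # pragma: no cover
    if is_appengine_sandbox():
        http = AppEngineManager(retries=Retry(total=3, redirect=2))
        r = http.request("GET", "https://www.example.com/")
        print(r.status)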
11,012
Python
33.961905
88
0.612241
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/socks.py
# -*- coding: utf-8 -*- """ This module contains provisional support for SOCKS proxies from within urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and SOCKS5. To enable its functionality, either install PySocks or install this module with the ``socks`` extra. The SOCKS implementation supports the full range of urllib3 features. It also supports the following SOCKS features: - SOCKS4A (``proxy_url='socks4a://...``) - SOCKS4 (``proxy_url='socks4://...``) - SOCKS5 with remote DNS (``proxy_url='socks5h://...``) - SOCKS5 with local DNS (``proxy_url='socks5://...``) - Usernames and passwords for the SOCKS proxy .. note:: It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in your ``proxy_url`` to ensure that DNS resolution is done from the remote server instead of client-side when connecting to a domain name. SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5 supports IPv4, IPv6, and domain names. When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url`` will be sent as the ``userid`` section of the SOCKS request: .. code-block:: python proxy_url="socks4a://<userid>@proxy-host" When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion of the ``proxy_url`` will be sent as the username/password to authenticate with the proxy: .. code-block:: python proxy_url="socks5h://<username>:<password>@proxy-host" """ from __future__ import absolute_import try: import socks except ImportError: import warnings from ..exceptions import DependencyWarning warnings.warn( ( "SOCKS support in urllib3 requires the installation of optional " "dependencies: specifically, PySocks. For more information, see " "https://urllib3.readthedocs.io/en/1.26.x/contrib.html#socks-proxies" ), DependencyWarning, ) raise from socket import error as SocketError from socket import timeout as SocketTimeout from ..connection import HTTPConnection, HTTPSConnection from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool from ..exceptions import ConnectTimeoutError, NewConnectionError from ..poolmanager import PoolManager from ..util.url import parse_url try: import ssl except ImportError: ssl = None class SOCKSConnection(HTTPConnection): """ A plain-text HTTP connection that connects via a SOCKS proxy. """ def __init__(self, *args, **kwargs): self._socks_options = kwargs.pop("_socks_options") super(SOCKSConnection, self).__init__(*args, **kwargs) def _new_conn(self): """ Establish a new connection via the SOCKS proxy. """ extra_kw = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: conn = socks.create_connection( (self.host, self.port), proxy_type=self._socks_options["socks_version"], proxy_addr=self._socks_options["proxy_host"], proxy_port=self._socks_options["proxy_port"], proxy_username=self._socks_options["username"], proxy_password=self._socks_options["password"], proxy_rdns=self._socks_options["rdns"], timeout=self.timeout, **extra_kw ) except SocketTimeout: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout), ) except socks.ProxyError as e: # This is fragile as hell, but it seems to be the only way to raise # useful errors here. if e.socket_err: error = e.socket_err if isinstance(error, SocketTimeout): raise ConnectTimeoutError( self, "Connection to %s timed out. 
(connect timeout=%s)" % (self.host, self.timeout), ) else: raise NewConnectionError( self, "Failed to establish a new connection: %s" % error ) else: raise NewConnectionError( self, "Failed to establish a new connection: %s" % e ) except SocketError as e: # Defensive: PySocks should catch all these. raise NewConnectionError( self, "Failed to establish a new connection: %s" % e ) return conn # We don't need to duplicate the Verified/Unverified distinction from # urllib3/connection.py here because the HTTPSConnection will already have been # correctly set to either the Verified or Unverified form by that module. This # means the SOCKSHTTPSConnection will automatically be the correct type. class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): pass class SOCKSHTTPConnectionPool(HTTPConnectionPool): ConnectionCls = SOCKSConnection class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): ConnectionCls = SOCKSHTTPSConnection class SOCKSProxyManager(PoolManager): """ A version of the urllib3 ProxyManager that routes connections via the defined SOCKS proxy. """ pool_classes_by_scheme = { "http": SOCKSHTTPConnectionPool, "https": SOCKSHTTPSConnectionPool, } def __init__( self, proxy_url, username=None, password=None, num_pools=10, headers=None, **connection_pool_kw ): parsed = parse_url(proxy_url) if username is None and password is None and parsed.auth is not None: split = parsed.auth.split(":") if len(split) == 2: username, password = split if parsed.scheme == "socks5": socks_version = socks.PROXY_TYPE_SOCKS5 rdns = False elif parsed.scheme == "socks5h": socks_version = socks.PROXY_TYPE_SOCKS5 rdns = True elif parsed.scheme == "socks4": socks_version = socks.PROXY_TYPE_SOCKS4 rdns = False elif parsed.scheme == "socks4a": socks_version = socks.PROXY_TYPE_SOCKS4 rdns = True else: raise ValueError("Unable to determine SOCKS version from %s" % proxy_url) self.proxy_url = proxy_url socks_options = { "socks_version": socks_version, "proxy_host": parsed.host, "proxy_port": parsed.port, "username": username, "password": password, "rdns": rdns, } connection_pool_kw["_socks_options"] = socks_options super(SOCKSProxyManager, self).__init__( num_pools, headers, **connection_pool_kw ) self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
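# Hedged usage sketch (editorial addition): once constructed, a
# SOCKSProxyManager is used exactly like a PoolManager. The proxy address and
# credentials are placeholders; the 'socks5h' scheme keeps DNS resolution on
# the proxy side, as the module docstring recommends.
if __name__ == "__main__":  # pragma: no cover
    proxy = SOCKSProxyManager("socks5h://user:[email protected]:1080")
    r = proxy.request("GET", "https://www.example.com/")
    print(r.status)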
7,097
Python
31.709677
85
0.61364
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/pyopenssl.py
""" TLS with SNI_-support for Python 2. Follow these instructions if you would like to verify TLS certificates in Python 2. Note, the default libraries do *not* do certificate checking; you need to do additional work to validate certificates yourself. This needs the following packages installed: * `pyOpenSSL`_ (tested with 16.0.0) * `cryptography`_ (minimum 1.3.4, from pyopenssl) * `idna`_ (minimum 2.0, from cryptography) However, pyopenssl depends on cryptography, which depends on idna, so while we use all three directly here we end up having relatively few packages required. You can install them with the following command: .. code-block:: bash $ python -m pip install pyopenssl cryptography idna To activate certificate checking, call :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code before you begin making HTTP requests. This can be done in a ``sitecustomize`` module, or at any other time before your application begins using ``urllib3``, like this: .. code-block:: python try: import urllib3.contrib.pyopenssl urllib3.contrib.pyopenssl.inject_into_urllib3() except ImportError: pass Now you can use :mod:`urllib3` as you normally would, and it will support SNI when the required modules are installed. Activating this module also has the positive side effect of disabling SSL/TLS compression in Python 2 (see `CRIME attack`_). .. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication .. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) .. _pyopenssl: https://www.pyopenssl.org .. _cryptography: https://cryptography.io .. _idna: https://github.com/kjd/idna """ from __future__ import absolute_import import OpenSSL.crypto import OpenSSL.SSL from cryptography import x509 from cryptography.hazmat.backends.openssl import backend as openssl_backend try: from cryptography.x509 import UnsupportedExtension except ImportError: # UnsupportedExtension is gone in cryptography >= 2.1.0 class UnsupportedExtension(Exception): pass from io import BytesIO from socket import error as SocketError from socket import timeout try: # Platform-specific: Python 2 from socket import _fileobject except ImportError: # Platform-specific: Python 3 _fileobject = None from ..packages.backports.makefile import backport_makefile import logging import ssl import sys import warnings from .. import util from ..packages import six from ..util.ssl_ import PROTOCOL_TLS_CLIENT warnings.warn( "'urllib3.contrib.pyopenssl' module is deprecated and will be removed " "in a future release of urllib3 2.x. Read more in this issue: " "https://github.com/urllib3/urllib3/issues/2680", category=DeprecationWarning, stacklevel=2, ) __all__ = ["inject_into_urllib3", "extract_from_urllib3"] # SNI always works. HAS_SNI = True # Map from urllib3 to PyOpenSSL compatible parameter-values. 
_openssl_versions = { util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD, PROTOCOL_TLS_CLIENT: OpenSSL.SSL.SSLv23_METHOD, ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, } if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"): _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"): _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"): _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD _stdlib_to_openssl_verify = { ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } _openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items()) # OpenSSL will only write 16K at a time SSL_WRITE_BLOCKSIZE = 16384 orig_util_HAS_SNI = util.HAS_SNI orig_util_SSLContext = util.ssl_.SSLContext log = logging.getLogger(__name__) def inject_into_urllib3(): "Monkey-patch urllib3 with PyOpenSSL-backed SSL-support." _validate_dependencies_met() util.SSLContext = PyOpenSSLContext util.ssl_.SSLContext = PyOpenSSLContext util.HAS_SNI = HAS_SNI util.ssl_.HAS_SNI = HAS_SNI util.IS_PYOPENSSL = True util.ssl_.IS_PYOPENSSL = True def extract_from_urllib3(): "Undo monkey-patching by :func:`inject_into_urllib3`." util.SSLContext = orig_util_SSLContext util.ssl_.SSLContext = orig_util_SSLContext util.HAS_SNI = orig_util_HAS_SNI util.ssl_.HAS_SNI = orig_util_HAS_SNI util.IS_PYOPENSSL = False util.ssl_.IS_PYOPENSSL = False def _validate_dependencies_met(): """ Verifies that PyOpenSSL's package-level dependencies have been met. Throws `ImportError` if they are not met. """ # Method added in `cryptography==1.1`; not available in older versions from cryptography.x509.extensions import Extensions if getattr(Extensions, "get_extension_for_class", None) is None: raise ImportError( "'cryptography' module missing required functionality. " "Try upgrading to v1.3.4 or newer." ) # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509 # attribute is only present on those versions. from OpenSSL.crypto import X509 x509 = X509() if getattr(x509, "_x509", None) is None: raise ImportError( "'pyOpenSSL' module missing required functionality. " "Try upgrading to v0.14 or newer." ) def _dnsname_to_stdlib(name): """ Converts a dNSName SubjectAlternativeName field to the form used by the standard library on the given Python version. Cryptography produces a dNSName as a unicode string that was idna-decoded from ASCII bytes. We need to idna-encode that string to get it back, and then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). If the name cannot be idna-encoded then we return None signalling that the name given should be skipped. """ def idna_encode(name): """ Borrowed wholesale from the Python Cryptography Project. It turns out that we can't just safely call `idna.encode`: it can explode for wildcard names. This avoids that problem. """ import idna try: for prefix in [u"*.", u"."]: if name.startswith(prefix): name = name[len(prefix) :] return prefix.encode("ascii") + idna.encode(name) return idna.encode(name) except idna.core.IDNAError: return None # Don't send IPv6 addresses through the IDNA encoder. 
if ":" in name: return name name = idna_encode(name) if name is None: return None elif sys.version_info >= (3, 0): name = name.decode("utf-8") return name def get_subj_alt_name(peer_cert): """ Given an PyOpenSSL certificate, provides all the subject alternative names. """ # Pass the cert to cryptography, which has much better APIs for this. if hasattr(peer_cert, "to_cryptography"): cert = peer_cert.to_cryptography() else: der = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, peer_cert) cert = x509.load_der_x509_certificate(der, openssl_backend) # We want to find the SAN extension. Ask Cryptography to locate it (it's # faster than looping in Python) try: ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value except x509.ExtensionNotFound: # No such extension, return the empty list. return [] except ( x509.DuplicateExtension, UnsupportedExtension, x509.UnsupportedGeneralNameType, UnicodeError, ) as e: # A problem has been found with the quality of the certificate. Assume # no SAN field is present. log.warning( "A problem was encountered with the certificate that prevented " "urllib3 from finding the SubjectAlternativeName field. This can " "affect certificate validation. The error was %s", e, ) return [] # We want to return dNSName and iPAddress fields. We need to cast the IPs # back to strings because the match_hostname function wants them as # strings. # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8 # decoded. This is pretty frustrating, but that's what the standard library # does with certificates, and so we need to attempt to do the same. # We also want to skip over names which cannot be idna encoded. names = [ ("DNS", name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName)) if name is not None ] names.extend( ("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress) ) return names class WrappedSocket(object): """API-compatibility wrapper for Python OpenSSL's Connection-class. Note: _makefile_refs, _drop() and _reuse() are needed for the garbage collector of pypy. 
""" def __init__(self, connection, socket, suppress_ragged_eofs=True): self.connection = connection self.socket = socket self.suppress_ragged_eofs = suppress_ragged_eofs self._makefile_refs = 0 self._closed = False def fileno(self): return self.socket.fileno() # Copy-pasted from Python 3.5 source code def _decref_socketios(self): if self._makefile_refs > 0: self._makefile_refs -= 1 if self._closed: self.close() def recv(self, *args, **kwargs): try: data = self.connection.recv(*args, **kwargs) except OpenSSL.SSL.SysCallError as e: if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): return b"" else: raise SocketError(str(e)) except OpenSSL.SSL.ZeroReturnError: if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: return b"" else: raise except OpenSSL.SSL.WantReadError: if not util.wait_for_read(self.socket, self.socket.gettimeout()): raise timeout("The read operation timed out") else: return self.recv(*args, **kwargs) # TLS 1.3 post-handshake authentication except OpenSSL.SSL.Error as e: raise ssl.SSLError("read error: %r" % e) else: return data def recv_into(self, *args, **kwargs): try: return self.connection.recv_into(*args, **kwargs) except OpenSSL.SSL.SysCallError as e: if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): return 0 else: raise SocketError(str(e)) except OpenSSL.SSL.ZeroReturnError: if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: return 0 else: raise except OpenSSL.SSL.WantReadError: if not util.wait_for_read(self.socket, self.socket.gettimeout()): raise timeout("The read operation timed out") else: return self.recv_into(*args, **kwargs) # TLS 1.3 post-handshake authentication except OpenSSL.SSL.Error as e: raise ssl.SSLError("read error: %r" % e) def settimeout(self, timeout): return self.socket.settimeout(timeout) def _send_until_done(self, data): while True: try: return self.connection.send(data) except OpenSSL.SSL.WantWriteError: if not util.wait_for_write(self.socket, self.socket.gettimeout()): raise timeout() continue except OpenSSL.SSL.SysCallError as e: raise SocketError(str(e)) def sendall(self, data): total_sent = 0 while total_sent < len(data): sent = self._send_until_done( data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE] ) total_sent += sent def shutdown(self): # FIXME rethrow compatible exceptions should we ever use this self.connection.shutdown() def close(self): if self._makefile_refs < 1: try: self._closed = True return self.connection.close() except OpenSSL.SSL.Error: return else: self._makefile_refs -= 1 def getpeercert(self, binary_form=False): x509 = self.connection.get_peer_certificate() if not x509: return x509 if binary_form: return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509) return { "subject": ((("commonName", x509.get_subject().CN),),), "subjectAltName": get_subj_alt_name(x509), } def version(self): return self.connection.get_protocol_version_name() def _reuse(self): self._makefile_refs += 1 def _drop(self): if self._makefile_refs < 1: self.close() else: self._makefile_refs -= 1 if _fileobject: # Platform-specific: Python 2 def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) else: # Platform-specific: Python 3 makefile = backport_makefile WrappedSocket.makefile = makefile class PyOpenSSLContext(object): """ I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible for translating the interface of the standard library ``SSLContext`` object to calls into PyOpenSSL. 
""" def __init__(self, protocol): self.protocol = _openssl_versions[protocol] self._ctx = OpenSSL.SSL.Context(self.protocol) self._options = 0 self.check_hostname = False @property def options(self): return self._options @options.setter def options(self, value): self._options = value self._ctx.set_options(value) @property def verify_mode(self): return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()] @verify_mode.setter def verify_mode(self, value): self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback) def set_default_verify_paths(self): self._ctx.set_default_verify_paths() def set_ciphers(self, ciphers): if isinstance(ciphers, six.text_type): ciphers = ciphers.encode("utf-8") self._ctx.set_cipher_list(ciphers) def load_verify_locations(self, cafile=None, capath=None, cadata=None): if cafile is not None: cafile = cafile.encode("utf-8") if capath is not None: capath = capath.encode("utf-8") try: self._ctx.load_verify_locations(cafile, capath) if cadata is not None: self._ctx.load_verify_locations(BytesIO(cadata)) except OpenSSL.SSL.Error as e: raise ssl.SSLError("unable to load trusted certificates: %r" % e) def load_cert_chain(self, certfile, keyfile=None, password=None): self._ctx.use_certificate_chain_file(certfile) if password is not None: if not isinstance(password, six.binary_type): password = password.encode("utf-8") self._ctx.set_passwd_cb(lambda *_: password) self._ctx.use_privatekey_file(keyfile or certfile) def set_alpn_protocols(self, protocols): protocols = [six.ensure_binary(p) for p in protocols] return self._ctx.set_alpn_protos(protocols) def wrap_socket( self, sock, server_side=False, do_handshake_on_connect=True, suppress_ragged_eofs=True, server_hostname=None, ): cnx = OpenSSL.SSL.Connection(self._ctx, sock) if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3 server_hostname = server_hostname.encode("utf-8") if server_hostname is not None: cnx.set_tlsext_host_name(server_hostname) cnx.set_connect_state() while True: try: cnx.do_handshake() except OpenSSL.SSL.WantReadError: if not util.wait_for_read(sock, sock.gettimeout()): raise timeout("select timed out") continue except OpenSSL.SSL.Error as e: raise ssl.SSLError("bad handshake: %r" % e) break return WrappedSocket(cnx, sock) def _verify_callback(cnx, x509, err_no, err_depth, return_code): return err_no == 0
17,055
Python
31.863198
88
0.640575
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/securetransport.py
""" SecureTranport support for urllib3 via ctypes. This makes platform-native TLS available to urllib3 users on macOS without the use of a compiler. This is an important feature because the Python Package Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL that ships with macOS is not capable of doing TLSv1.2. The only way to resolve this is to give macOS users an alternative solution to the problem, and that solution is to use SecureTransport. We use ctypes here because this solution must not require a compiler. That's because pip is not allowed to require a compiler either. This is not intended to be a seriously long-term solution to this problem. The hope is that PEP 543 will eventually solve this issue for us, at which point we can retire this contrib module. But in the short term, we need to solve the impending tire fire that is Python on Mac without this kind of contrib module. So...here we are. To use this module, simply import and inject it:: import urllib3.contrib.securetransport urllib3.contrib.securetransport.inject_into_urllib3() Happy TLSing! This code is a bastardised version of the code found in Will Bond's oscrypto library. An enormous debt is owed to him for blazing this trail for us. For that reason, this code should be considered to be covered both by urllib3's license and by oscrypto's: .. code-block:: Copyright (c) 2015-2016 Will Bond <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import absolute_import import contextlib import ctypes import errno import os.path import shutil import socket import ssl import struct import threading import weakref import six from .. import util from ..util.ssl_ import PROTOCOL_TLS_CLIENT from ._securetransport.bindings import CoreFoundation, Security, SecurityConst from ._securetransport.low_level import ( _assert_no_error, _build_tls_unknown_ca_alert, _cert_array_from_pem, _create_cfstring_array, _load_client_cert_chain, _temporary_keychain, ) try: # Platform-specific: Python 2 from socket import _fileobject except ImportError: # Platform-specific: Python 3 _fileobject = None from ..packages.backports.makefile import backport_makefile __all__ = ["inject_into_urllib3", "extract_from_urllib3"] # SNI always works HAS_SNI = True orig_util_HAS_SNI = util.HAS_SNI orig_util_SSLContext = util.ssl_.SSLContext # This dictionary is used by the read callback to obtain a handle to the # calling wrapped socket. This is a pretty silly approach, but for now it'll # do. 
I feel like I should be able to smuggle a handle to the wrapped socket # directly in the SSLConnectionRef, but for now this approach will work I # guess. # # We need to lock around this structure for inserts, but we don't do it for # reads/writes in the callbacks. The reasoning here goes as follows: # # 1. It is not possible to call into the callbacks before the dictionary is # populated, so once in the callback the id must be in the dictionary. # 2. The callbacks don't mutate the dictionary, they only read from it, and # so cannot conflict with any of the insertions. # # This is good: if we had to lock in the callbacks we'd drastically slow down # the performance of this code. _connection_refs = weakref.WeakValueDictionary() _connection_ref_lock = threading.Lock() # Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over # for no better reason than we need *a* limit, and this one is right there. SSL_WRITE_BLOCKSIZE = 16384 # This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to # individual cipher suites. We need to do this because this is how # SecureTransport wants them. CIPHER_SUITES = [ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA, SecurityConst.TLS_AES_256_GCM_SHA384, SecurityConst.TLS_AES_128_GCM_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_AES_128_CCM_8_SHA256, SecurityConst.TLS_AES_128_CCM_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256, SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA, SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA, ] # Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of # TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. 
# TLSv1 to 1.2 are supported on macOS 10.8+ _protocol_to_min_max = { util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), PROTOCOL_TLS_CLIENT: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), } if hasattr(ssl, "PROTOCOL_SSLv2"): _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = ( SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2, ) if hasattr(ssl, "PROTOCOL_SSLv3"): _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = ( SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3, ) if hasattr(ssl, "PROTOCOL_TLSv1"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = ( SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1, ) if hasattr(ssl, "PROTOCOL_TLSv1_1"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = ( SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11, ) if hasattr(ssl, "PROTOCOL_TLSv1_2"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12, ) def inject_into_urllib3(): """ Monkey-patch urllib3 with SecureTransport-backed SSL-support. """ util.SSLContext = SecureTransportContext util.ssl_.SSLContext = SecureTransportContext util.HAS_SNI = HAS_SNI util.ssl_.HAS_SNI = HAS_SNI util.IS_SECURETRANSPORT = True util.ssl_.IS_SECURETRANSPORT = True def extract_from_urllib3(): """ Undo monkey-patching by :func:`inject_into_urllib3`. """ util.SSLContext = orig_util_SSLContext util.ssl_.SSLContext = orig_util_SSLContext util.HAS_SNI = orig_util_HAS_SNI util.ssl_.HAS_SNI = orig_util_HAS_SNI util.IS_SECURETRANSPORT = False util.ssl_.IS_SECURETRANSPORT = False def _read_callback(connection_id, data_buffer, data_length_pointer): """ SecureTransport read callback. This is called by ST to request that data be returned from the socket. """ wrapped_socket = None try: wrapped_socket = _connection_refs.get(connection_id) if wrapped_socket is None: return SecurityConst.errSSLInternal base_socket = wrapped_socket.socket requested_length = data_length_pointer[0] timeout = wrapped_socket.gettimeout() error = None read_count = 0 try: while read_count < requested_length: if timeout is None or timeout >= 0: if not util.wait_for_read(base_socket, timeout): raise socket.error(errno.EAGAIN, "timed out") remaining = requested_length - read_count buffer = (ctypes.c_char * remaining).from_address( data_buffer + read_count ) chunk_size = base_socket.recv_into(buffer, remaining) read_count += chunk_size if not chunk_size: if not read_count: return SecurityConst.errSSLClosedGraceful break except (socket.error) as e: error = e.errno if error is not None and error != errno.EAGAIN: data_length_pointer[0] = read_count if error == errno.ECONNRESET or error == errno.EPIPE: return SecurityConst.errSSLClosedAbort raise data_length_pointer[0] = read_count if read_count != requested_length: return SecurityConst.errSSLWouldBlock return 0 except Exception as e: if wrapped_socket is not None: wrapped_socket._exception = e return SecurityConst.errSSLInternal def _write_callback(connection_id, data_buffer, data_length_pointer): """ SecureTransport write callback. This is called by ST to request that data actually be sent on the network. 
""" wrapped_socket = None try: wrapped_socket = _connection_refs.get(connection_id) if wrapped_socket is None: return SecurityConst.errSSLInternal base_socket = wrapped_socket.socket bytes_to_write = data_length_pointer[0] data = ctypes.string_at(data_buffer, bytes_to_write) timeout = wrapped_socket.gettimeout() error = None sent = 0 try: while sent < bytes_to_write: if timeout is None or timeout >= 0: if not util.wait_for_write(base_socket, timeout): raise socket.error(errno.EAGAIN, "timed out") chunk_sent = base_socket.send(data) sent += chunk_sent # This has some needless copying here, but I'm not sure there's # much value in optimising this data path. data = data[chunk_sent:] except (socket.error) as e: error = e.errno if error is not None and error != errno.EAGAIN: data_length_pointer[0] = sent if error == errno.ECONNRESET or error == errno.EPIPE: return SecurityConst.errSSLClosedAbort raise data_length_pointer[0] = sent if sent != bytes_to_write: return SecurityConst.errSSLWouldBlock return 0 except Exception as e: if wrapped_socket is not None: wrapped_socket._exception = e return SecurityConst.errSSLInternal # We need to keep these two objects references alive: if they get GC'd while # in use then SecureTransport could attempt to call a function that is in freed # memory. That would be...uh...bad. Yeah, that's the word. Bad. _read_callback_pointer = Security.SSLReadFunc(_read_callback) _write_callback_pointer = Security.SSLWriteFunc(_write_callback) class WrappedSocket(object): """ API-compatibility wrapper for Python's OpenSSL wrapped socket object. Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage collector of PyPy. """ def __init__(self, socket): self.socket = socket self.context = None self._makefile_refs = 0 self._closed = False self._exception = None self._keychain = None self._keychain_dir = None self._client_cert_chain = None # We save off the previously-configured timeout and then set it to # zero. This is done because we use select and friends to handle the # timeouts, but if we leave the timeout set on the lower socket then # Python will "kindly" call select on that socket again for us. Avoid # that by forcing the timeout to zero. self._timeout = self.socket.gettimeout() self.socket.settimeout(0) @contextlib.contextmanager def _raise_on_error(self): """ A context manager that can be used to wrap calls that do I/O from SecureTransport. If any of the I/O callbacks hit an exception, this context manager will correctly propagate the exception after the fact. This avoids silently swallowing those exceptions. It also correctly forces the socket closed. """ self._exception = None # We explicitly don't catch around this yield because in the unlikely # event that an exception was hit in the block we don't want to swallow # it. yield if self._exception is not None: exception, self._exception = self._exception, None self.close() raise exception def _set_ciphers(self): """ Sets up the allowed ciphers. By default this matches the set in util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done custom and doesn't allow changing at this time, mostly because parsing OpenSSL cipher strings is going to be a freaking nightmare. """ ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES) result = Security.SSLSetEnabledCiphers( self.context, ciphers, len(CIPHER_SUITES) ) _assert_no_error(result) def _set_alpn_protocols(self, protocols): """ Sets up the ALPN protocols on the context. 
""" if not protocols: return protocols_arr = _create_cfstring_array(protocols) try: result = Security.SSLSetALPNProtocols(self.context, protocols_arr) _assert_no_error(result) finally: CoreFoundation.CFRelease(protocols_arr) def _custom_validate(self, verify, trust_bundle): """ Called when we have set custom validation. We do this in two cases: first, when cert validation is entirely disabled; and second, when using a custom trust DB. Raises an SSLError if the connection is not trusted. """ # If we disabled cert validation, just say: cool. if not verify: return successes = ( SecurityConst.kSecTrustResultUnspecified, SecurityConst.kSecTrustResultProceed, ) try: trust_result = self._evaluate_trust(trust_bundle) if trust_result in successes: return reason = "error code: %d" % (trust_result,) except Exception as e: # Do not trust on error reason = "exception: %r" % (e,) # SecureTransport does not send an alert nor shuts down the connection. rec = _build_tls_unknown_ca_alert(self.version()) self.socket.sendall(rec) # close the connection immediately # l_onoff = 1, activate linger # l_linger = 0, linger for 0 seoncds opts = struct.pack("ii", 1, 0) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts) self.close() raise ssl.SSLError("certificate verify failed, %s" % reason) def _evaluate_trust(self, trust_bundle): # We want data in memory, so load it up. if os.path.isfile(trust_bundle): with open(trust_bundle, "rb") as f: trust_bundle = f.read() cert_array = None trust = Security.SecTrustRef() try: # Get a CFArray that contains the certs we want. cert_array = _cert_array_from_pem(trust_bundle) # Ok, now the hard part. We want to get the SecTrustRef that ST has # created for this connection, shove our CAs into it, tell ST to # ignore everything else it knows, and then ask if it can build a # chain. This is a buuuunch of code. result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust)) _assert_no_error(result) if not trust: raise ssl.SSLError("Failed to copy trust reference") result = Security.SecTrustSetAnchorCertificates(trust, cert_array) _assert_no_error(result) result = Security.SecTrustSetAnchorCertificatesOnly(trust, True) _assert_no_error(result) trust_result = Security.SecTrustResultType() result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result)) _assert_no_error(result) finally: if trust: CoreFoundation.CFRelease(trust) if cert_array is not None: CoreFoundation.CFRelease(cert_array) return trust_result.value def handshake( self, server_hostname, verify, trust_bundle, min_version, max_version, client_cert, client_key, client_key_passphrase, alpn_protocols, ): """ Actually performs the TLS handshake. This is run automatically by wrapped socket, and shouldn't be needed in user code. """ # First, we do the initial bits of connection setup. We need to create # a context, set its I/O funcs, and set the connection reference. self.context = Security.SSLCreateContext( None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType ) result = Security.SSLSetIOFuncs( self.context, _read_callback_pointer, _write_callback_pointer ) _assert_no_error(result) # Here we need to compute the handle to use. We do this by taking the # id of self modulo 2**31 - 1. If this is already in the dictionary, we # just keep incrementing by one until we find a free space. 
with _connection_ref_lock: handle = id(self) % 2147483647 while handle in _connection_refs: handle = (handle + 1) % 2147483647 _connection_refs[handle] = self result = Security.SSLSetConnection(self.context, handle) _assert_no_error(result) # If we have a server hostname, we should set that too. if server_hostname: if not isinstance(server_hostname, bytes): server_hostname = server_hostname.encode("utf-8") result = Security.SSLSetPeerDomainName( self.context, server_hostname, len(server_hostname) ) _assert_no_error(result) # Setup the ciphers. self._set_ciphers() # Setup the ALPN protocols. self._set_alpn_protocols(alpn_protocols) # Set the minimum and maximum TLS versions. result = Security.SSLSetProtocolVersionMin(self.context, min_version) _assert_no_error(result) result = Security.SSLSetProtocolVersionMax(self.context, max_version) _assert_no_error(result) # If there's a trust DB, we need to use it. We do that by telling # SecureTransport to break on server auth. We also do that if we don't # want to validate the certs at all: we just won't actually do any # authing in that case. if not verify or trust_bundle is not None: result = Security.SSLSetSessionOption( self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True ) _assert_no_error(result) # If there's a client cert, we need to use it. if client_cert: self._keychain, self._keychain_dir = _temporary_keychain() self._client_cert_chain = _load_client_cert_chain( self._keychain, client_cert, client_key ) result = Security.SSLSetCertificate(self.context, self._client_cert_chain) _assert_no_error(result) while True: with self._raise_on_error(): result = Security.SSLHandshake(self.context) if result == SecurityConst.errSSLWouldBlock: raise socket.timeout("handshake timed out") elif result == SecurityConst.errSSLServerAuthCompleted: self._custom_validate(verify, trust_bundle) continue else: _assert_no_error(result) break def fileno(self): return self.socket.fileno() # Copy-pasted from Python 3.5 source code def _decref_socketios(self): if self._makefile_refs > 0: self._makefile_refs -= 1 if self._closed: self.close() def recv(self, bufsiz): buffer = ctypes.create_string_buffer(bufsiz) bytes_read = self.recv_into(buffer, bufsiz) data = buffer[:bytes_read] return data def recv_into(self, buffer, nbytes=None): # Read short on EOF. if self._closed: return 0 if nbytes is None: nbytes = len(buffer) buffer = (ctypes.c_char * nbytes).from_buffer(buffer) processed_bytes = ctypes.c_size_t(0) with self._raise_on_error(): result = Security.SSLRead( self.context, buffer, nbytes, ctypes.byref(processed_bytes) ) # There are some result codes that we want to treat as "not always # errors". Specifically, those are errSSLWouldBlock, # errSSLClosedGraceful, and errSSLClosedNoNotify. if result == SecurityConst.errSSLWouldBlock: # If we didn't process any bytes, then this was just a time out. # However, we can get errSSLWouldBlock in situations when we *did* # read some data, and in those cases we should just read "short" # and return. if processed_bytes.value == 0: # Timed out, no data read. raise socket.timeout("recv timed out") elif result in ( SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify, ): # The remote peer has closed this connection. We should do so as # well. Note that we don't actually return here because in # principle this could actually be fired along with return data. # It's unlikely though. self.close() else: _assert_no_error(result) # Ok, we read and probably succeeded. 
We should return whatever data # was actually read. return processed_bytes.value def settimeout(self, timeout): self._timeout = timeout def gettimeout(self): return self._timeout def send(self, data): processed_bytes = ctypes.c_size_t(0) with self._raise_on_error(): result = Security.SSLWrite( self.context, data, len(data), ctypes.byref(processed_bytes) ) if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0: # Timed out raise socket.timeout("send timed out") else: _assert_no_error(result) # We sent, and probably succeeded. Tell them how much we sent. return processed_bytes.value def sendall(self, data): total_sent = 0 while total_sent < len(data): sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]) total_sent += sent def shutdown(self): with self._raise_on_error(): Security.SSLClose(self.context) def close(self): # TODO: should I do clean shutdown here? Do I have to? if self._makefile_refs < 1: self._closed = True if self.context: CoreFoundation.CFRelease(self.context) self.context = None if self._client_cert_chain: CoreFoundation.CFRelease(self._client_cert_chain) self._client_cert_chain = None if self._keychain: Security.SecKeychainDelete(self._keychain) CoreFoundation.CFRelease(self._keychain) shutil.rmtree(self._keychain_dir) self._keychain = self._keychain_dir = None return self.socket.close() else: self._makefile_refs -= 1 def getpeercert(self, binary_form=False): # Urgh, annoying. # # Here's how we do this: # # 1. Call SSLCopyPeerTrust to get hold of the trust object for this # connection. # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf. # 3. To get the CN, call SecCertificateCopyCommonName and process that # string so that it's of the appropriate type. # 4. To get the SAN, we need to do something a bit more complex: # a. Call SecCertificateCopyValues to get the data, requesting # kSecOIDSubjectAltName. # b. Mess about with this dictionary to try to get the SANs out. # # This is gross. Really gross. It's going to be a few hundred LoC extra # just to repeat something that SecureTransport can *already do*. So my # operating assumption at this time is that what we want to do is # instead to just flag to urllib3 that it shouldn't do its own hostname # validation when using SecureTransport. if not binary_form: raise ValueError("SecureTransport only supports dumping binary certs") trust = Security.SecTrustRef() certdata = None der_bytes = None try: # Grab the trust store. result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust)) _assert_no_error(result) if not trust: # Probably we haven't done the handshake yet. No biggie. return None cert_count = Security.SecTrustGetCertificateCount(trust) if not cert_count: # Also a case that might happen if we haven't handshaked. # Handshook? Handshaken? return None leaf = Security.SecTrustGetCertificateAtIndex(trust, 0) assert leaf # Ok, now we want the DER bytes. 
certdata = Security.SecCertificateCopyData(leaf) assert certdata data_length = CoreFoundation.CFDataGetLength(certdata) data_buffer = CoreFoundation.CFDataGetBytePtr(certdata) der_bytes = ctypes.string_at(data_buffer, data_length) finally: if certdata: CoreFoundation.CFRelease(certdata) if trust: CoreFoundation.CFRelease(trust) return der_bytes def version(self): protocol = Security.SSLProtocol() result = Security.SSLGetNegotiatedProtocolVersion( self.context, ctypes.byref(protocol) ) _assert_no_error(result) if protocol.value == SecurityConst.kTLSProtocol13: raise ssl.SSLError("SecureTransport does not support TLS 1.3") elif protocol.value == SecurityConst.kTLSProtocol12: return "TLSv1.2" elif protocol.value == SecurityConst.kTLSProtocol11: return "TLSv1.1" elif protocol.value == SecurityConst.kTLSProtocol1: return "TLSv1" elif protocol.value == SecurityConst.kSSLProtocol3: return "SSLv3" elif protocol.value == SecurityConst.kSSLProtocol2: return "SSLv2" else: raise ssl.SSLError("Unknown TLS version: %r" % protocol) def _reuse(self): self._makefile_refs += 1 def _drop(self): if self._makefile_refs < 1: self.close() else: self._makefile_refs -= 1 if _fileobject: # Platform-specific: Python 2 def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) else: # Platform-specific: Python 3 def makefile(self, mode="r", buffering=None, *args, **kwargs): # We disable buffering with SecureTransport because it conflicts with # the buffering that ST does internally (see issue #1153 for more). buffering = 0 return backport_makefile(self, mode, buffering, *args, **kwargs) WrappedSocket.makefile = makefile class SecureTransportContext(object): """ I am a wrapper class for the SecureTransport library, to translate the interface of the standard library ``SSLContext`` object to calls into SecureTransport. """ def __init__(self, protocol): self._min_version, self._max_version = _protocol_to_min_max[protocol] self._options = 0 self._verify = False self._trust_bundle = None self._client_cert = None self._client_key = None self._client_key_passphrase = None self._alpn_protocols = None @property def check_hostname(self): """ SecureTransport cannot have its hostname checking disabled. For more, see the comment on getpeercert() in this file. """ return True @check_hostname.setter def check_hostname(self, value): """ SecureTransport cannot have its hostname checking disabled. For more, see the comment on getpeercert() in this file. """ pass @property def options(self): # TODO: Well, crap. # # So this is the bit of the code that is the most likely to cause us # trouble. Essentially we need to enumerate all of the SSL options that # users might want to use and try to see if we can sensibly translate # them, or whether we should just ignore them. return self._options @options.setter def options(self, value): # TODO: Update in line with above. self._options = value @property def verify_mode(self): return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE @verify_mode.setter def verify_mode(self, value): self._verify = True if value == ssl.CERT_REQUIRED else False def set_default_verify_paths(self): # So, this has to do something a bit weird. Specifically, what it does # is nothing. # # This means that, if we had previously had load_verify_locations # called, this does not undo that. 
We need to do that because it turns
        # out that the rest of the urllib3 code will attempt to load the
        # default verify paths if it hasn't been told about any paths, even if
        # the context itself was configured sometime earlier. We resolve that
        # by just ignoring it.
        pass

    def load_default_certs(self):
        return self.set_default_verify_paths()

    def set_ciphers(self, ciphers):
        # For now, we just require the default cipher string.
        if ciphers != util.ssl_.DEFAULT_CIPHERS:
            raise ValueError("SecureTransport doesn't support custom cipher strings")

    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
        # OK, we only really support cadata and cafile.
        if capath is not None:
            raise ValueError("SecureTransport does not support cert directories")

        # Raise if cafile does not exist.
        if cafile is not None:
            with open(cafile):
                pass

        self._trust_bundle = cafile or cadata

    def load_cert_chain(self, certfile, keyfile=None, password=None):
        self._client_cert = certfile
        self._client_key = keyfile
        # Store under the same attribute that __init__ and wrap_socket() use,
        # so the passphrase actually reaches handshake().
        self._client_key_passphrase = password

    def set_alpn_protocols(self, protocols):
        """
        Sets the ALPN protocols that will later be set on the context.

        Raises a NotImplementedError if ALPN is not supported.
        """
        if not hasattr(Security, "SSLSetALPNProtocols"):
            raise NotImplementedError(
                "SecureTransport supports ALPN only in macOS 10.12+"
            )
        self._alpn_protocols = [six.ensure_binary(p) for p in protocols]

    def wrap_socket(
        self,
        sock,
        server_side=False,
        do_handshake_on_connect=True,
        suppress_ragged_eofs=True,
        server_hostname=None,
    ):
        # So, what do we do here? Firstly, we assert some properties. This is a
        # stripped down shim, so there is some functionality we don't support.
        # See PEP 543 for the real deal.
        assert not server_side
        assert do_handshake_on_connect
        assert suppress_ragged_eofs

        # Ok, we're good to go. Now we want to create the wrapped socket object
        # and store it in the appropriate place.
        wrapped_socket = WrappedSocket(sock)

        # Now we can handshake
        wrapped_socket.handshake(
            server_hostname,
            self._verify,
            self._trust_bundle,
            self._min_version,
            self._max_version,
            self._client_cert,
            self._client_key,
            self._client_key_passphrase,
            self._alpn_protocols,
        )
        return wrapped_socket
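# Hedged usage sketch (editorial addition): mirrors the module docstring.
# Injection only makes sense on macOS, where the Security framework is
# available; extract_from_urllib3() undoes the monkey-patch. The URL is a
# placeholder.
if __name__ == "__main__":  # pragma: no cover
    inject_into_urllib3()

    import urllib3

    r = urllib3.PoolManager().request("GET", "https://www.example.com/")
    print(r.status)

    extract_from_urllib3()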
34,416
Python
36.328633
86
0.634821
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/_appengine_environ.py
""" This module provides means to detect the App Engine environment. """ import os def is_appengine(): return is_local_appengine() or is_prod_appengine() def is_appengine_sandbox(): """Reports if the app is running in the first generation sandbox. The second generation runtimes are technically still in a sandbox, but it is much less restrictive, so generally you shouldn't need to check for it. see https://cloud.google.com/appengine/docs/standard/runtimes """ return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27" def is_local_appengine(): return "APPENGINE_RUNTIME" in os.environ and os.environ.get( "SERVER_SOFTWARE", "" ).startswith("Development/") def is_prod_appengine(): return "APPENGINE_RUNTIME" in os.environ and os.environ.get( "SERVER_SOFTWARE", "" ).startswith("Google App Engine/") def is_prod_appengine_mvms(): """Deprecated.""" return False
957
Python
24.891891
78
0.6907
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/ntlmpool.py
""" NTLM authenticating pool, contributed by erikcederstran Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 """ from __future__ import absolute_import import warnings from logging import getLogger from ntlm import ntlm from .. import HTTPSConnectionPool from ..packages.six.moves.http_client import HTTPSConnection warnings.warn( "The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed " "in urllib3 v2.0 release, urllib3 is not able to support it properly due " "to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. " "If you are a user of this module please comment in the mentioned issue.", DeprecationWarning, ) log = getLogger(__name__) class NTLMConnectionPool(HTTPSConnectionPool): """ Implements an NTLM authentication version of an urllib3 connection pool """ scheme = "https" def __init__(self, user, pw, authurl, *args, **kwargs): """ authurl is a random URL on the server that is protected by NTLM. user is the Windows user, probably in the DOMAIN\\username format. pw is the password for the user. """ super(NTLMConnectionPool, self).__init__(*args, **kwargs) self.authurl = authurl self.rawuser = user user_parts = user.split("\\", 1) self.domain = user_parts[0].upper() self.user = user_parts[1] self.pw = pw def _new_conn(self): # Performs the NTLM handshake that secures the connection. The socket # must be kept open while requests are performed. self.num_connections += 1 log.debug( "Starting NTLM HTTPS connection no. %d: https://%s%s", self.num_connections, self.host, self.authurl, ) headers = {"Connection": "Keep-Alive"} req_header = "Authorization" resp_header = "www-authenticate" conn = HTTPSConnection(host=self.host, port=self.port) # Send negotiation message headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE( self.rawuser ) log.debug("Request headers: %s", headers) conn.request("GET", self.authurl, None, headers) res = conn.getresponse() reshdr = dict(res.headers) log.debug("Response status: %s %s", res.status, res.reason) log.debug("Response headers: %s", reshdr) log.debug("Response data: %s [...]", res.read(100)) # Remove the reference to the socket, so that it can not be closed by # the response object (we want to keep the socket open) res.fp = None # Server should respond with a challenge message auth_header_values = reshdr[resp_header].split(", ") auth_header_value = None for s in auth_header_values: if s[:5] == "NTLM ": auth_header_value = s[5:] if auth_header_value is None: raise Exception( "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header]) ) # Send authentication message ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE( auth_header_value ) auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE( ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags ) headers[req_header] = "NTLM %s" % auth_msg log.debug("Request headers: %s", headers) conn.request("GET", self.authurl, None, headers) res = conn.getresponse() log.debug("Response status: %s %s", res.status, res.reason) log.debug("Response headers: %s", dict(res.headers)) log.debug("Response data: %s [...]", res.read()[:100]) if res.status != 200: if res.status == 401: raise Exception("Server rejected request: wrong username or password") raise Exception("Wrong server response: %s %s" % (res.status, res.reason)) res.fp = None log.debug("Connection established") return conn def urlopen( self, method, url, body=None, headers=None, retries=3, redirect=True, assert_same_host=True, ): if headers is None: headers = {} 
headers["Connection"] = "Keep-Alive" return super(NTLMConnectionPool, self).urlopen( method, url, body, headers, retries, redirect, assert_same_host )
4,528
Python
33.572519
88
0.605345
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/_securetransport/low_level.py
""" Low-level helpers for the SecureTransport bindings. These are Python functions that are not directly related to the high-level APIs but are necessary to get them to work. They include a whole bunch of low-level CoreFoundation messing about and memory management. The concerns in this module are almost entirely about trying to avoid memory leaks and providing appropriate and useful assistance to the higher-level code. """ import base64 import ctypes import itertools import os import re import ssl import struct import tempfile from .bindings import CFConst, CoreFoundation, Security # This regular expression is used to grab PEM data out of a PEM bundle. _PEM_CERTS_RE = re.compile( b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL ) def _cf_data_from_bytes(bytestring): """ Given a bytestring, create a CFData object from it. This CFData object must be CFReleased by the caller. """ return CoreFoundation.CFDataCreate( CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) ) def _cf_dictionary_from_tuples(tuples): """ Given a list of Python tuples, create an associated CFDictionary. """ dictionary_size = len(tuples) # We need to get the dictionary keys and values out in the same order. keys = (t[0] for t in tuples) values = (t[1] for t in tuples) cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys) cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values) return CoreFoundation.CFDictionaryCreate( CoreFoundation.kCFAllocatorDefault, cf_keys, cf_values, dictionary_size, CoreFoundation.kCFTypeDictionaryKeyCallBacks, CoreFoundation.kCFTypeDictionaryValueCallBacks, ) def _cfstr(py_bstr): """ Given a Python binary data, create a CFString. The string must be CFReleased by the caller. """ c_str = ctypes.c_char_p(py_bstr) cf_str = CoreFoundation.CFStringCreateWithCString( CoreFoundation.kCFAllocatorDefault, c_str, CFConst.kCFStringEncodingUTF8, ) return cf_str def _create_cfstring_array(lst): """ Given a list of Python binary data, create an associated CFMutableArray. The array must be CFReleased by the caller. Raises an ssl.SSLError on failure. """ cf_arr = None try: cf_arr = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), ) if not cf_arr: raise MemoryError("Unable to allocate memory!") for item in lst: cf_str = _cfstr(item) if not cf_str: raise MemoryError("Unable to allocate memory!") try: CoreFoundation.CFArrayAppendValue(cf_arr, cf_str) finally: CoreFoundation.CFRelease(cf_str) except BaseException as e: if cf_arr: CoreFoundation.CFRelease(cf_arr) raise ssl.SSLError("Unable to allocate array: %s" % (e,)) return cf_arr def _cf_string_to_unicode(value): """ Creates a Unicode string from a CFString object. Used entirely for error reporting. Yes, it annoys me quite a lot that this function is this complex. 
""" value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p)) string = CoreFoundation.CFStringGetCStringPtr( value_as_void_p, CFConst.kCFStringEncodingUTF8 ) if string is None: buffer = ctypes.create_string_buffer(1024) result = CoreFoundation.CFStringGetCString( value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8 ) if not result: raise OSError("Error copying C string from CFStringRef") string = buffer.value if string is not None: string = string.decode("utf-8") return string def _assert_no_error(error, exception_class=None): """ Checks the return code and throws an exception if there is an error to report """ if error == 0: return cf_error_string = Security.SecCopyErrorMessageString(error, None) output = _cf_string_to_unicode(cf_error_string) CoreFoundation.CFRelease(cf_error_string) if output is None or output == u"": output = u"OSStatus %s" % error if exception_class is None: exception_class = ssl.SSLError raise exception_class(output) def _cert_array_from_pem(pem_bundle): """ Given a bundle of certs in PEM format, turns them into a CFArray of certs that can be used to validate a cert chain. """ # Normalize the PEM bundle's line endings. pem_bundle = pem_bundle.replace(b"\r\n", b"\n") der_certs = [ base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle) ] if not der_certs: raise ssl.SSLError("No root certificates specified") cert_array = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), ) if not cert_array: raise ssl.SSLError("Unable to allocate memory!") try: for der_bytes in der_certs: certdata = _cf_data_from_bytes(der_bytes) if not certdata: raise ssl.SSLError("Unable to allocate memory!") cert = Security.SecCertificateCreateWithData( CoreFoundation.kCFAllocatorDefault, certdata ) CoreFoundation.CFRelease(certdata) if not cert: raise ssl.SSLError("Unable to build cert object!") CoreFoundation.CFArrayAppendValue(cert_array, cert) CoreFoundation.CFRelease(cert) except Exception: # We need to free the array before the exception bubbles further. # We only want to do that if an error occurs: otherwise, the caller # should free. CoreFoundation.CFRelease(cert_array) raise return cert_array def _is_cert(item): """ Returns True if a given CFTypeRef is a certificate. """ expected = Security.SecCertificateGetTypeID() return CoreFoundation.CFGetTypeID(item) == expected def _is_identity(item): """ Returns True if a given CFTypeRef is an identity. """ expected = Security.SecIdentityGetTypeID() return CoreFoundation.CFGetTypeID(item) == expected def _temporary_keychain(): """ This function creates a temporary Mac keychain that we can use to work with credentials. This keychain uses a one-time password and a temporary file to store the data. We expect to have one keychain per socket. The returned SecKeychainRef must be freed by the caller, including calling SecKeychainDelete. Returns a tuple of the SecKeychainRef and the path to the temporary directory that contains it. """ # Unfortunately, SecKeychainCreate requires a path to a keychain. This # means we cannot use mkstemp to use a generic temporary file. Instead, # we're going to create a temporary directory and a filename to use there. # This filename will be 8 random bytes expanded into base64. We also need # some random bytes to password-protect the keychain we're creating, so we # ask for 40 random bytes. 
random_bytes = os.urandom(40) filename = base64.b16encode(random_bytes[:8]).decode("utf-8") password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8 tempdirectory = tempfile.mkdtemp() keychain_path = os.path.join(tempdirectory, filename).encode("utf-8") # We now want to create the keychain itself. keychain = Security.SecKeychainRef() status = Security.SecKeychainCreate( keychain_path, len(password), password, False, None, ctypes.byref(keychain) ) _assert_no_error(status) # Having created the keychain, we want to pass it off to the caller. return keychain, tempdirectory def _load_items_from_file(keychain, path): """ Given a single file, loads all the trust objects from it into arrays and the keychain. Returns a tuple of lists: the first list is a list of identities, the second a list of certs. """ certificates = [] identities = [] result_array = None with open(path, "rb") as f: raw_filedata = f.read() try: filedata = CoreFoundation.CFDataCreate( CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata) ) result_array = CoreFoundation.CFArrayRef() result = Security.SecItemImport( filedata, # cert data None, # Filename, leaving it out for now None, # What the type of the file is, we don't care None, # what's in the file, we don't care 0, # import flags None, # key params, can include passphrase in the future keychain, # The keychain to insert into ctypes.byref(result_array), # Results ) _assert_no_error(result) # A CFArray is not very useful to us as an intermediary # representation, so we are going to extract the objects we want # and then free the array. We don't need to keep hold of keys: the # keychain already has them! result_count = CoreFoundation.CFArrayGetCount(result_array) for index in range(result_count): item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index) item = ctypes.cast(item, CoreFoundation.CFTypeRef) if _is_cert(item): CoreFoundation.CFRetain(item) certificates.append(item) elif _is_identity(item): CoreFoundation.CFRetain(item) identities.append(item) finally: if result_array: CoreFoundation.CFRelease(result_array) CoreFoundation.CFRelease(filedata) return (identities, certificates) def _load_client_cert_chain(keychain, *paths): """ Load certificates and maybe keys from a number of files. Has the end goal of returning a CFArray containing one SecIdentityRef, and then zero or more SecCertificateRef objects, suitable for use as a client certificate trust chain. """ # Ok, the strategy. # # This relies on knowing that macOS will not give you a SecIdentityRef # unless you have imported a key into a keychain. This is a somewhat # artificial limitation of macOS (for example, it doesn't necessarily # affect iOS), but there is nothing inside Security.framework that lets you # get a SecIdentityRef without having a key in a keychain. # # So the policy here is we take all the files and iterate them in order. # Each one will use SecItemImport to have one or more objects loaded from # it. We will also point at a keychain that macOS can use to work with the # private key. # # Once we have all the objects, we'll check what we actually have. If we # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise, # we'll take the first certificate (which we assume to be our leaf) and # ask the keychain to give us a SecIdentityRef with that cert's associated # key. # # We'll then return a CFArray containing the trust chain: one # SecIdentityRef and then zero-or-more SecCertificateRef objects. 
The # responsibility for freeing this CFArray will be with the caller. This # CFArray must remain alive for the entire connection, so in practice it # will be stored with a single SSLSocket, along with the reference to the # keychain. certificates = [] identities = [] # Filter out bad paths. paths = (path for path in paths if path) try: for file_path in paths: new_identities, new_certs = _load_items_from_file(keychain, file_path) identities.extend(new_identities) certificates.extend(new_certs) # Ok, we have everything. The question is: do we have an identity? If # not, we want to grab one from the first cert we have. if not identities: new_identity = Security.SecIdentityRef() status = Security.SecIdentityCreateWithCertificate( keychain, certificates[0], ctypes.byref(new_identity) ) _assert_no_error(status) identities.append(new_identity) # We now want to release the original certificate, as we no longer # need it. CoreFoundation.CFRelease(certificates.pop(0)) # We now need to build a new CFArray that holds the trust chain. trust_chain = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), ) for item in itertools.chain(identities, certificates): # ArrayAppendValue does a CFRetain on the item. That's fine, # because the finally block will release our other refs to them. CoreFoundation.CFArrayAppendValue(trust_chain, item) return trust_chain finally: for obj in itertools.chain(identities, certificates): CoreFoundation.CFRelease(obj) TLS_PROTOCOL_VERSIONS = { "SSLv2": (0, 2), "SSLv3": (3, 0), "TLSv1": (3, 1), "TLSv1.1": (3, 2), "TLSv1.2": (3, 3), } def _build_tls_unknown_ca_alert(version): """ Builds a TLS alert record for an unknown CA. """ ver_maj, ver_min = TLS_PROTOCOL_VERSIONS[version] severity_fatal = 0x02 description_unknown_ca = 0x30 msg = struct.pack(">BB", severity_fatal, description_unknown_ca) msg_len = len(msg) record_type_alert = 0x15 record = struct.pack(">BBBH", record_type_alert, ver_maj, ver_min, msg_len) + msg return record
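# --- Editor's worked example: the alert record built above ---
# _build_tls_unknown_ca_alert packs a 2-byte alert body (level, description)
# behind a 5-byte record header (content type, version major/minor, length).
# Importing this module requires macOS, so this standalone sketch repeats
# the same packing with the same constants to show the exact bytes produced
# for "TLSv1.2".
import struct

ver_maj, ver_min = 3, 3                 # TLS_PROTOCOL_VERSIONS["TLSv1.2"]
body = struct.pack(">BB", 0x02, 0x30)   # fatal (2), unknown_ca (48)
record = struct.pack(">BBBH", 0x15, ver_maj, ver_min, len(body)) + body
assert record == b"\x15\x03\x03\x00\x02\x02\x30"
print(record.hex())                     # 15030300020230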
13,922
Python
33.982412
88
0.659316
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/_securetransport/bindings.py
""" This module uses ctypes to bind a whole bunch of functions and constants from SecureTransport. The goal here is to provide the low-level API to SecureTransport. These are essentially the C-level functions and constants, and they're pretty gross to work with. This code is a bastardised version of the code found in Will Bond's oscrypto library. An enormous debt is owed to him for blazing this trail for us. For that reason, this code should be considered to be covered both by urllib3's license and by oscrypto's: Copyright (c) 2015-2016 Will Bond <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import absolute_import import platform from ctypes import ( CDLL, CFUNCTYPE, POINTER, c_bool, c_byte, c_char_p, c_int32, c_long, c_size_t, c_uint32, c_ulong, c_void_p, ) from ctypes.util import find_library from ...packages.six import raise_from if platform.system() != "Darwin": raise ImportError("Only macOS is supported") version = platform.mac_ver()[0] version_info = tuple(map(int, version.split("."))) if version_info < (10, 8): raise OSError( "Only OS X 10.8 and newer are supported, not %s.%s" % (version_info[0], version_info[1]) ) def load_cdll(name, macos10_16_path): """Loads a CDLL by name, falling back to known path on 10.16+""" try: # Big Sur is technically 11 but we use 10.16 due to the Big Sur # beta being labeled as 10.16. 
if version_info >= (10, 16): path = macos10_16_path else: path = find_library(name) if not path: raise OSError # Caught and reraised as 'ImportError' return CDLL(path, use_errno=True) except OSError: raise_from(ImportError("The library %s failed to load" % name), None) Security = load_cdll( "Security", "/System/Library/Frameworks/Security.framework/Security" ) CoreFoundation = load_cdll( "CoreFoundation", "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation", ) Boolean = c_bool CFIndex = c_long CFStringEncoding = c_uint32 CFData = c_void_p CFString = c_void_p CFArray = c_void_p CFMutableArray = c_void_p CFDictionary = c_void_p CFError = c_void_p CFType = c_void_p CFTypeID = c_ulong CFTypeRef = POINTER(CFType) CFAllocatorRef = c_void_p OSStatus = c_int32 CFDataRef = POINTER(CFData) CFStringRef = POINTER(CFString) CFArrayRef = POINTER(CFArray) CFMutableArrayRef = POINTER(CFMutableArray) CFDictionaryRef = POINTER(CFDictionary) CFArrayCallBacks = c_void_p CFDictionaryKeyCallBacks = c_void_p CFDictionaryValueCallBacks = c_void_p SecCertificateRef = POINTER(c_void_p) SecExternalFormat = c_uint32 SecExternalItemType = c_uint32 SecIdentityRef = POINTER(c_void_p) SecItemImportExportFlags = c_uint32 SecItemImportExportKeyParameters = c_void_p SecKeychainRef = POINTER(c_void_p) SSLProtocol = c_uint32 SSLCipherSuite = c_uint32 SSLContextRef = POINTER(c_void_p) SecTrustRef = POINTER(c_void_p) SSLConnectionRef = c_uint32 SecTrustResultType = c_uint32 SecTrustOptionFlags = c_uint32 SSLProtocolSide = c_uint32 SSLConnectionType = c_uint32 SSLSessionOption = c_uint32 try: Security.SecItemImport.argtypes = [ CFDataRef, CFStringRef, POINTER(SecExternalFormat), POINTER(SecExternalItemType), SecItemImportExportFlags, POINTER(SecItemImportExportKeyParameters), SecKeychainRef, POINTER(CFArrayRef), ] Security.SecItemImport.restype = OSStatus Security.SecCertificateGetTypeID.argtypes = [] Security.SecCertificateGetTypeID.restype = CFTypeID Security.SecIdentityGetTypeID.argtypes = [] Security.SecIdentityGetTypeID.restype = CFTypeID Security.SecKeyGetTypeID.argtypes = [] Security.SecKeyGetTypeID.restype = CFTypeID Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef] Security.SecCertificateCreateWithData.restype = SecCertificateRef Security.SecCertificateCopyData.argtypes = [SecCertificateRef] Security.SecCertificateCopyData.restype = CFDataRef Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p] Security.SecCopyErrorMessageString.restype = CFStringRef Security.SecIdentityCreateWithCertificate.argtypes = [ CFTypeRef, SecCertificateRef, POINTER(SecIdentityRef), ] Security.SecIdentityCreateWithCertificate.restype = OSStatus Security.SecKeychainCreate.argtypes = [ c_char_p, c_uint32, c_void_p, Boolean, c_void_p, POINTER(SecKeychainRef), ] Security.SecKeychainCreate.restype = OSStatus Security.SecKeychainDelete.argtypes = [SecKeychainRef] Security.SecKeychainDelete.restype = OSStatus Security.SecPKCS12Import.argtypes = [ CFDataRef, CFDictionaryRef, POINTER(CFArrayRef), ] Security.SecPKCS12Import.restype = OSStatus SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t)) SSLWriteFunc = CFUNCTYPE( OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t) ) Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc] Security.SSLSetIOFuncs.restype = OSStatus Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t] Security.SSLSetPeerID.restype = OSStatus Security.SSLSetCertificate.argtypes = [SSLContextRef, 
CFArrayRef] Security.SSLSetCertificate.restype = OSStatus Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean] Security.SSLSetCertificateAuthorities.restype = OSStatus Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef] Security.SSLSetConnection.restype = OSStatus Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t] Security.SSLSetPeerDomainName.restype = OSStatus Security.SSLHandshake.argtypes = [SSLContextRef] Security.SSLHandshake.restype = OSStatus Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)] Security.SSLRead.restype = OSStatus Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)] Security.SSLWrite.restype = OSStatus Security.SSLClose.argtypes = [SSLContextRef] Security.SSLClose.restype = OSStatus Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)] Security.SSLGetNumberSupportedCiphers.restype = OSStatus Security.SSLGetSupportedCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), POINTER(c_size_t), ] Security.SSLGetSupportedCiphers.restype = OSStatus Security.SSLSetEnabledCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), c_size_t, ] Security.SSLSetEnabledCiphers.restype = OSStatus Security.SSLGetNumberEnabledCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)] Security.SSLGetNumberEnabledCiphers.restype = OSStatus Security.SSLGetEnabledCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), POINTER(c_size_t), ] Security.SSLGetEnabledCiphers.restype = OSStatus Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)] Security.SSLGetNegotiatedCipher.restype = OSStatus Security.SSLGetNegotiatedProtocolVersion.argtypes = [ SSLContextRef, POINTER(SSLProtocol), ] Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)] Security.SSLCopyPeerTrust.restype = OSStatus Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef] Security.SecTrustSetAnchorCertificates.restype = OSStatus Security.SecTrustSetAnchorCertificatesOnly.argtypes = [SecTrustRef, Boolean] Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)] Security.SecTrustEvaluate.restype = OSStatus Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef] Security.SecTrustGetCertificateCount.restype = CFIndex Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex] Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef Security.SSLCreateContext.argtypes = [ CFAllocatorRef, SSLProtocolSide, SSLConnectionType, ] Security.SSLCreateContext.restype = SSLContextRef Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean] Security.SSLSetSessionOption.restype = OSStatus Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol] Security.SSLSetProtocolVersionMin.restype = OSStatus Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol] Security.SSLSetProtocolVersionMax.restype = OSStatus try: Security.SSLSetALPNProtocols.argtypes = [SSLContextRef, CFArrayRef] Security.SSLSetALPNProtocols.restype = OSStatus except AttributeError: # Supported only in 10.12+ pass Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p] Security.SecCopyErrorMessageString.restype = CFStringRef Security.SSLReadFunc = SSLReadFunc Security.SSLWriteFunc = SSLWriteFunc
Security.SSLContextRef = SSLContextRef Security.SSLProtocol = SSLProtocol Security.SSLCipherSuite = SSLCipherSuite Security.SecIdentityRef = SecIdentityRef Security.SecKeychainRef = SecKeychainRef Security.SecTrustRef = SecTrustRef Security.SecTrustResultType = SecTrustResultType Security.SecExternalFormat = SecExternalFormat Security.OSStatus = OSStatus Security.kSecImportExportPassphrase = CFStringRef.in_dll( Security, "kSecImportExportPassphrase" ) Security.kSecImportItemIdentity = CFStringRef.in_dll( Security, "kSecImportItemIdentity" ) # CoreFoundation time! CoreFoundation.CFRetain.argtypes = [CFTypeRef] CoreFoundation.CFRetain.restype = CFTypeRef CoreFoundation.CFRelease.argtypes = [CFTypeRef] CoreFoundation.CFRelease.restype = None CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef] CoreFoundation.CFGetTypeID.restype = CFTypeID CoreFoundation.CFStringCreateWithCString.argtypes = [ CFAllocatorRef, c_char_p, CFStringEncoding, ] CoreFoundation.CFStringCreateWithCString.restype = CFStringRef CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding] CoreFoundation.CFStringGetCStringPtr.restype = c_char_p CoreFoundation.CFStringGetCString.argtypes = [ CFStringRef, c_char_p, CFIndex, CFStringEncoding, ] CoreFoundation.CFStringGetCString.restype = c_bool CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex] CoreFoundation.CFDataCreate.restype = CFDataRef CoreFoundation.CFDataGetLength.argtypes = [CFDataRef] CoreFoundation.CFDataGetLength.restype = CFIndex CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef] CoreFoundation.CFDataGetBytePtr.restype = c_void_p CoreFoundation.CFDictionaryCreate.argtypes = [ CFAllocatorRef, POINTER(CFTypeRef), POINTER(CFTypeRef), CFIndex, CFDictionaryKeyCallBacks, CFDictionaryValueCallBacks, ] CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef] CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef CoreFoundation.CFArrayCreate.argtypes = [ CFAllocatorRef, POINTER(CFTypeRef), CFIndex, CFArrayCallBacks, ] CoreFoundation.CFArrayCreate.restype = CFArrayRef CoreFoundation.CFArrayCreateMutable.argtypes = [ CFAllocatorRef, CFIndex, CFArrayCallBacks, ] CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p] CoreFoundation.CFArrayAppendValue.restype = None CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef] CoreFoundation.CFArrayGetCount.restype = CFIndex CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex] CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( CoreFoundation, "kCFAllocatorDefault" ) CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll( CoreFoundation, "kCFTypeArrayCallBacks" ) CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll( CoreFoundation, "kCFTypeDictionaryKeyCallBacks" ) CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( CoreFoundation, "kCFTypeDictionaryValueCallBacks" ) CoreFoundation.CFTypeRef = CFTypeRef CoreFoundation.CFArrayRef = CFArrayRef CoreFoundation.CFStringRef = CFStringRef CoreFoundation.CFDictionaryRef = CFDictionaryRef except (AttributeError): raise ImportError("Error initializing ctypes") class CFConst(object): """ A class object that acts as essentially a namespace for CoreFoundation constants. 
""" kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) class SecurityConst(object): """ A class object that acts as essentially a namespace for Security constants. """ kSSLSessionOptionBreakOnServerAuth = 0 kSSLProtocol2 = 1 kSSLProtocol3 = 2 kTLSProtocol1 = 4 kTLSProtocol11 = 7 kTLSProtocol12 = 8 # SecureTransport does not support TLS 1.3 even if there's a constant for it kTLSProtocol13 = 10 kTLSProtocolMaxSupported = 999 kSSLClientSide = 1 kSSLStreamType = 0 kSecFormatPEMSequence = 10 kSecTrustResultInvalid = 0 kSecTrustResultProceed = 1 # This gap is present on purpose: this was kSecTrustResultConfirm, which # is deprecated. kSecTrustResultDeny = 3 kSecTrustResultUnspecified = 4 kSecTrustResultRecoverableTrustFailure = 5 kSecTrustResultFatalTrustFailure = 6 kSecTrustResultOtherError = 7 errSSLProtocol = -9800 errSSLWouldBlock = -9803 errSSLClosedGraceful = -9805 errSSLClosedNoNotify = -9816 errSSLClosedAbort = -9806 errSSLXCertChainInvalid = -9807 errSSLCrypto = -9809 errSSLInternal = -9810 errSSLCertExpired = -9814 errSSLCertNotYetValid = -9815 errSSLUnknownRootCert = -9812 errSSLNoRootCert = -9813 errSSLHostNameMismatch = -9843 errSSLPeerHandshakeFail = -9824 errSSLPeerUserCancelled = -9839 errSSLWeakPeerEphemeralDHKey = -9850 errSSLServerAuthCompleted = -9841 errSSLRecordOverflow = -9847 errSecVerifyFailed = -67808 errSecNoTrustSettings = -25263 errSecItemNotFound = -25300 errSecInvalidTrustSettings = -25262 # Cipher suites. We only pick the ones our default cipher string allows. # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9 TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8 TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F TLS_AES_128_GCM_SHA256 = 0x1301 TLS_AES_256_GCM_SHA384 = 0x1302 TLS_AES_128_CCM_8_SHA256 = 0x1305 TLS_AES_128_CCM_SHA256 = 0x1304
17,632
Python
32.909615
96
0.735311
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/packages/backports/makefile.py
# -*- coding: utf-8 -*- """ backports.makefile ~~~~~~~~~~~~~~~~~~ Backports the Python 3 ``socket.makefile`` method for use with anything that wants to create a "fake" socket object. """ import io from socket import SocketIO def backport_makefile( self, mode="r", buffering=None, encoding=None, errors=None, newline=None ): """ Backport of ``socket.makefile`` from Python 3.5. """ if not set(mode) <= {"r", "w", "b"}: raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,)) writing = "w" in mode reading = "r" in mode or not writing assert reading or writing binary = "b" in mode rawmode = "" if reading: rawmode += "r" if writing: rawmode += "w" raw = SocketIO(self, rawmode) self._makefile_refs += 1 if buffering is None: buffering = -1 if buffering < 0: buffering = io.DEFAULT_BUFFER_SIZE if buffering == 0: if not binary: raise ValueError("unbuffered streams must be binary") return raw if reading and writing: buffer = io.BufferedRWPair(raw, raw, buffering) elif reading: buffer = io.BufferedReader(raw, buffering) else: assert writing buffer = io.BufferedWriter(raw, buffering) if binary: return buffer text = io.TextIOWrapper(buffer, encoding, errors, newline) text.mode = mode return text
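# --- Editor's usage sketch ---
# backport_makefile expects a socket-like `self` carrying a _makefile_refs
# counter. DemoSocket below is a hypothetical stand-in that adds the counter
# as a class attribute (the += in backport_makefile then creates an instance
# attribute), and one end of a socketpair is rewrapped into it so the
# function, assumed in scope from above, can be exercised end to end.
import socket

class DemoSocket(socket.socket):
    _makefile_refs = 0

a, b = socket.socketpair()
s = DemoSocket(a.family, a.type, a.proto, fileno=a.detach())

f = backport_makefile(s, "rwb")  # buffered read/write binary file object
f.write(b"ping")
f.flush()
print(b.recv(4))                 # b'ping'
f.close()
b.close()
s.close()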
1,417
Python
26.26923
76
0.605505
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/packages/backports/weakref_finalize.py
# -*- coding: utf-8 -*- """ backports.weakref_finalize ~~~~~~~~~~~~~~~~~~ Backports the Python 3 ``weakref.finalize`` method. """ from __future__ import absolute_import import itertools import sys from weakref import ref __all__ = ["weakref_finalize"] class weakref_finalize(object): """Class for finalization of weakrefable objects finalize(obj, func, *args, **kwargs) returns a callable finalizer object which will be called when obj is garbage collected. The first time the finalizer is called it evaluates func(*arg, **kwargs) and returns the result. After this the finalizer is dead, and calling it just returns None. When the program exits any remaining finalizers for which the atexit attribute is true will be run in reverse order of creation. By default atexit is true. """ # Finalizer objects don't have any state of their own. They are # just used as keys to lookup _Info objects in the registry. This # ensures that they cannot be part of a ref-cycle. __slots__ = () _registry = {} _shutdown = False _index_iter = itertools.count() _dirty = False _registered_with_atexit = False class _Info(object): __slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index") def __init__(self, obj, func, *args, **kwargs): if not self._registered_with_atexit: # We may register the exit function more than once because # of a thread race, but that is harmless import atexit atexit.register(self._exitfunc) weakref_finalize._registered_with_atexit = True info = self._Info() info.weakref = ref(obj, self) info.func = func info.args = args info.kwargs = kwargs or None info.atexit = True info.index = next(self._index_iter) self._registry[self] = info weakref_finalize._dirty = True def __call__(self, _=None): """If alive then mark as dead and return func(*args, **kwargs); otherwise return None""" info = self._registry.pop(self, None) if info and not self._shutdown: return info.func(*info.args, **(info.kwargs or {})) def detach(self): """If alive then mark as dead and return (obj, func, args, kwargs); otherwise return None""" info = self._registry.get(self) obj = info and info.weakref() if obj is not None and self._registry.pop(self, None): return (obj, info.func, info.args, info.kwargs or {}) def peek(self): """If alive then return (obj, func, args, kwargs); otherwise return None""" info = self._registry.get(self) obj = info and info.weakref() if obj is not None: return (obj, info.func, info.args, info.kwargs or {}) @property def alive(self): """Whether finalizer is alive""" return self in self._registry @property def atexit(self): """Whether finalizer should be called at exit""" info = self._registry.get(self) return bool(info) and info.atexit @atexit.setter def atexit(self, value): info = self._registry.get(self) if info: info.atexit = bool(value) def __repr__(self): info = self._registry.get(self) obj = info and info.weakref() if obj is None: return "<%s object at %#x; dead>" % (type(self).__name__, id(self)) else: return "<%s object at %#x; for %r at %#x>" % ( type(self).__name__, id(self), type(obj).__name__, id(obj), ) @classmethod def _select_for_exit(cls): # Return live finalizers marked for exit, oldest first L = [(f, i) for (f, i) in cls._registry.items() if i.atexit] L.sort(key=lambda item: item[1].index) return [f for (f, i) in L] @classmethod def _exitfunc(cls): # At shutdown invoke finalizers for which atexit is true. # This is called once all other non-daemonic threads have been # joined. 
reenable_gc = False try: if cls._registry: import gc if gc.isenabled(): reenable_gc = True gc.disable() pending = None while True: if pending is None or weakref_finalize._dirty: pending = cls._select_for_exit() weakref_finalize._dirty = False if not pending: break f = pending.pop() try: # gc is disabled, so (assuming no daemonic # threads) the following is the only line in # this function which might trigger creation # of a new finalizer f() except Exception: sys.excepthook(*sys.exc_info()) assert f not in cls._registry finally: # prevent any more finalizers from executing during shutdown weakref_finalize._shutdown = True if reenable_gc: gc.enable()
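# --- Editor's usage sketch ---
# Registering a finalizer and watching it fire. The immediate trigger on
# `del` relies on CPython's reference counting; on other interpreters the
# callback may be delayed until garbage collection or interpreter exit.
class Resource(object):
    pass

res = Resource()
fin = weakref_finalize(res, print, "resource reclaimed")
print(fin.alive)   # True: res is alive and the finalizer is registered
del res            # prints "resource reclaimed" (immediately on CPython)
print(fin.alive)   # False: calling fin() again would now return None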
5,343
Python
33.25641
79
0.551563
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/frozenlist/__init__.py
import os import sys import types from collections.abc import MutableSequence from functools import total_ordering from typing import Tuple, Type __version__ = "1.3.3" __all__ = ("FrozenList", "PyFrozenList") # type: Tuple[str, ...] NO_EXTENSIONS = bool(os.environ.get("FROZENLIST_NO_EXTENSIONS")) # type: bool @total_ordering class FrozenList(MutableSequence): __slots__ = ("_frozen", "_items") if sys.version_info >= (3, 9): __class_getitem__ = classmethod(types.GenericAlias) else: @classmethod def __class_getitem__(cls: Type["FrozenList"]) -> Type["FrozenList"]: return cls def __init__(self, items=None): self._frozen = False if items is not None: items = list(items) else: items = [] self._items = items @property def frozen(self): return self._frozen def freeze(self): self._frozen = True def __getitem__(self, index): return self._items[index] def __setitem__(self, index, value): if self._frozen: raise RuntimeError("Cannot modify frozen list.") self._items[index] = value def __delitem__(self, index): if self._frozen: raise RuntimeError("Cannot modify frozen list.") del self._items[index] def __len__(self): return self._items.__len__() def __iter__(self): return self._items.__iter__() def __reversed__(self): return self._items.__reversed__() def __eq__(self, other): return list(self) == other def __le__(self, other): return list(self) <= other def insert(self, pos, item): if self._frozen: raise RuntimeError("Cannot modify frozen list.") self._items.insert(pos, item) def __repr__(self): return f"<FrozenList(frozen={self._frozen}, {self._items!r})>" def __hash__(self): if self._frozen: return hash(tuple(self)) else: raise RuntimeError("Cannot hash unfrozen list.") PyFrozenList = FrozenList try: from ._frozenlist import FrozenList as CFrozenList # type: ignore if not NO_EXTENSIONS: # pragma: no cover FrozenList = CFrozenList # type: ignore except ImportError: # pragma: no cover pass
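# --- Editor's usage sketch ---
# A FrozenList behaves like any mutable sequence until freeze() is called;
# afterwards every mutation raises RuntimeError and the list becomes
# hashable (as the hash of the equivalent tuple).
fl = FrozenList([1, 2])
fl.append(3)        # fine while unfrozen
fl.freeze()
print(fl.frozen)    # True
try:
    fl.append(4)
except RuntimeError as exc:
    print(exc)      # Cannot modify frozen list.
print(hash(fl) == hash((1, 2, 3)))  # True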
2,323
Python
22.958763
78
0.583297
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/frozenlist/__init__.pyi
from typing import ( Generic, Iterable, Iterator, List, MutableSequence, Optional, TypeVar, Union, overload, ) _T = TypeVar("_T") _Arg = Union[List[_T], Iterable[_T]] class FrozenList(MutableSequence[_T], Generic[_T]): def __init__(self, items: Optional[_Arg[_T]] = None) -> None: ... @property def frozen(self) -> bool: ... def freeze(self) -> None: ... @overload def __getitem__(self, i: int) -> _T: ... @overload def __getitem__(self, s: slice) -> FrozenList[_T]: ... @overload def __setitem__(self, i: int, o: _T) -> None: ... @overload def __setitem__(self, s: slice, o: Iterable[_T]) -> None: ... @overload def __delitem__(self, i: int) -> None: ... @overload def __delitem__(self, i: slice) -> None: ... def __len__(self) -> int: ... def __iter__(self) -> Iterator[_T]: ... def __reversed__(self) -> Iterator[_T]: ... def __eq__(self, other: object) -> bool: ... def __le__(self, other: FrozenList[_T]) -> bool: ... def __ne__(self, other: object) -> bool: ... def __lt__(self, other: FrozenList[_T]) -> bool: ... def __ge__(self, other: FrozenList[_T]) -> bool: ... def __gt__(self, other: FrozenList[_T]) -> bool: ... def insert(self, pos: int, item: _T) -> None: ... def __repr__(self) -> str: ... def __hash__(self) -> int: ... # types for C accelerators are the same CFrozenList = PyFrozenList = FrozenList
1,470
unknown
29.645833
69
0.528571
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/httptools/_version.py
# This file MUST NOT contain anything but the __version__ assignment. # # When making a release, change the value of __version__ # to an appropriate value, and open a pull request against # the correct branch (master if making a new feature release). # The commit message MUST contain a properly formatted release # log, and the commit must be signed. # # The release automation will: build and test the packages for the # supported platforms, publish the packages on PyPI, merge the PR # to the target branch, create a Git tag pointing to the commit. __version__ = '0.4.0'
575
Python
40.142854
69
0.749565
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/httptools/__init__.py
from . import parser from .parser import * # NOQA from ._version import __version__ # NOQA __all__ = parser.__all__ + ('__version__',) # NOQA
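# --- Editor's usage sketch ---
# parse_url is one of the names re-exported from .parser. The attribute
# names below follow the URL result object from httptools.parser.url_parser
# (bytes fields plus an integer port).
import httptools

url = httptools.parse_url(b"https://example.com:8443/search?q=kit#top")
print(url.schema)    # b'https'
print(url.host)      # b'example.com'
print(url.port)      # 8443
print(url.path)      # b'/search'
print(url.query)     # b'q=kit'
print(url.fragment)  # b'top'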
147
Python
20.142854
51
0.591837
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/httptools/parser/url_parser.c
/* Generated by Cython 0.29.28 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [], "extra_compile_args": [ "-O2" ], "name": "httptools.parser.url_parser", "sources": [ "httptools/parser/url_parser.pyx" ] }, "module_name": "httptools.parser.url_parser" } END: Cython Metadata */ #ifndef PY_SSIZE_T_CLEAN #define PY_SSIZE_T_CLEAN #endif /* PY_SSIZE_T_CLEAN */ #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_28" #define CYTHON_HEX_VERSION 0x001D1CF0 #define CYTHON_FUTURE_DIVISION 1 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 
1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #if PY_VERSION_HEX >= 0x030B00A4 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #elif !defined(CYTHON_FAST_THREAD_STATE) #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL (PY_VERSION_HEX < 0x030B00A1) #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #if PY_VERSION_HEX >= 0x030B00A4 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif !defined(CYTHON_USE_EXC_INFO_STACK) #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #if PY_MAJOR_VERSION < 3 #include "longintrepr.h" #endif #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # 
define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_DefaultClassType PyType_Type #if PY_VERSION_HEX >= 0x030B00A1 static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int k, int l, int s, int f, PyObject *code, PyObject *c, PyObject* n, PyObject *v, PyObject *fv, PyObject *cell, PyObject* fn, PyObject *name, int fline, PyObject *lnos) { PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL; PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL; const char *fn_cstr=NULL; const char *name_cstr=NULL; PyCodeObject* co=NULL; PyObject *type, *value, *traceback; PyErr_Fetch(&type, &value, &traceback); if (!(kwds=PyDict_New())) goto end; if (!(argcount=PyLong_FromLong(a))) goto end; if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end; if (!(posonlyargcount=PyLong_FromLong(0))) goto end; if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end; if (!(kwonlyargcount=PyLong_FromLong(k))) goto end; if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end; if (!(nlocals=PyLong_FromLong(l))) goto 
end; if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end; if (!(stacksize=PyLong_FromLong(s))) goto end; if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end; if (!(flags=PyLong_FromLong(f))) goto end; if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end; if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end; if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end; if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end; if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end; if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end; if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end; if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end; if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end; if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end; if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end; if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too; if (!(empty = PyTuple_New(0))) goto cleanup_code_too; // unfortunately __pyx_empty_tuple isn't available here if (!(call_result = PyObject_Call(replace, empty, kwds))) goto cleanup_code_too; Py_XDECREF((PyObject*)co); co = (PyCodeObject*)call_result; call_result = NULL; if (0) { cleanup_code_too: Py_XDECREF((PyObject*)co); co = NULL; } end: Py_XDECREF(kwds); Py_XDECREF(argcount); Py_XDECREF(posonlyargcount); Py_XDECREF(kwonlyargcount); Py_XDECREF(nlocals); Py_XDECREF(stacksize); Py_XDECREF(replace); Py_XDECREF(call_result); Py_XDECREF(empty); if (type) { PyErr_Restore(type, value, traceback); } return co; } #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) 
PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #if defined(PyUnicode_IS_READY) #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #else #define __Pyx_PyUnicode_READY(op) (0) #endif #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #endif #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
  unaryfunc am_await;
  unaryfunc am_aiter;
  unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
  float value;
  memset(&value, 0xFF, sizeof(value));
  return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__httptools__parser__url_parser
#define __PYX_HAVE_API__httptools__parser__url_parser
/* Early includes */
#include <string.h>
#include <stdio.h>
#include "pythread.h"
#include <stdint.h>
#include "http_parser.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
  return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
  const Py_UNICODE *u_end = u;
  while (*u_end++) ;
  return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
  PyObject* sys;
  PyObject* default_encoding = NULL;
  PyObject* ascii_chars_u = NULL;
  PyObject* ascii_chars_b = NULL;
  const char* default_encoding_c;
  sys = PyImport_ImportModule("sys");
  if (!sys) goto bad;
  default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
  Py_DECREF(sys);
  if (!default_encoding) goto bad;
  default_encoding_c = PyBytes_AsString(default_encoding);
  if (!default_encoding_c) goto bad;
  if (strcmp(default_encoding_c, "ascii") == 0) {
    __Pyx_sys_getdefaultencoding_not_ascii = 0;
  } else {
    char ascii_chars[128];
    int c;
    for (c = 0; c < 128; c++) {
      ascii_chars[c] = c;
    }
    __Pyx_sys_getdefaultencoding_not_ascii = 1;
    ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
    if (!ascii_chars_u) goto bad;
    ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
    if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
      PyErr_Format(
        PyExc_ValueError,
        "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
        default_encoding_c);
      goto bad;
    }
    Py_DECREF(ascii_chars_u);
    Py_DECREF(ascii_chars_b);
  }
  Py_DECREF(default_encoding);
  return 0;
bad:
  Py_XDECREF(default_encoding);
  Py_XDECREF(ascii_chars_u);
  Py_XDECREF(ascii_chars_b);
  return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
  PyObject* sys;
  PyObject* default_encoding = NULL;
  char* default_encoding_c;
  sys = PyImport_ImportModule("sys");
  if (!sys) goto bad;
  default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
  Py_DECREF(sys);
  if (!default_encoding) goto bad;
  default_encoding_c = PyBytes_AsString(default_encoding);
  if (!default_encoding_c) goto bad;
  __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
  if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
  strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
  Py_DECREF(default_encoding);
  return 0;
bad:
  Py_XDECREF(default_encoding);
  return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
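/* NOTE (annotation): everything from here down is per-module state rather
   than generic support macros: interpreter-level globals (__pyx_m, __pyx_d,
   ...), the source-file name table consulted when building tracebacks, and
   the C struct layout backing the URL extension type declared in
   url_parser.pyx. */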
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
  "stringsource",
  "httptools\\parser\\url_parser.pyx",
  "type.pxd",
  "bool.pxd",
  "complex.pxd",
};
/*--- Type declarations ---*/
struct __pyx_obj_9httptools_6parser_10url_parser_URL;
/* "httptools/parser/url_parser.pyx":16
 *
 * @cython.freelist(250)
 * cdef class URL: # <<<<<<<<<<<<<<
 * cdef readonly bytes schema
 * cdef readonly bytes host
 */
struct __pyx_obj_9httptools_6parser_10url_parser_URL {
  PyObject_HEAD
  PyObject *schema;
  PyObject *host;
  PyObject *port;
  PyObject *path;
  PyObject *query;
  PyObject *fragment;
  PyObject *userinfo;
};
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
  void (*INCREF)(void*, PyObject*, int);
  void (*DECREF)(void*, PyObject*, int);
  void (*GOTREF)(void*, PyObject*, int);
  void (*GIVEREF)(void*, PyObject*, int);
  void* (*SetupContext)(const char*, int, const char*);
  void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
#if CYTHON_FAST_PYCALL
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif // CYTHON_FAST_PYCALL
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
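/* NOTE (annotation): the dict-versioning helpers above rely on CPython's
   ma_version_tag (PEP 509): a lookup result can be cached and reused for as
   long as the dictionary's version tag is unchanged, which lets the
   module-global lookup macro below skip the hash-table probe on the hot
   path. */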
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* IncludeStringH.proto */
#include <string.h>
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* TypeImport.proto */
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
enum __Pyx_ImportType_CheckSize {
  __Pyx_ImportType_CheckSize_Error = 0,
  __Pyx_ImportType_CheckSize_Warn = 1,
  __Pyx_ImportType_CheckSize_Ignore = 2
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
  PyCodeObject* code_object;
  int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
  int count;
  int max_count;
  __Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename);
/* GCCDiagnostics.proto */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint16_t(uint16_t value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'cpython.version' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.exc' */
/* Module declarations from 'cpython.module' */
/* Module declarations from 'cpython.tuple' */
/* Module declarations from 'cpython.list' */
/* Module declarations from 'cpython.sequence' */
/* Module declarations from 'cpython.mapping' */
/* Module declarations from 'cpython.iterator' */
/* Module declarations from 'cpython.number' */
/* Module declarations from 'cpython.int' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.bool' */
static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0;
/* Module declarations from 'cpython.long' */
/* Module declarations from 'cpython.float' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.complex' */
static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0;
/* Module declarations from 'cpython.string' */
/* Module declarations from 'cpython.unicode' */
/* Module declarations from 'cpython.dict' */
/* Module declarations from 'cpython.instance' */
/* Module declarations from 'cpython.function' */
/* Module declarations from 'cpython.method' */
/* Module declarations from 'cpython.weakref' */
/* Module declarations from 'cpython.getargs' */
/* Module declarations from 'cpython.pythread' */
/* Module declarations from 'cpython.pystate' */
/* Module declarations from 'cpython.cobject' */
/* Module declarations from 'cpython.oldbuffer' */
/* Module declarations from 'cpython.set' */
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'cpython.bytes' */
/* Module declarations from 'cpython.pycapsule' */
/* Module declarations from 'cpython' */
/* Module declarations from 'cython' */
/* Module declarations from 'httptools.parser' */
/* Module declarations from 'libc.stdint' */
/* Module declarations from 'httptools.parser.url_cparser' */
/* Module declarations from 'httptools.parser.url_parser' */
static PyTypeObject *__pyx_ptype_9httptools_6parser_10url_parser_URL = 0;
#define __Pyx_MODULE_NAME "httptools.parser.url_parser"
extern int __pyx_module_is_main_httptools__parser__url_parser;
int __pyx_module_is_main_httptools__parser__url_parser = 0;
/* Implementation of 'httptools.parser.url_parser' */
static PyObject *__pyx_builtin_TypeError;
static const char __pyx_k_ln[] = "ln";
static const char __pyx_k_URL[] = "URL";
static const char __pyx_k_all[] = "__all__";
static const char __pyx_k_off[] = "off";
static const char __pyx_k_res[] = "res";
static const char __pyx_k_url[] = "url";
static const char __pyx_k_host[] = "host";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_name[] = "__name__";
static const char __pyx_k_path[] = "path";
static const char __pyx_k_port[] = "port";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_query[] = "query";
static const char __pyx_k_errors[] = "errors";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_parsed[] = "parsed";
static const char __pyx_k_py_buf[] = "py_buf";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_result[] = "result";
static const char __pyx_k_schema[] = "schema";
static const char __pyx_k_buf_data[] = "buf_data";
static const char __pyx_k_fragment[] = "fragment";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_userinfo[] = "userinfo";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_parse_url[] = "parse_url";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_invalid_url_r[] = "invalid url {!r}";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_HttpParserInvalidURLError[] = "HttpParserInvalidURLError";
static const char __pyx_k_httptools_parser_url_parser[] = "httptools.parser.url_parser";
static const char __pyx_k_URL_schema_r_host_r_port_r_path[] = "<URL schema: {!r}, host: {!r}, port: {!r}, path: {!r}, query: {!r}, fragment: {!r}, userinfo: {!r}>";
static const char __pyx_k_httptools_parser_url_parser_pyx[] = "httptools\\parser\\url_parser.pyx";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static PyObject *__pyx_n_s_HttpParserInvalidURLError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_n_s_URL;
static PyObject *__pyx_kp_u_URL_schema_r_host_r_port_r_path;
static PyObject *__pyx_n_s_all;
static PyObject *__pyx_n_s_buf_data;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_errors;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fragment;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_n_s_host;
static PyObject *__pyx_n_s_httptools_parser_url_parser;
static PyObject *__pyx_kp_s_httptools_parser_url_parser_pyx;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_kp_u_invalid_url_r;
static PyObject *__pyx_n_s_ln;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_off;
static PyObject *__pyx_n_s_parse_url;
static PyObject *__pyx_n_u_parse_url;
static PyObject *__pyx_n_s_parsed;
static PyObject *__pyx_n_s_path;
static PyObject *__pyx_n_s_port;
static PyObject *__pyx_n_s_py_buf;
static PyObject *__pyx_n_s_query;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_res;
static PyObject *__pyx_n_s_result;
static PyObject *__pyx_n_s_schema;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_url;
static PyObject *__pyx_n_s_userinfo;
static int __pyx_pf_9httptools_6parser_10url_parser_3URL___cinit__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self, PyObject *__pyx_v_schema, PyObject *__pyx_v_host, PyObject *__pyx_v_port, PyObject *__pyx_v_path, PyObject *__pyx_v_query, PyObject *__pyx_v_fragment, PyObject *__pyx_v_userinfo); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_2__repr__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_6schema___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4host___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4port___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4path___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_5query___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_8fragment___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_8userinfo___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_6__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_parse_url(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_url); /* proto */
static PyObject *__pyx_tp_new_9httptools_6parser_10url_parser_URL(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_codeobj__5;
/* Late includes */
/* "httptools/parser/url_parser.pyx":25
 * cdef readonly bytes userinfo
 *
 * def __cinit__(self, bytes schema, bytes host, object port, bytes path, # <<<<<<<<<<<<<<
 * bytes query, bytes fragment, bytes userinfo):
 *
 */
/* Python wrapper */
static int __pyx_pw_9httptools_6parser_10url_parser_3URL_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_9httptools_6parser_10url_parser_3URL_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_schema = 0;
  PyObject *__pyx_v_host = 0;
  PyObject *__pyx_v_port = 0;
  PyObject *__pyx_v_path = 0;
  PyObject *__pyx_v_query = 0;
  PyObject *__pyx_v_fragment = 0;
  PyObject *__pyx_v_userinfo = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_schema,&__pyx_n_s_host,&__pyx_n_s_port,&__pyx_n_s_path,&__pyx_n_s_query,&__pyx_n_s_fragment,&__pyx_n_s_userinfo,0};
    PyObject* values[7] = {0,0,0,0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case 0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_schema)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case 1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_host)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 1); __PYX_ERR(1, 25, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case 2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_port)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 2); __PYX_ERR(1, 25, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case 3:
        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_path)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 3); __PYX_ERR(1, 25, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case 4:
        if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_query)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 4); __PYX_ERR(1, 25, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case 5:
        if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fragment)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 5); __PYX_ERR(1, 25, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case 6:
        if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_userinfo)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 6); __PYX_ERR(1, 25, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 25, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 7) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
      values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
      values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
    }
    __pyx_v_schema = ((PyObject*)values[0]);
    __pyx_v_host = ((PyObject*)values[1]);
    __pyx_v_port = values[2];
    __pyx_v_path = ((PyObject*)values[3]);
    __pyx_v_query = ((PyObject*)values[4]);
    __pyx_v_fragment = ((PyObject*)values[5]);
    __pyx_v_userinfo = ((PyObject*)values[6]);
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 25, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("httptools.parser.url_parser.URL.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_schema), (&PyBytes_Type), 1, "schema", 1))) __PYX_ERR(1, 25, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_host), (&PyBytes_Type), 1, "host", 1))) __PYX_ERR(1, 25, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_path), (&PyBytes_Type), 1, "path", 1))) __PYX_ERR(1, 25, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_query), (&PyBytes_Type), 1, "query", 1))) __PYX_ERR(1, 26, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_fragment), (&PyBytes_Type), 1, "fragment", 1))) __PYX_ERR(1, 26, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_userinfo), (&PyBytes_Type), 1, "userinfo", 1))) __PYX_ERR(1, 26, __pyx_L1_error)
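  /* NOTE (annotation): at this point the generated wrapper has unpacked all
     seven positional/keyword arguments into values[] and type-checked each
     bytes-typed one with __Pyx_ArgTypeTest; it now delegates to the typed
     implementation function for __cinit__. */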
  __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL___cinit__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self), __pyx_v_schema, __pyx_v_host, __pyx_v_port, __pyx_v_path, __pyx_v_query, __pyx_v_fragment, __pyx_v_userinfo);
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_9httptools_6parser_10url_parser_3URL___cinit__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self, PyObject *__pyx_v_schema, PyObject *__pyx_v_host, PyObject *__pyx_v_port, PyObject *__pyx_v_path, PyObject *__pyx_v_query, PyObject *__pyx_v_fragment, PyObject *__pyx_v_userinfo) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__", 0);
  /* "httptools/parser/url_parser.pyx":28
 * bytes query, bytes fragment, bytes userinfo):
 *
 * self.schema = schema # <<<<<<<<<<<<<<
 * self.host = host
 * self.port = port
 */
  __Pyx_INCREF(__pyx_v_schema);
  __Pyx_GIVEREF(__pyx_v_schema);
  __Pyx_GOTREF(__pyx_v_self->schema);
  __Pyx_DECREF(__pyx_v_self->schema);
  __pyx_v_self->schema = __pyx_v_schema;
  /* "httptools/parser/url_parser.pyx":29
 *
 * self.schema = schema
 * self.host = host # <<<<<<<<<<<<<<
 * self.port = port
 * self.path = path
 */
  __Pyx_INCREF(__pyx_v_host);
  __Pyx_GIVEREF(__pyx_v_host);
  __Pyx_GOTREF(__pyx_v_self->host);
  __Pyx_DECREF(__pyx_v_self->host);
  __pyx_v_self->host = __pyx_v_host;
  /* "httptools/parser/url_parser.pyx":30
 * self.schema = schema
 * self.host = host
 * self.port = port # <<<<<<<<<<<<<<
 * self.path = path
 * self.query = query
 */
  __Pyx_INCREF(__pyx_v_port);
  __Pyx_GIVEREF(__pyx_v_port);
  __Pyx_GOTREF(__pyx_v_self->port);
  __Pyx_DECREF(__pyx_v_self->port);
  __pyx_v_self->port = __pyx_v_port;
  /* "httptools/parser/url_parser.pyx":31
 * self.host = host
 * self.port = port
 * self.path = path # <<<<<<<<<<<<<<
 * self.query = query
 * self.fragment = fragment
 */
  __Pyx_INCREF(__pyx_v_path);
  __Pyx_GIVEREF(__pyx_v_path);
  __Pyx_GOTREF(__pyx_v_self->path);
  __Pyx_DECREF(__pyx_v_self->path);
  __pyx_v_self->path = __pyx_v_path;
  /* "httptools/parser/url_parser.pyx":32
 * self.port = port
 * self.path = path
 * self.query = query # <<<<<<<<<<<<<<
 * self.fragment = fragment
 * self.userinfo = userinfo
 */
  __Pyx_INCREF(__pyx_v_query);
  __Pyx_GIVEREF(__pyx_v_query);
  __Pyx_GOTREF(__pyx_v_self->query);
  __Pyx_DECREF(__pyx_v_self->query);
  __pyx_v_self->query = __pyx_v_query;
  /* "httptools/parser/url_parser.pyx":33
 * self.path = path
 * self.query = query
 * self.fragment = fragment # <<<<<<<<<<<<<<
 * self.userinfo = userinfo
 *
 */
  __Pyx_INCREF(__pyx_v_fragment);
  __Pyx_GIVEREF(__pyx_v_fragment);
  __Pyx_GOTREF(__pyx_v_self->fragment);
  __Pyx_DECREF(__pyx_v_self->fragment);
  __pyx_v_self->fragment = __pyx_v_fragment;
  /* "httptools/parser/url_parser.pyx":34
 * self.query = query
 * self.fragment = fragment
 * self.userinfo = userinfo # <<<<<<<<<<<<<<
 *
 * def __repr__(self):
 */
  __Pyx_INCREF(__pyx_v_userinfo);
  __Pyx_GIVEREF(__pyx_v_userinfo);
  __Pyx_GOTREF(__pyx_v_self->userinfo);
  __Pyx_DECREF(__pyx_v_self->userinfo);
  __pyx_v_self->userinfo = __pyx_v_userinfo;
  /* "httptools/parser/url_parser.pyx":25
 * cdef readonly bytes userinfo
 *
 * def __cinit__(self, bytes schema, bytes host, object port, bytes path, # <<<<<<<<<<<<<<
 * bytes query, bytes fragment, bytes userinfo):
 *
 */
  /* function exit code */
  __pyx_r = 0;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
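/* NOTE (annotation): each attribute store in __cinit__ above follows
   Cython's reference-counting dance for slot assignment: INCREF the incoming
   value, transfer ownership (GIVEREF), then take and release the old slot
   value (GOTREF/DECREF) before writing the new pointer. */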
/* "httptools/parser/url_parser.pyx":36
 * self.userinfo = userinfo
 *
 * def __repr__(self): # <<<<<<<<<<<<<<
 * return ('<URL schema: {!r}, host: {!r}, port: {!r}, path: {!r}, '
 * 'query: {!r}, fragment: {!r}, userinfo: {!r}>'
 */
/* Python wrapper */
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_3__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_3__repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_2__repr__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_2__repr__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);
  /* "httptools/parser/url_parser.pyx":37
 *
 * def __repr__(self):
 * return ('<URL schema: {!r}, host: {!r}, port: {!r}, path: {!r}, ' # <<<<<<<<<<<<<<
 * 'query: {!r}, fragment: {!r}, userinfo: {!r}>'
 * .format(self.schema, self.host, self.port, self.path,
 */
  __Pyx_XDECREF(__pyx_r);
  /* "httptools/parser/url_parser.pyx":39
 * return ('<URL schema: {!r}, host: {!r}, port: {!r}, path: {!r}, '
 * 'query: {!r}, fragment: {!r}, userinfo: {!r}>'
 * .format(self.schema, self.host, self.port, self.path, # <<<<<<<<<<<<<<
 * self.query, self.fragment, self.userinfo))
 *
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_URL_schema_r_host_r_port_r_path, __pyx_n_s_format); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 39, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  /* "httptools/parser/url_parser.pyx":40
 * 'query: {!r}, fragment: {!r}, userinfo: {!r}>'
 * .format(self.schema, self.host, self.port, self.path,
 * self.query, self.fragment, self.userinfo)) # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_t_3 = NULL;
  __pyx_t_4 = 0;
  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
    if (likely(__pyx_t_3)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_2, function);
      __pyx_t_4 = 1;
    }
  }
  #if CYTHON_FAST_PYCALL
  if (PyFunction_Check(__pyx_t_2)) {
    PyObject *__pyx_temp[8] = {__pyx_t_3, __pyx_v_self->schema, __pyx_v_self->host, __pyx_v_self->port, __pyx_v_self->path, __pyx_v_self->query, __pyx_v_self->fragment, __pyx_v_self->userinfo};
    __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_4, 7+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 39, __pyx_L1_error)
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_GOTREF(__pyx_t_1);
  } else
  #endif
  #if CYTHON_FAST_PYCCALL
  if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) {
    PyObject *__pyx_temp[8] = {__pyx_t_3, __pyx_v_self->schema, __pyx_v_self->host, __pyx_v_self->port, __pyx_v_self->path, __pyx_v_self->query, __pyx_v_self->fragment, __pyx_v_self->userinfo};
    __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_4, 7+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 39, __pyx_L1_error)
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_GOTREF(__pyx_t_1);
  } else
  #endif
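  /* NOTE (annotation): three call paths are tried in order for the
     str.format() call: Cython's fastcall path for plain Python functions,
     the METH_FASTCALL path for C functions, and, in the block below, the
     generic fallback that packs the seven repr arguments into a fresh tuple
     for PyObject_Call. */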
  {
    __pyx_t_5 = PyTuple_New(7+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 39, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (__pyx_t_3) {
      __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL;
    }
    __Pyx_INCREF(__pyx_v_self->schema);
    __Pyx_GIVEREF(__pyx_v_self->schema);
    PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_self->schema);
    __Pyx_INCREF(__pyx_v_self->host);
    __Pyx_GIVEREF(__pyx_v_self->host);
    PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_self->host);
    __Pyx_INCREF(__pyx_v_self->port);
    __Pyx_GIVEREF(__pyx_v_self->port);
    PyTuple_SET_ITEM(__pyx_t_5, 2+__pyx_t_4, __pyx_v_self->port);
    __Pyx_INCREF(__pyx_v_self->path);
    __Pyx_GIVEREF(__pyx_v_self->path);
    PyTuple_SET_ITEM(__pyx_t_5, 3+__pyx_t_4, __pyx_v_self->path);
    __Pyx_INCREF(__pyx_v_self->query);
    __Pyx_GIVEREF(__pyx_v_self->query);
    PyTuple_SET_ITEM(__pyx_t_5, 4+__pyx_t_4, __pyx_v_self->query);
    __Pyx_INCREF(__pyx_v_self->fragment);
    __Pyx_GIVEREF(__pyx_v_self->fragment);
    PyTuple_SET_ITEM(__pyx_t_5, 5+__pyx_t_4, __pyx_v_self->fragment);
    __Pyx_INCREF(__pyx_v_self->userinfo);
    __Pyx_GIVEREF(__pyx_v_self->userinfo);
    PyTuple_SET_ITEM(__pyx_t_5, 6+__pyx_t_4, __pyx_v_self->userinfo);
    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 39, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  }
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "httptools/parser/url_parser.pyx":36
 * self.userinfo = userinfo
 *
 * def __repr__(self): # <<<<<<<<<<<<<<
 * return ('<URL schema: {!r}, host: {!r}, port: {!r}, path: {!r}, '
 * 'query: {!r}, fragment: {!r}, userinfo: {!r}>'
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("httptools.parser.url_parser.URL.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "httptools/parser/url_parser.pyx":17
 * @cython.freelist(250)
 * cdef class URL:
 * cdef readonly bytes schema # <<<<<<<<<<<<<<
 * cdef readonly bytes host
 * cdef readonly object port
 */
/* Python wrapper */
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_6schema_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_6schema_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_6schema___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_6schema___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->schema);
  __pyx_r = __pyx_v_self->schema;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "httptools/parser/url_parser.pyx":18
 * cdef class URL:
 * cdef readonly bytes schema
 * cdef readonly bytes host # <<<<<<<<<<<<<<
 * cdef readonly object port
 * cdef readonly bytes path
 */
/* Python wrapper */
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4host_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4host_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_4host___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4host___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->host);
  __pyx_r = __pyx_v_self->host;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "httptools/parser/url_parser.pyx":19
 * cdef readonly bytes schema
 * cdef readonly bytes host
 * cdef readonly object port # <<<<<<<<<<<<<<
 * cdef readonly bytes path
 * cdef readonly bytes query
 */
/* Python wrapper */
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4port_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4port_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_4port___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4port___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->port);
  __pyx_r = __pyx_v_self->port;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "httptools/parser/url_parser.pyx":20
 * cdef readonly bytes host
 * cdef readonly object port
 * cdef readonly bytes path # <<<<<<<<<<<<<<
 * cdef readonly bytes query
 * cdef readonly bytes fragment
 */
/* Python wrapper */
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4path_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_4path_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_4path___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4path___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->path);
  __pyx_r = __pyx_v_self->path;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "httptools/parser/url_parser.pyx":21
 * cdef readonly object port
 * cdef readonly bytes path
 * cdef readonly bytes query # <<<<<<<<<<<<<<
 * cdef readonly bytes fragment
 * cdef readonly bytes userinfo
 */
/* Python wrapper */
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_5query_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_5query_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_5query___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->query); __pyx_r = __pyx_v_self->query; goto __pyx_L0; /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/url_parser.pyx":22 * cdef readonly bytes path * cdef readonly bytes query * cdef readonly bytes fragment # <<<<<<<<<<<<<< * cdef readonly bytes userinfo * */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_8fragment_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_8fragment_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_8fragment___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_8fragment___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->fragment); __pyx_r = __pyx_v_self->fragment; goto __pyx_L0; /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/url_parser.pyx":23 * cdef readonly bytes query * cdef readonly bytes fragment * cdef readonly bytes userinfo # <<<<<<<<<<<<<< * * def __cinit__(self, bytes schema, bytes host, object port, bytes path, */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_8userinfo_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_8userinfo_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_8userinfo___get__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_8userinfo___get__(struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->userinfo); __pyx_r = __pyx_v_self->userinfo; goto __pyx_L0; /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED 
PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_4__reduce_cython__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_4__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.url_parser.URL.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_10url_parser_3URL_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_3URL_6__setstate_cython__(((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_10url_parser_3URL_6__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); 
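/* Editor's note (added by hand; not Cython output): like __reduce_cython__
 * above, this stub unconditionally raises TypeError("no default __reduce__
 * due to non-trivial __cinit__"). Because URL takes required constructor
 * arguments, Cython emits these pickling stubs instead of a default
 * __reduce__, so URL instances are not picklable out of the box. */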
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.url_parser.URL.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/url_parser.pyx":43 * * * def parse_url(url): # <<<<<<<<<<<<<< * cdef: * Py_buffer py_buf */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_10url_parser_1parse_url(PyObject *__pyx_self, PyObject *__pyx_v_url); /*proto*/ static PyMethodDef __pyx_mdef_9httptools_6parser_10url_parser_1parse_url = {"parse_url", (PyCFunction)__pyx_pw_9httptools_6parser_10url_parser_1parse_url, METH_O, 0}; static PyObject *__pyx_pw_9httptools_6parser_10url_parser_1parse_url(PyObject *__pyx_self, PyObject *__pyx_v_url) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("parse_url (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_10url_parser_parse_url(__pyx_self, ((PyObject *)__pyx_v_url)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_10url_parser_parse_url(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_url) { Py_buffer __pyx_v_py_buf; char *__pyx_v_buf_data; struct http_parser_url *__pyx_v_parsed; int __pyx_v_res; PyObject *__pyx_v_schema = 0; PyObject *__pyx_v_host = 0; PyObject *__pyx_v_port = 0; PyObject *__pyx_v_path = 0; PyObject *__pyx_v_query = 0; PyObject *__pyx_v_fragment = 0; PyObject *__pyx_v_userinfo = 0; CYTHON_UNUSED PyObject *__pyx_v_result = 0; int __pyx_v_off; int __pyx_v_ln; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; uint16_t __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; char const *__pyx_t_10; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; PyObject *__pyx_t_13 = NULL; PyObject *__pyx_t_14 = NULL; PyObject *__pyx_t_15 = NULL; PyObject *__pyx_t_16 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("parse_url", 0); /* "httptools/parser/url_parser.pyx":49 * uparser.http_parser_url* parsed * int res * bytes schema = None # <<<<<<<<<<<<<< * bytes host = None * object port = None */ __Pyx_INCREF(Py_None); __pyx_v_schema = ((PyObject*)Py_None); /* "httptools/parser/url_parser.pyx":50 * int res * bytes schema = None * bytes host = None # <<<<<<<<<<<<<< * object port = None * bytes path = None */ __Pyx_INCREF(Py_None); __pyx_v_host = ((PyObject*)Py_None); /* "httptools/parser/url_parser.pyx":51 * bytes schema = None * bytes host = None * object port = None # <<<<<<<<<<<<<< * bytes path = None * bytes query = None */ __Pyx_INCREF(Py_None); __pyx_v_port = Py_None; /* "httptools/parser/url_parser.pyx":52 * bytes host = None * object port = None * bytes path = None # <<<<<<<<<<<<<< * bytes query = None * bytes fragment = None */ __Pyx_INCREF(Py_None); __pyx_v_path = ((PyObject*)Py_None); /* "httptools/parser/url_parser.pyx":53 * object port = None * bytes path = None * bytes query = None # <<<<<<<<<<<<<< * bytes fragment = 
None * bytes userinfo = None */ __Pyx_INCREF(Py_None); __pyx_v_query = ((PyObject*)Py_None); /* "httptools/parser/url_parser.pyx":54 * bytes path = None * bytes query = None * bytes fragment = None # <<<<<<<<<<<<<< * bytes userinfo = None * object result = None */ __Pyx_INCREF(Py_None); __pyx_v_fragment = ((PyObject*)Py_None); /* "httptools/parser/url_parser.pyx":55 * bytes query = None * bytes fragment = None * bytes userinfo = None # <<<<<<<<<<<<<< * object result = None * int off */ __Pyx_INCREF(Py_None); __pyx_v_userinfo = ((PyObject*)Py_None); /* "httptools/parser/url_parser.pyx":56 * bytes fragment = None * bytes userinfo = None * object result = None # <<<<<<<<<<<<<< * int off * int ln */ __Pyx_INCREF(Py_None); __pyx_v_result = Py_None; /* "httptools/parser/url_parser.pyx":60 * int ln * * parsed = <uparser.http_parser_url*> \ # <<<<<<<<<<<<<< * PyMem_Malloc(sizeof(uparser.http_parser_url)) * uparser.http_parser_url_init(parsed) */ __pyx_v_parsed = ((struct http_parser_url *)PyMem_Malloc((sizeof(struct http_parser_url)))); /* "httptools/parser/url_parser.pyx":62 * parsed = <uparser.http_parser_url*> \ * PyMem_Malloc(sizeof(uparser.http_parser_url)) * uparser.http_parser_url_init(parsed) # <<<<<<<<<<<<<< * * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE) */ http_parser_url_init(__pyx_v_parsed); /* "httptools/parser/url_parser.pyx":64 * uparser.http_parser_url_init(parsed) * * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE) # <<<<<<<<<<<<<< * try: * buf_data = <char*>py_buf.buf */ __pyx_t_1 = PyObject_GetBuffer(__pyx_v_url, (&__pyx_v_py_buf), PyBUF_SIMPLE); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 64, __pyx_L1_error) /* "httptools/parser/url_parser.pyx":65 * * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE) * try: # <<<<<<<<<<<<<< * buf_data = <char*>py_buf.buf * res = uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) */ /*try:*/ { /* "httptools/parser/url_parser.pyx":66 * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE) * try: * buf_data = <char*>py_buf.buf # <<<<<<<<<<<<<< * res = uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) * */ __pyx_v_buf_data = ((char *)__pyx_v_py_buf.buf); /* "httptools/parser/url_parser.pyx":67 * try: * buf_data = <char*>py_buf.buf * res = uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) # <<<<<<<<<<<<<< * * if res == 0: */ __pyx_v_res = http_parser_parse_url(__pyx_v_buf_data, __pyx_v_py_buf.len, 0, __pyx_v_parsed); /* "httptools/parser/url_parser.pyx":69 * res = uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) * * if res == 0: # <<<<<<<<<<<<<< * if parsed.field_set & (1 << uparser.UF_SCHEMA): * off = parsed.field_data[<int>uparser.UF_SCHEMA].off */ __pyx_t_2 = ((__pyx_v_res == 0) != 0); if (likely(__pyx_t_2)) { /* "httptools/parser/url_parser.pyx":70 * * if res == 0: * if parsed.field_set & (1 << uparser.UF_SCHEMA): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_SCHEMA].off * ln = parsed.field_data[<int>uparser.UF_SCHEMA].len */ __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_SCHEMA)) != 0); if (__pyx_t_2) { /* "httptools/parser/url_parser.pyx":71 * if res == 0: * if parsed.field_set & (1 << uparser.UF_SCHEMA): * off = parsed.field_data[<int>uparser.UF_SCHEMA].off # <<<<<<<<<<<<<< * ln = parsed.field_data[<int>uparser.UF_SCHEMA].len * schema = buf_data[off:off+ln] */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_SCHEMA)]).off; __pyx_v_off = __pyx_t_3; /* "httptools/parser/url_parser.pyx":72 * if parsed.field_set & (1 << uparser.UF_SCHEMA): * off = 
parsed.field_data[<int>uparser.UF_SCHEMA].off * ln = parsed.field_data[<int>uparser.UF_SCHEMA].len # <<<<<<<<<<<<<< * schema = buf_data[off:off+ln] * */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_SCHEMA)]).len; __pyx_v_ln = __pyx_t_3; /* "httptools/parser/url_parser.pyx":73 * off = parsed.field_data[<int>uparser.UF_SCHEMA].off * ln = parsed.field_data[<int>uparser.UF_SCHEMA].len * schema = buf_data[off:off+ln] # <<<<<<<<<<<<<< * * if parsed.field_set & (1 << uparser.UF_HOST): */ __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 73, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_schema, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "httptools/parser/url_parser.pyx":70 * * if res == 0: * if parsed.field_set & (1 << uparser.UF_SCHEMA): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_SCHEMA].off * ln = parsed.field_data[<int>uparser.UF_SCHEMA].len */ } /* "httptools/parser/url_parser.pyx":75 * schema = buf_data[off:off+ln] * * if parsed.field_set & (1 << uparser.UF_HOST): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_HOST].off * ln = parsed.field_data[<int>uparser.UF_HOST].len */ __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_HOST)) != 0); if (__pyx_t_2) { /* "httptools/parser/url_parser.pyx":76 * * if parsed.field_set & (1 << uparser.UF_HOST): * off = parsed.field_data[<int>uparser.UF_HOST].off # <<<<<<<<<<<<<< * ln = parsed.field_data[<int>uparser.UF_HOST].len * host = buf_data[off:off+ln] */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_HOST)]).off; __pyx_v_off = __pyx_t_3; /* "httptools/parser/url_parser.pyx":77 * if parsed.field_set & (1 << uparser.UF_HOST): * off = parsed.field_data[<int>uparser.UF_HOST].off * ln = parsed.field_data[<int>uparser.UF_HOST].len # <<<<<<<<<<<<<< * host = buf_data[off:off+ln] * */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_HOST)]).len; __pyx_v_ln = __pyx_t_3; /* "httptools/parser/url_parser.pyx":78 * off = parsed.field_data[<int>uparser.UF_HOST].off * ln = parsed.field_data[<int>uparser.UF_HOST].len * host = buf_data[off:off+ln] # <<<<<<<<<<<<<< * * if parsed.field_set & (1 << uparser.UF_PORT): */ __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 78, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_host, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "httptools/parser/url_parser.pyx":75 * schema = buf_data[off:off+ln] * * if parsed.field_set & (1 << uparser.UF_HOST): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_HOST].off * ln = parsed.field_data[<int>uparser.UF_HOST].len */ } /* "httptools/parser/url_parser.pyx":80 * host = buf_data[off:off+ln] * * if parsed.field_set & (1 << uparser.UF_PORT): # <<<<<<<<<<<<<< * port = parsed.port * */ __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_PORT)) != 0); if (__pyx_t_2) { /* "httptools/parser/url_parser.pyx":81 * * if parsed.field_set & (1 << uparser.UF_PORT): * port = parsed.port # <<<<<<<<<<<<<< * * if parsed.field_set & (1 << uparser.UF_PATH): */ __pyx_t_4 = __Pyx_PyInt_From_uint16_t(__pyx_v_parsed->port); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 81, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_port, __pyx_t_4); __pyx_t_4 = 0; /* "httptools/parser/url_parser.pyx":80 * host = buf_data[off:off+ln] * * if parsed.field_set & (1 << uparser.UF_PORT): # <<<<<<<<<<<<<< * port = parsed.port * */ } /* 
"httptools/parser/url_parser.pyx":83 * port = parsed.port * * if parsed.field_set & (1 << uparser.UF_PATH): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_PATH].off * ln = parsed.field_data[<int>uparser.UF_PATH].len */ __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_PATH)) != 0); if (__pyx_t_2) { /* "httptools/parser/url_parser.pyx":84 * * if parsed.field_set & (1 << uparser.UF_PATH): * off = parsed.field_data[<int>uparser.UF_PATH].off # <<<<<<<<<<<<<< * ln = parsed.field_data[<int>uparser.UF_PATH].len * path = buf_data[off:off+ln] */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_PATH)]).off; __pyx_v_off = __pyx_t_3; /* "httptools/parser/url_parser.pyx":85 * if parsed.field_set & (1 << uparser.UF_PATH): * off = parsed.field_data[<int>uparser.UF_PATH].off * ln = parsed.field_data[<int>uparser.UF_PATH].len # <<<<<<<<<<<<<< * path = buf_data[off:off+ln] * */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_PATH)]).len; __pyx_v_ln = __pyx_t_3; /* "httptools/parser/url_parser.pyx":86 * off = parsed.field_data[<int>uparser.UF_PATH].off * ln = parsed.field_data[<int>uparser.UF_PATH].len * path = buf_data[off:off+ln] # <<<<<<<<<<<<<< * * if parsed.field_set & (1 << uparser.UF_QUERY): */ __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 86, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_path, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "httptools/parser/url_parser.pyx":83 * port = parsed.port * * if parsed.field_set & (1 << uparser.UF_PATH): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_PATH].off * ln = parsed.field_data[<int>uparser.UF_PATH].len */ } /* "httptools/parser/url_parser.pyx":88 * path = buf_data[off:off+ln] * * if parsed.field_set & (1 << uparser.UF_QUERY): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_QUERY].off * ln = parsed.field_data[<int>uparser.UF_QUERY].len */ __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_QUERY)) != 0); if (__pyx_t_2) { /* "httptools/parser/url_parser.pyx":89 * * if parsed.field_set & (1 << uparser.UF_QUERY): * off = parsed.field_data[<int>uparser.UF_QUERY].off # <<<<<<<<<<<<<< * ln = parsed.field_data[<int>uparser.UF_QUERY].len * query = buf_data[off:off+ln] */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_QUERY)]).off; __pyx_v_off = __pyx_t_3; /* "httptools/parser/url_parser.pyx":90 * if parsed.field_set & (1 << uparser.UF_QUERY): * off = parsed.field_data[<int>uparser.UF_QUERY].off * ln = parsed.field_data[<int>uparser.UF_QUERY].len # <<<<<<<<<<<<<< * query = buf_data[off:off+ln] * */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_QUERY)]).len; __pyx_v_ln = __pyx_t_3; /* "httptools/parser/url_parser.pyx":91 * off = parsed.field_data[<int>uparser.UF_QUERY].off * ln = parsed.field_data[<int>uparser.UF_QUERY].len * query = buf_data[off:off+ln] # <<<<<<<<<<<<<< * * if parsed.field_set & (1 << uparser.UF_FRAGMENT): */ __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 91, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_query, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "httptools/parser/url_parser.pyx":88 * path = buf_data[off:off+ln] * * if parsed.field_set & (1 << uparser.UF_QUERY): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_QUERY].off * ln = parsed.field_data[<int>uparser.UF_QUERY].len */ } /* "httptools/parser/url_parser.pyx":93 * query = 
buf_data[off:off+ln] * * if parsed.field_set & (1 << uparser.UF_FRAGMENT): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_FRAGMENT].off * ln = parsed.field_data[<int>uparser.UF_FRAGMENT].len */ __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_FRAGMENT)) != 0); if (__pyx_t_2) { /* "httptools/parser/url_parser.pyx":94 * * if parsed.field_set & (1 << uparser.UF_FRAGMENT): * off = parsed.field_data[<int>uparser.UF_FRAGMENT].off # <<<<<<<<<<<<<< * ln = parsed.field_data[<int>uparser.UF_FRAGMENT].len * fragment = buf_data[off:off+ln] */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_FRAGMENT)]).off; __pyx_v_off = __pyx_t_3; /* "httptools/parser/url_parser.pyx":95 * if parsed.field_set & (1 << uparser.UF_FRAGMENT): * off = parsed.field_data[<int>uparser.UF_FRAGMENT].off * ln = parsed.field_data[<int>uparser.UF_FRAGMENT].len # <<<<<<<<<<<<<< * fragment = buf_data[off:off+ln] * */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_FRAGMENT)]).len; __pyx_v_ln = __pyx_t_3; /* "httptools/parser/url_parser.pyx":96 * off = parsed.field_data[<int>uparser.UF_FRAGMENT].off * ln = parsed.field_data[<int>uparser.UF_FRAGMENT].len * fragment = buf_data[off:off+ln] # <<<<<<<<<<<<<< * * if parsed.field_set & (1 << uparser.UF_USERINFO): */ __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 96, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_fragment, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "httptools/parser/url_parser.pyx":93 * query = buf_data[off:off+ln] * * if parsed.field_set & (1 << uparser.UF_FRAGMENT): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_FRAGMENT].off * ln = parsed.field_data[<int>uparser.UF_FRAGMENT].len */ } /* "httptools/parser/url_parser.pyx":98 * fragment = buf_data[off:off+ln] * * if parsed.field_set & (1 << uparser.UF_USERINFO): # <<<<<<<<<<<<<< * off = parsed.field_data[<int>uparser.UF_USERINFO].off * ln = parsed.field_data[<int>uparser.UF_USERINFO].len */ __pyx_t_2 = ((__pyx_v_parsed->field_set & (1 << UF_USERINFO)) != 0); if (__pyx_t_2) { /* "httptools/parser/url_parser.pyx":99 * * if parsed.field_set & (1 << uparser.UF_USERINFO): * off = parsed.field_data[<int>uparser.UF_USERINFO].off # <<<<<<<<<<<<<< * ln = parsed.field_data[<int>uparser.UF_USERINFO].len * userinfo = buf_data[off:off+ln] */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_USERINFO)]).off; __pyx_v_off = __pyx_t_3; /* "httptools/parser/url_parser.pyx":100 * if parsed.field_set & (1 << uparser.UF_USERINFO): * off = parsed.field_data[<int>uparser.UF_USERINFO].off * ln = parsed.field_data[<int>uparser.UF_USERINFO].len # <<<<<<<<<<<<<< * userinfo = buf_data[off:off+ln] * */ __pyx_t_3 = (__pyx_v_parsed->field_data[((int)UF_USERINFO)]).len; __pyx_v_ln = __pyx_t_3; /* "httptools/parser/url_parser.pyx":101 * off = parsed.field_data[<int>uparser.UF_USERINFO].off * ln = parsed.field_data[<int>uparser.UF_USERINFO].len * userinfo = buf_data[off:off+ln] # <<<<<<<<<<<<<< * * return URL(schema, host, port, path, query, fragment, userinfo) */ __pyx_t_4 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_buf_data + __pyx_v_off, (__pyx_v_off + __pyx_v_ln) - __pyx_v_off); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 101, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_userinfo, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "httptools/parser/url_parser.pyx":98 * fragment = buf_data[off:off+ln] * * if parsed.field_set & (1 << uparser.UF_USERINFO): # <<<<<<<<<<<<<< * off = 
parsed.field_data[<int>uparser.UF_USERINFO].off * ln = parsed.field_data[<int>uparser.UF_USERINFO].len */ } /* "httptools/parser/url_parser.pyx":103 * userinfo = buf_data[off:off+ln] * * return URL(schema, host, port, path, query, fragment, userinfo) # <<<<<<<<<<<<<< * else: * raise HttpParserInvalidURLError("invalid url {!r}".format(url)) */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = PyTuple_New(7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 103, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_schema); __Pyx_GIVEREF(__pyx_v_schema); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_schema); __Pyx_INCREF(__pyx_v_host); __Pyx_GIVEREF(__pyx_v_host); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_host); __Pyx_INCREF(__pyx_v_port); __Pyx_GIVEREF(__pyx_v_port); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_port); __Pyx_INCREF(__pyx_v_path); __Pyx_GIVEREF(__pyx_v_path); PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_path); __Pyx_INCREF(__pyx_v_query); __Pyx_GIVEREF(__pyx_v_query); PyTuple_SET_ITEM(__pyx_t_4, 4, __pyx_v_query); __Pyx_INCREF(__pyx_v_fragment); __Pyx_GIVEREF(__pyx_v_fragment); PyTuple_SET_ITEM(__pyx_t_4, 5, __pyx_v_fragment); __Pyx_INCREF(__pyx_v_userinfo); __Pyx_GIVEREF(__pyx_v_userinfo); PyTuple_SET_ITEM(__pyx_t_4, 6, __pyx_v_userinfo); __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_ptype_9httptools_6parser_10url_parser_URL), __pyx_t_4, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 103, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L3_return; /* "httptools/parser/url_parser.pyx":69 * res = uparser.http_parser_parse_url(buf_data, py_buf.len, 0, parsed) * * if res == 0: # <<<<<<<<<<<<<< * if parsed.field_set & (1 << uparser.UF_SCHEMA): * off = parsed.field_data[<int>uparser.UF_SCHEMA].off */ } /* "httptools/parser/url_parser.pyx":105 * return URL(schema, host, port, path, query, fragment, userinfo) * else: * raise HttpParserInvalidURLError("invalid url {!r}".format(url)) # <<<<<<<<<<<<<< * finally: * PyBuffer_Release(&py_buf) */ /*else*/ { __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_HttpParserInvalidURLError); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 105, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_invalid_url_r, __pyx_n_s_format); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 105, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_6 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_v_url) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_v_url); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 105, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_5 = (__pyx_t_7) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_7, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 105, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 105, __pyx_L4_error) } } /* "httptools/parser/url_parser.pyx":107 * raise HttpParserInvalidURLError("invalid url {!r}".format(url)) * finally: * PyBuffer_Release(&py_buf) # <<<<<<<<<<<<<< * PyMem_Free(parsed) */ /*finally:*/ { __pyx_L4_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13) < 0)) __Pyx_ErrFetch(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __Pyx_XGOTREF(__pyx_t_13); __Pyx_XGOTREF(__pyx_t_14); __Pyx_XGOTREF(__pyx_t_15); __Pyx_XGOTREF(__pyx_t_16); __pyx_t_1 = __pyx_lineno; __pyx_t_9 = __pyx_clineno; __pyx_t_10 = __pyx_filename; { PyBuffer_Release((&__pyx_v_py_buf)); /* "httptools/parser/url_parser.pyx":108 * finally: * PyBuffer_Release(&py_buf) * PyMem_Free(parsed) # <<<<<<<<<<<<<< */ PyMem_Free(__pyx_v_parsed); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_15); __Pyx_XGIVEREF(__pyx_t_16); __Pyx_ExceptionReset(__pyx_t_14, __pyx_t_15, __pyx_t_16); } __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_XGIVEREF(__pyx_t_13); __Pyx_ErrRestore(__pyx_t_11, __pyx_t_12, __pyx_t_13); __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; __pyx_lineno = __pyx_t_1; __pyx_clineno = __pyx_t_9; __pyx_filename = __pyx_t_10; goto __pyx_L1_error; } __pyx_L3_return: { __pyx_t_16 = __pyx_r; __pyx_r = 0; /* "httptools/parser/url_parser.pyx":107 * raise HttpParserInvalidURLError("invalid url {!r}".format(url)) * finally: * PyBuffer_Release(&py_buf) # <<<<<<<<<<<<<< * PyMem_Free(parsed) */ PyBuffer_Release((&__pyx_v_py_buf)); /* "httptools/parser/url_parser.pyx":108 * finally: * PyBuffer_Release(&py_buf) * PyMem_Free(parsed) # <<<<<<<<<<<<<< */ PyMem_Free(__pyx_v_parsed); __pyx_r = __pyx_t_16; __pyx_t_16 = 0; goto __pyx_L0; } } /* "httptools/parser/url_parser.pyx":43 * * * def parse_url(url): # <<<<<<<<<<<<<< * cdef: * Py_buffer py_buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("httptools.parser.url_parser.parse_url", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_schema); __Pyx_XDECREF(__pyx_v_host); __Pyx_XDECREF(__pyx_v_port); __Pyx_XDECREF(__pyx_v_path); __Pyx_XDECREF(__pyx_v_query); __Pyx_XDECREF(__pyx_v_fragment); __Pyx_XDECREF(__pyx_v_userinfo); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_obj_9httptools_6parser_10url_parser_URL *__pyx_freelist_9httptools_6parser_10url_parser_URL[250]; 
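/* Editor's note (added by hand; not Cython output): parse_url() above
 * acquires a PyBUF_SIMPLE buffer over its argument, runs
 * http_parser_parse_url() over it, slices each field flagged in field_set
 * (UF_SCHEMA .. UF_USERINFO) out of the buffer as bytes, and returns a URL
 * instance; a non-zero parser result raises HttpParserInvalidURLError, and
 * the finally block releases the buffer and frees the http_parser_url
 * struct on both paths. The freelist array above and the counter below
 * implement the @cython.freelist(250) decorator quoted in the .pyx
 * fragments, recycling up to 250 URL instances to skip allocator
 * round-trips.
 *
 * A minimal sketch (guarded out of compilation) of calling parse_url from
 * embedding C code through the CPython API; the module path matches this
 * file, while the sample URL, the helper's name, and the error handling
 * style are illustrative assumptions only.
 */
#if 0
static void example_parse_url(void)
{
    PyObject *mod = PyImport_ImportModule("httptools.parser.url_parser");
    if (mod == NULL) return;
    /* parse_url is registered with METH_O, so it takes exactly one
       positional argument; the "y" format builds a bytes object, which
       satisfies the PyBUF_SIMPLE buffer request made above. */
    PyObject *url = PyObject_CallMethod(
        mod, "parse_url", "y", "https://user@example.com:8080/p?q=1#frag");
    if (url != NULL) {
        /* Each readonly field has a generated getter (schema, host, port,
           path, query, fragment, userinfo); host here would be
           b"example.com" and port the int 8080. */
        PyObject *host = PyObject_GetAttrString(url, "host");
        Py_XDECREF(host);
        Py_DECREF(url);
    }
    Py_DECREF(mod);
}
#endif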
static int __pyx_freecount_9httptools_6parser_10url_parser_URL = 0; static PyObject *__pyx_tp_new_9httptools_6parser_10url_parser_URL(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_obj_9httptools_6parser_10url_parser_URL *p; PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_9httptools_6parser_10url_parser_URL > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_9httptools_6parser_10url_parser_URL)) & ((t->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0))) { o = (PyObject*)__pyx_freelist_9httptools_6parser_10url_parser_URL[--__pyx_freecount_9httptools_6parser_10url_parser_URL]; memset(o, 0, sizeof(struct __pyx_obj_9httptools_6parser_10url_parser_URL)); (void) PyObject_INIT(o, t); PyObject_GC_Track(o); } else { if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; } p = ((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)o); p->schema = ((PyObject*)Py_None); Py_INCREF(Py_None); p->host = ((PyObject*)Py_None); Py_INCREF(Py_None); p->port = Py_None; Py_INCREF(Py_None); p->path = ((PyObject*)Py_None); Py_INCREF(Py_None); p->query = ((PyObject*)Py_None); Py_INCREF(Py_None); p->fragment = ((PyObject*)Py_None); Py_INCREF(Py_None); p->userinfo = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_pw_9httptools_6parser_10url_parser_3URL_1__cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_9httptools_6parser_10url_parser_URL(PyObject *o) { struct __pyx_obj_9httptools_6parser_10url_parser_URL *p = (struct __pyx_obj_9httptools_6parser_10url_parser_URL *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->schema); Py_CLEAR(p->host); Py_CLEAR(p->port); Py_CLEAR(p->path); Py_CLEAR(p->query); Py_CLEAR(p->fragment); Py_CLEAR(p->userinfo); if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_9httptools_6parser_10url_parser_URL < 250) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_9httptools_6parser_10url_parser_URL)) & ((Py_TYPE(o)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0))) { __pyx_freelist_9httptools_6parser_10url_parser_URL[__pyx_freecount_9httptools_6parser_10url_parser_URL++] = ((struct __pyx_obj_9httptools_6parser_10url_parser_URL *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static int __pyx_tp_traverse_9httptools_6parser_10url_parser_URL(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_9httptools_6parser_10url_parser_URL *p = (struct __pyx_obj_9httptools_6parser_10url_parser_URL *)o; if (p->port) { e = (*v)(p->port, a); if (e) return e; } return 0; } static int __pyx_tp_clear_9httptools_6parser_10url_parser_URL(PyObject *o) { PyObject* tmp; struct __pyx_obj_9httptools_6parser_10url_parser_URL *p = (struct __pyx_obj_9httptools_6parser_10url_parser_URL *)o; tmp = ((PyObject*)p->port); p->port = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_schema(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_9httptools_6parser_10url_parser_3URL_6schema_1__get__(o); } static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_host(PyObject *o, CYTHON_UNUSED void *x) { return 
__pyx_pw_9httptools_6parser_10url_parser_3URL_4host_1__get__(o); } static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_port(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_9httptools_6parser_10url_parser_3URL_4port_1__get__(o); } static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_path(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_9httptools_6parser_10url_parser_3URL_4path_1__get__(o); } static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_query(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_9httptools_6parser_10url_parser_3URL_5query_1__get__(o); } static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_fragment(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_9httptools_6parser_10url_parser_3URL_8fragment_1__get__(o); } static PyObject *__pyx_getprop_9httptools_6parser_10url_parser_3URL_userinfo(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_9httptools_6parser_10url_parser_3URL_8userinfo_1__get__(o); } static PyMethodDef __pyx_methods_9httptools_6parser_10url_parser_URL[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_10url_parser_3URL_5__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_10url_parser_3URL_7__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_9httptools_6parser_10url_parser_URL[] = { {(char *)"schema", __pyx_getprop_9httptools_6parser_10url_parser_3URL_schema, 0, (char *)0, 0}, {(char *)"host", __pyx_getprop_9httptools_6parser_10url_parser_3URL_host, 0, (char *)0, 0}, {(char *)"port", __pyx_getprop_9httptools_6parser_10url_parser_3URL_port, 0, (char *)0, 0}, {(char *)"path", __pyx_getprop_9httptools_6parser_10url_parser_3URL_path, 0, (char *)0, 0}, {(char *)"query", __pyx_getprop_9httptools_6parser_10url_parser_3URL_query, 0, (char *)0, 0}, {(char *)"fragment", __pyx_getprop_9httptools_6parser_10url_parser_3URL_fragment, 0, (char *)0, 0}, {(char *)"userinfo", __pyx_getprop_9httptools_6parser_10url_parser_3URL_userinfo, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type_9httptools_6parser_10url_parser_URL = { PyVarObject_HEAD_INIT(0, 0) "httptools.parser.url_parser.URL", /*tp_name*/ sizeof(struct __pyx_obj_9httptools_6parser_10url_parser_URL), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_9httptools_6parser_10url_parser_URL, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_pw_9httptools_6parser_10url_parser_3URL_3__repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_9httptools_6parser_10url_parser_URL, /*tp_traverse*/ __pyx_tp_clear_9httptools_6parser_10url_parser_URL, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_9httptools_6parser_10url_parser_URL, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_9httptools_6parser_10url_parser_URL, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, 
/*tp_alloc*/ __pyx_tp_new_9httptools_6parser_10url_parser_URL, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_url_parser(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_url_parser}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "url_parser", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_HttpParserInvalidURLError, __pyx_k_HttpParserInvalidURLError, sizeof(__pyx_k_HttpParserInvalidURLError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_n_s_URL, __pyx_k_URL, sizeof(__pyx_k_URL), 0, 0, 1, 1}, {&__pyx_kp_u_URL_schema_r_host_r_port_r_path, __pyx_k_URL_schema_r_host_r_port_r_path, sizeof(__pyx_k_URL_schema_r_host_r_port_r_path), 0, 1, 0, 0}, {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, {&__pyx_n_s_buf_data, __pyx_k_buf_data, sizeof(__pyx_k_buf_data), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_errors, __pyx_k_errors, sizeof(__pyx_k_errors), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fragment, __pyx_k_fragment, sizeof(__pyx_k_fragment), 0, 0, 1, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_n_s_host, __pyx_k_host, sizeof(__pyx_k_host), 0, 0, 1, 1}, {&__pyx_n_s_httptools_parser_url_parser, __pyx_k_httptools_parser_url_parser, sizeof(__pyx_k_httptools_parser_url_parser), 0, 0, 1, 1}, {&__pyx_kp_s_httptools_parser_url_parser_pyx, __pyx_k_httptools_parser_url_parser_pyx, sizeof(__pyx_k_httptools_parser_url_parser_pyx), 0, 0, 1, 0}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_kp_u_invalid_url_r, __pyx_k_invalid_url_r, sizeof(__pyx_k_invalid_url_r), 0, 1, 0, 0}, {&__pyx_n_s_ln, __pyx_k_ln, sizeof(__pyx_k_ln), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 
1, 0}, {&__pyx_n_s_off, __pyx_k_off, sizeof(__pyx_k_off), 0, 0, 1, 1}, {&__pyx_n_s_parse_url, __pyx_k_parse_url, sizeof(__pyx_k_parse_url), 0, 0, 1, 1}, {&__pyx_n_u_parse_url, __pyx_k_parse_url, sizeof(__pyx_k_parse_url), 0, 1, 0, 1}, {&__pyx_n_s_parsed, __pyx_k_parsed, sizeof(__pyx_k_parsed), 0, 0, 1, 1}, {&__pyx_n_s_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1}, {&__pyx_n_s_port, __pyx_k_port, sizeof(__pyx_k_port), 0, 0, 1, 1}, {&__pyx_n_s_py_buf, __pyx_k_py_buf, sizeof(__pyx_k_py_buf), 0, 0, 1, 1}, {&__pyx_n_s_query, __pyx_k_query, sizeof(__pyx_k_query), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_res, __pyx_k_res, sizeof(__pyx_k_res), 0, 0, 1, 1}, {&__pyx_n_s_result, __pyx_k_result, sizeof(__pyx_k_result), 0, 0, 1, 1}, {&__pyx_n_s_schema, __pyx_k_schema, sizeof(__pyx_k_schema), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_url, __pyx_k_url, sizeof(__pyx_k_url), 0, 0, 1, 1}, {&__pyx_n_s_userinfo, __pyx_k_userinfo, sizeof(__pyx_k_userinfo), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 2, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "httptools/parser/url_parser.pyx":13 * from . 
cimport url_cparser as uparser * * __all__ = ('parse_url',) # <<<<<<<<<<<<<< * * @cython.freelist(250) */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_n_u_parse_url); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "httptools/parser/url_parser.pyx":43 * * * def parse_url(url): # <<<<<<<<<<<<<< * cdef: * Py_buffer py_buf */ __pyx_tuple__4 = PyTuple_Pack(15, __pyx_n_s_url, __pyx_n_s_py_buf, __pyx_n_s_buf_data, __pyx_n_s_parsed, __pyx_n_s_res, __pyx_n_s_schema, __pyx_n_s_host, __pyx_n_s_port, __pyx_n_s_path, __pyx_n_s_query, __pyx_n_s_fragment, __pyx_n_s_userinfo, __pyx_n_s_result, __pyx_n_s_off, __pyx_n_s_ln); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 43, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); __pyx_codeobj__5 = (PyObject*)__Pyx_PyCode_New(1, 0, 15, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__4, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_httptools_parser_url_parser_pyx, __pyx_n_s_parse_url, 43, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__5)) __PYX_ERR(1, 43, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(1, 1, __pyx_L1_error); return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type_9httptools_6parser_10url_parser_URL) < 0) __PYX_ERR(1, 16, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_9httptools_6parser_10url_parser_URL.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_9httptools_6parser_10url_parser_URL.tp_dictoffset && __pyx_type_9httptools_6parser_10url_parser_URL.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_9httptools_6parser_10url_parser_URL.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (PyObject_SetAttr(__pyx_m, __pyx_n_s_URL, (PyObject *)&__pyx_type_9httptools_6parser_10url_parser_URL) < 0) __PYX_ERR(1, 16, 
__pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_9httptools_6parser_10url_parser_URL) < 0) __PYX_ERR(1, 16, __pyx_L1_error) __pyx_ptype_9httptools_6parser_10url_parser_URL = &__pyx_type_9httptools_6parser_10url_parser_URL; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4bool_bool) __PYX_ERR(3, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(4, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_7complex_complex) __PYX_ERR(4, 15, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initurl_parser(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initurl_parser(void) #else __Pyx_PyMODINIT_FUNC PyInit_url_parser(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_url_parser(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return 
(unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_url_parser(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'url_parser' has already been imported. 
Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_url_parser(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(1, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(1, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(1, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS PyEval_InitThreads(); #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("url_parser", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(1, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(1, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(1, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(1, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(1, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_httptools__parser__url_parser) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(1, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(1, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "httptools.parser.url_parser")) { if (unlikely(PyDict_SetItemString(modules, "httptools.parser.url_parser", __pyx_m) < 0)) __PYX_ERR(1, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(1, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(1, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(1, 1, __pyx_L1_error) if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(1, 1, __pyx_L1_error) (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif /* "httptools/parser/url_parser.pyx":8 * Py_buffer * * from .errors import HttpParserInvalidURLError # <<<<<<<<<<<<<< * * cimport cython */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_HttpParserInvalidURLError); __Pyx_GIVEREF(__pyx_n_s_HttpParserInvalidURLError); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_HttpParserInvalidURLError); __pyx_t_2 = __Pyx_Import(__pyx_n_s_errors, __pyx_t_1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserInvalidURLError); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserInvalidURLError, __pyx_t_1) < 0) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "httptools/parser/url_parser.pyx":13 * from . 
cimport url_cparser as uparser * * __all__ = ('parse_url',) # <<<<<<<<<<<<<< * * @cython.freelist(250) */ if (PyDict_SetItem(__pyx_d, __pyx_n_s_all, __pyx_tuple__3) < 0) __PYX_ERR(1, 13, __pyx_L1_error) /* "httptools/parser/url_parser.pyx":43 * * * def parse_url(url): # <<<<<<<<<<<<<< * cdef: * Py_buffer py_buf */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_9httptools_6parser_10url_parser_1parse_url, NULL, __pyx_n_s_httptools_parser_url_parser); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 43, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_parse_url, __pyx_t_2) < 0) __PYX_ERR(1, 43, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "httptools/parser/url_parser.pyx":1 * #cython: language_level=3 # <<<<<<<<<<<<<< * * from __future__ import print_function */ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init httptools.parser.url_parser", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init httptools.parser.url_parser"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = Py_TYPE(func)->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = 
type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } 
PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction 
cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (__Pyx_PyFastCFunction_Check(func)) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject 
*tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* 
PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { __Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if 
(unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size) { PyObject *result = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif result = PyObject_GetAttrString(module, class_name); if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if ((size_t)basicsize < size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. 
" "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(result); return NULL; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = NULL; PyObject *py_funcname = NULL; #if PY_MAJOR_VERSION < 3 PyObject *py_srcfile = NULL; py_srcfile = PyString_FromString(filename); if (!py_srcfile) goto bad; #endif if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); if (!py_funcname) goto bad; #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); if (!py_funcname) goto bad; funcname = PyUnicode_AsUTF8(py_funcname); if 
(!funcname) goto bad; #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); if (!py_funcname) goto bad; #endif } #if PY_MAJOR_VERSION < 3 py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); #else py_code = PyCode_NewEmpty(filename, funcname, py_line); #endif Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline return py_code; bad: Py_XDECREF(py_funcname); #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_srcfile); #endif return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint16_t(uint16_t value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const uint16_t neg_one = (uint16_t) -1, const_zero = (uint16_t) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(uint16_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(uint16_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint16_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(uint16_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint16_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(uint16_t), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) 
value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); 
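/* Note: portable negativity check for non-CPython runtimes -- evaluating
   x < False (i.e. x < 0) via rich comparison avoids relying on CPython's
   internal Py_SIZE sign convention for PyLong objects. */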
if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else 
if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) 
-1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, 
PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return 
NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0;
        if (size == -1) ival = -ival;
        return ival;
    } else {
      switch (size) {
         case 2:
           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -2:
           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case 3:
           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -3:
           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case 4:
           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -4:
           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
      }
    }
#endif
    return PyLong_AsSsize_t(b);
  }
  x = PyNumber_Index(b);
  if (!x) return -1;
  ival = PyInt_AsSsize_t(x);
  Py_DECREF(x);
  return ival;
}
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) {
  if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) {
    return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o);
#if PY_MAJOR_VERSION < 3
  } else if (likely(PyInt_CheckExact(o))) {
    return PyInt_AS_LONG(o);
#endif
  } else {
    Py_ssize_t ival;
    PyObject *x;
    x = PyNumber_Index(o);
    if (!x) return -1;
    ival = PyInt_AsLong(x);
    Py_DECREF(x);
    return ival;
  }
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
  return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
    return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
236,857
C
38.26691
356
0.595532
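The CIntFromPy helpers at the tail of parser.c above convert Python integers to C int/long: small values go through the PyLong digit fast paths, larger ones fall back to _PyLong_AsByteArray, and values that cannot fit raise OverflowError ("value too large to convert to int", "can't convert negative value to int"). A minimal pure-Python sketch of the same contract, using the stdlib array module as an analogy for the generated C conversion (it is not the actual code path, and a 32-bit C int is assumed):

import array

c_ints = array.array('i')       # 'i' stores C signed ints (assumed 32-bit here)
c_ints.append(2 ** 31 - 1)      # INT_MAX still fits
try:
    c_ints.append(2 ** 31)      # one past INT_MAX: rejected, like raise_overflow
except OverflowError as exc:
    print('overflow:', exc)

c_uints = array.array('I')      # 'I' stores C unsigned ints
try:
    c_uints.append(-1)          # negative into unsigned, like raise_neg_overflow
except OverflowError as exc:
    print('negative:', exc)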
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/httptools/parser/__init__.py
from .parser import * # NoQA
from .errors import * # NoQA
from .url_parser import * # NoQA

__all__ = parser.__all__ + errors.__all__ + url_parser.__all__ # NoQA
166
Python
26.833329
70
0.60241
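This __init__.py flattens the package API with star imports, so every name in parser.__all__, errors.__all__ and url_parser.__all__ becomes importable from the top-level httptools namespace. A short usage sketch, assuming an installed httptools distribution (parse_url is the url_parser export re-surfaced here):

import httptools

# parse_url reaches the top level through this __init__'s star imports.
url = httptools.parse_url(b'https://example.com:8080/path?q=1')
print(url.host, url.port, url.path)   # b'example.com' 8080 b'/path'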
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/httptools/parser/errors.py
__all__ = ('HttpParserError',
           'HttpParserCallbackError',
           'HttpParserInvalidStatusError',
           'HttpParserInvalidMethodError',
           'HttpParserInvalidURLError',
           'HttpParserUpgrade')


class HttpParserError(Exception):
    pass


class HttpParserCallbackError(HttpParserError):
    pass


class HttpParserInvalidStatusError(HttpParserError):
    pass


class HttpParserInvalidMethodError(HttpParserError):
    pass


class HttpParserInvalidURLError(HttpParserError):
    pass


class HttpParserUpgrade(Exception):
    pass
566
Python
17.290322
52
0.719081
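errors.py defines a small hierarchy rooted at HttpParserError; note that HttpParserUpgrade derives from Exception directly (an upgrade is a control-flow signal, not a parse failure). A hedged sketch of catching these around feed_data (the Protocol class and the malformed bytes below are illustrative, not taken from this repo):

import httptools

class Protocol:
    # Minimal callback sink; the parser invokes these during feed_data().
    def on_url(self, url):
        print('url:', url)

parser = httptools.HttpRequestParser(Protocol())
try:
    parser.feed_data(b'\x00\x01 definitely not HTTP\r\n\r\n')
except httptools.HttpParserError as exc:
    # Invalid method/URL/status errors all subclass HttpParserError.
    print('parse failed:', type(exc).__name__, exc)
except httptools.HttpParserUpgrade:
    # Raised when the client asks to switch protocols; not a failure.
    pass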
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/httptools/parser/parser.c
/* Generated by Cython 0.29.28 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [], "extra_compile_args": [ "-O2" ], "name": "httptools.parser.parser", "sources": [ "httptools/parser/parser.pyx" ] }, "module_name": "httptools.parser.parser" } END: Cython Metadata */ #ifndef PY_SSIZE_T_CLEAN #define PY_SSIZE_T_CLEAN #endif /* PY_SSIZE_T_CLEAN */ #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_28" #define CYTHON_HEX_VERSION 0x001D1CF0 #define CYTHON_FUTURE_DIVISION 1 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif 
#undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #if PY_VERSION_HEX >= 0x030B00A4 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #elif !defined(CYTHON_FAST_THREAD_STATE) #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL (PY_VERSION_HEX < 0x030B00A1) #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #if PY_VERSION_HEX >= 0x030B00A4 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif !defined(CYTHON_USE_EXC_INFO_STACK) #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #if PY_MAJOR_VERSION < 3 #include "longintrepr.h" #endif #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define 
CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_DefaultClassType PyType_Type #if PY_VERSION_HEX >= 0x030B00A1 static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int k, int l, int s, int f, PyObject *code, PyObject *c, PyObject* n, PyObject *v, PyObject *fv, PyObject *cell, PyObject* fn, PyObject *name, int fline, PyObject *lnos) { PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL; PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL; const char *fn_cstr=NULL; const char *name_cstr=NULL; PyCodeObject* co=NULL; PyObject *type, *value, *traceback; PyErr_Fetch(&type, &value, &traceback); if (!(kwds=PyDict_New())) goto end; if (!(argcount=PyLong_FromLong(a))) goto end; if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end; if (!(posonlyargcount=PyLong_FromLong(0))) goto end; if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end; if (!(kwonlyargcount=PyLong_FromLong(k))) goto end; if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end; if (!(nlocals=PyLong_FromLong(l))) goto end; if 
(PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end; if (!(stacksize=PyLong_FromLong(s))) goto end; if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end; if (!(flags=PyLong_FromLong(f))) goto end; if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end; if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end; if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end; if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end; if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end; if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end; if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end; if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end; if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end; if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end; if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end; if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too; if (!(empty = PyTuple_New(0))) goto cleanup_code_too; // unfortunately __pyx_empty_tuple isn't available here if (!(call_result = PyObject_Call(replace, empty, kwds))) goto cleanup_code_too; Py_XDECREF((PyObject*)co); co = (PyCodeObject*)call_result; call_result = NULL; if (0) { cleanup_code_too: Py_XDECREF((PyObject*)co); co = NULL; } end: Py_XDECREF(kwds); Py_XDECREF(argcount); Py_XDECREF(posonlyargcount); Py_XDECREF(kwonlyargcount); Py_XDECREF(nlocals); Py_XDECREF(stacksize); Py_XDECREF(replace); Py_XDECREF(call_result); Py_XDECREF(empty); if (type) { PyErr_Restore(type, value, traceback); } return co; } #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, 
lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #if defined(PyUnicode_IS_READY) #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #else #define __Pyx_PyUnicode_READY(op) (0) #endif #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #endif #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__httptools__parser__parser #define __PYX_HAVE_API__httptools__parser__parser /* Early includes */ #include <string.h> #include <stdio.h> #include "pythread.h" #include <stdint.h> #include "llhttp.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) 
((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject 
*__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "httptools\\parser\\parser.pyx", "stringsource", "type.pxd", "bool.pxd", "complex.pxd", }; /*--- Type declarations ---*/ struct __pyx_obj_9httptools_6parser_6parser_HttpParser; struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser; struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser; /* "httptools/parser/parser.pyx":26 * * @cython.internal * cdef class HttpParser: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_obj_9httptools_6parser_6parser_HttpParser { PyObject_HEAD struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *__pyx_vtab; llhttp_t *_cparser; llhttp_settings_t *_csettings; PyObject *_current_header_name; PyObject *_current_header_value; PyObject *_proto_on_url; PyObject *_proto_on_status; PyObject *_proto_on_body; PyObject *_proto_on_header; PyObject *_proto_on_headers_complete; PyObject *_proto_on_message_complete; PyObject *_proto_on_chunk_header; PyObject *_proto_on_chunk_complete; PyObject *_proto_on_message_begin; PyObject *_last_error; Py_buffer py_buf; }; /* "httptools/parser/parser.pyx":215 * * * cdef class HttpRequestParser(HttpParser): # <<<<<<<<<<<<<< * * def __init__(self, protocol): */ struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser { struct __pyx_obj_9httptools_6parser_6parser_HttpParser __pyx_base; }; /* "httptools/parser/parser.pyx":229 * * * cdef class HttpResponseParser(HttpParser): # <<<<<<<<<<<<<< * * def __init__(self, protocol): */ struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser { struct __pyx_obj_9httptools_6parser_6parser_HttpParser __pyx_base; }; /* "httptools/parser/parser.pyx":26 * * @cython.internal * cdef class HttpParser: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser { PyObject *(*_init)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *, llhttp_type_t); PyObject *(*_maybe_call_on_header)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *); PyObject *(*_on_header_field)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *); PyObject *(*_on_header_value)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *); PyObject *(*_on_headers_complete)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *); PyObject *(*_on_chunk_header)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *); PyObject *(*_on_chunk_complete)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *); }; static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *__pyx_vtabptr_9httptools_6parser_6parser_HttpParser; /* "httptools/parser/parser.pyx":215 * * * cdef class HttpRequestParser(HttpParser): # <<<<<<<<<<<<<< * * def __init__(self, protocol): */ struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpRequestParser { struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser __pyx_base; }; static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpRequestParser *__pyx_vtabptr_9httptools_6parser_6parser_HttpRequestParser; /* "httptools/parser/parser.pyx":229 * * * cdef class HttpResponseParser(HttpParser): # <<<<<<<<<<<<<< * * def __init__(self, protocol): */ struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpResponseParser { struct 
__pyx_vtabstruct_9httptools_6parser_6parser_HttpParser __pyx_base; }; static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpResponseParser *__pyx_vtabptr_9httptools_6parser_6parser_HttpResponseParser; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* KeywordStringCheck.proto */ static int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed); /* 
PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif #if CYTHON_FAST_PYCALL static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ 
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif // CYTHON_FAST_PYCALL #endif /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyObjectSetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS #define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); #else #define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) #define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) #endif /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char 
*s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_bytes.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes( const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* decode_bytes.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_bytes( PyObject* string, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { return __Pyx_decode_c_bytes( PyBytes_AS_STRING(string), PyBytes_GET_SIZE(string), start, stop, encoding, errors, decode_func); } /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* TypeImport.proto */ #ifndef __PYX_HAVE_RT_ImportType_proto #define __PYX_HAVE_RT_ImportType_proto enum __Pyx_ImportType_CheckSize { __Pyx_ImportType_CheckSize_Error = 0, __Pyx_ImportType_CheckSize_Warn = 1, __Pyx_ImportType_CheckSize_Ignore = 2 }; static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* GCCDiagnostics.proto */ #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) #define __Pyx_HAS_GCC_DIAGNOSTIC #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint8_t(uint8_t value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_ptrdiff_t(ptrdiff_t value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint16_t(uint16_t value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__init(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_protocol, llhttp_type_t __pyx_v_mode); /* proto*/ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__maybe_call_on_header(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto*/ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_header_field(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_field); /* proto*/ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_header_value(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_val); /* proto*/ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_headers_complete(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto*/ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_chunk_header(struct 
__pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto*/ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_chunk_complete(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto*/ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'cpython.version' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'cpython.exc' */ /* Module declarations from 'cpython.module' */ /* Module declarations from 'cpython.tuple' */ /* Module declarations from 'cpython.list' */ /* Module declarations from 'cpython.sequence' */ /* Module declarations from 'cpython.mapping' */ /* Module declarations from 'cpython.iterator' */ /* Module declarations from 'cpython.number' */ /* Module declarations from 'cpython.int' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.bool' */ static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; /* Module declarations from 'cpython.long' */ /* Module declarations from 'cpython.float' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.complex' */ static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; /* Module declarations from 'cpython.string' */ /* Module declarations from 'cpython.unicode' */ /* Module declarations from 'cpython.dict' */ /* Module declarations from 'cpython.instance' */ /* Module declarations from 'cpython.function' */ /* Module declarations from 'cpython.method' */ /* Module declarations from 'cpython.weakref' */ /* Module declarations from 'cpython.getargs' */ /* Module declarations from 'cpython.pythread' */ /* Module declarations from 'cpython.pystate' */ /* Module declarations from 'cpython.cobject' */ /* Module declarations from 'cpython.oldbuffer' */ /* Module declarations from 'cpython.set' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.bytes' */ /* Module declarations from 'cpython.pycapsule' */ /* Module declarations from 'cpython' */ /* Module declarations from 'httptools.parser.python' */ /* Module declarations from 'cython' */ /* Module declarations from 'httptools.parser' */ /* Module declarations from 'libc.stdint' */ /* Module declarations from 'httptools.parser.cparser' */ /* Module declarations from 'httptools.parser.parser' */ static PyTypeObject *__pyx_ptype_9httptools_6parser_6parser_HttpParser = 0; static PyTypeObject *__pyx_ptype_9httptools_6parser_6parser_HttpRequestParser = 0; static PyTypeObject *__pyx_ptype_9httptools_6parser_6parser_HttpResponseParser = 0; static int __pyx_f_9httptools_6parser_6parser_cb_on_message_begin(llhttp_t *); /*proto*/ static int __pyx_f_9httptools_6parser_6parser_cb_on_url(llhttp_t *, char const *, size_t); /*proto*/ static int __pyx_f_9httptools_6parser_6parser_cb_on_status(llhttp_t *, char const *, size_t); /*proto*/ static int __pyx_f_9httptools_6parser_6parser_cb_on_header_field(llhttp_t *, char const *, size_t); /*proto*/ static int __pyx_f_9httptools_6parser_6parser_cb_on_header_value(llhttp_t *, char const *, size_t); /*proto*/ static int __pyx_f_9httptools_6parser_6parser_cb_on_headers_complete(llhttp_t *); /*proto*/ static int __pyx_f_9httptools_6parser_6parser_cb_on_body(llhttp_t *, char const *, 
size_t); /*proto*/ static int __pyx_f_9httptools_6parser_6parser_cb_on_message_complete(llhttp_t *); /*proto*/ static int __pyx_f_9httptools_6parser_6parser_cb_on_chunk_header(llhttp_t *); /*proto*/ static int __pyx_f_9httptools_6parser_6parser_cb_on_chunk_complete(llhttp_t *); /*proto*/ static PyObject *__pyx_f_9httptools_6parser_6parser_parser_error_from_errno(llhttp_t *, llhttp_errno_t); /*proto*/ #define __Pyx_MODULE_NAME "httptools.parser.parser" extern int __pyx_module_is_main_httptools__parser__parser; int __pyx_module_is_main_httptools__parser__parser = 0; /* Implementation of 'httptools.parser.parser' */ static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_BaseException; static const char __pyx_k_[] = "{}.{}"; static const char __pyx_k_all[] = "__all__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_name[] = "__name__"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_errors[] = "errors"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_on_url[] = "on_url"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_context[] = "__context__"; static const char __pyx_k_on_body[] = "on_body"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_protocol[] = "protocol"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_on_header[] = "on_header"; static const char __pyx_k_on_status[] = "on_status"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_BaseException[] = "BaseException"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_HttpParserError[] = "HttpParserError"; static const char __pyx_k_on_chunk_header[] = "on_chunk_header"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_on_message_begin[] = "on_message_begin"; static const char __pyx_k_HttpParserUpgrade[] = "HttpParserUpgrade"; static const char __pyx_k_HttpRequestParser[] = "HttpRequestParser"; static const char __pyx_k_on_chunk_complete[] = "on_chunk_complete"; static const char __pyx_k_HttpResponseParser[] = "HttpResponseParser"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_on_headers_complete[] = "on_headers_complete"; static const char __pyx_k_on_message_complete[] = "on_message_complete"; static const char __pyx_k_invalid_headers_state[] = "invalid headers state"; static const char __pyx_k_HttpParserCallbackError[] = "HttpParserCallbackError"; static const char __pyx_k_HttpParserInvalidURLError[] = "HttpParserInvalidURLError"; static const char __pyx_k_HttpParserInvalidMethodError[] = "HttpParserInvalidMethodError"; static const char __pyx_k_HttpParserInvalidStatusError[] = "HttpParserInvalidStatusError"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static PyObject *__pyx_kp_u_; static PyObject *__pyx_n_s_BaseException; static PyObject *__pyx_n_s_HttpParserCallbackError; static PyObject *__pyx_n_s_HttpParserError; static PyObject *__pyx_n_s_HttpParserInvalidMethodError; static PyObject *__pyx_n_s_HttpParserInvalidStatusError; static PyObject 
*__pyx_n_s_HttpParserInvalidURLError; static PyObject *__pyx_n_s_HttpParserUpgrade; static PyObject *__pyx_n_s_HttpRequestParser; static PyObject *__pyx_n_u_HttpRequestParser; static PyObject *__pyx_n_s_HttpResponseParser; static PyObject *__pyx_n_u_HttpResponseParser; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_n_s_all; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_context; static PyObject *__pyx_n_s_errors; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_n_s_import; static PyObject *__pyx_kp_u_invalid_headers_state; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_name; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_u_on_body; static PyObject *__pyx_n_u_on_chunk_complete; static PyObject *__pyx_n_u_on_chunk_header; static PyObject *__pyx_n_u_on_header; static PyObject *__pyx_n_u_on_headers_complete; static PyObject *__pyx_n_u_on_message_begin; static PyObject *__pyx_n_u_on_message_complete; static PyObject *__pyx_n_u_on_status; static PyObject *__pyx_n_u_on_url; static PyObject *__pyx_n_s_protocol; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_test; static int __pyx_pf_9httptools_6parser_6parser_10HttpParser___cinit__(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ static void __pyx_pf_9httptools_6parser_6parser_10HttpParser_2__dealloc__(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_4get_http_version(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_6should_keep_alive(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_8should_upgrade(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_10feed_data(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_12__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_14__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_pf_9httptools_6parser_6parser_17HttpRequestParser___init__(struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self, PyObject *__pyx_v_protocol); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_2get_method(struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_4__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_6__setstate_cython__(CYTHON_UNUSED struct 
__pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser___init__(struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self, PyObject *__pyx_v_protocol); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_2get_status_code(struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_4__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_6__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpParser(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpRequestParser(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpResponseParser(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; /* Late includes */ /* "httptools/parser/parser.pyx":44 * Py_buffer py_buf * * def __cinit__(self): # <<<<<<<<<<<<<< * self._cparser = <cparser.llhttp_t*> \ * PyMem_Malloc(sizeof(cparser.llhttp_t)) */ /* Python wrapper */ static int __pyx_pw_9httptools_6parser_6parser_10HttpParser_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_9httptools_6parser_6parser_10HttpParser_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); if (unlikely(PyTuple_GET_SIZE(__pyx_args) > 0)) { __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); return -1;} if (unlikely(__pyx_kwds) && unlikely(PyDict_Size(__pyx_kwds) > 0) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__cinit__", 0))) return -1; __pyx_r = __pyx_pf_9httptools_6parser_6parser_10HttpParser___cinit__(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_9httptools_6parser_6parser_10HttpParser___cinit__(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "httptools/parser/parser.pyx":45 * * def __cinit__(self): * self._cparser = <cparser.llhttp_t*> \ # <<<<<<<<<<<<<< * PyMem_Malloc(sizeof(cparser.llhttp_t)) * if self._cparser is NULL: */ __pyx_v_self->_cparser = ((llhttp_t *)PyMem_Malloc((sizeof(llhttp_t)))); /* "httptools/parser/parser.pyx":47 * self._cparser = <cparser.llhttp_t*> \ * PyMem_Malloc(sizeof(cparser.llhttp_t)) * if self._cparser is NULL: # <<<<<<<<<<<<<< * raise MemoryError() * */ __pyx_t_1 = ((__pyx_v_self->_cparser == NULL) != 0); if (unlikely(__pyx_t_1)) { /* 
"httptools/parser/parser.pyx":48 * PyMem_Malloc(sizeof(cparser.llhttp_t)) * if self._cparser is NULL: * raise MemoryError() # <<<<<<<<<<<<<< * * self._csettings = <cparser.llhttp_settings_t*> \ */ PyErr_NoMemory(); __PYX_ERR(0, 48, __pyx_L1_error) /* "httptools/parser/parser.pyx":47 * self._cparser = <cparser.llhttp_t*> \ * PyMem_Malloc(sizeof(cparser.llhttp_t)) * if self._cparser is NULL: # <<<<<<<<<<<<<< * raise MemoryError() * */ } /* "httptools/parser/parser.pyx":50 * raise MemoryError() * * self._csettings = <cparser.llhttp_settings_t*> \ # <<<<<<<<<<<<<< * PyMem_Malloc(sizeof(cparser.llhttp_settings_t)) * if self._csettings is NULL: */ __pyx_v_self->_csettings = ((llhttp_settings_t *)PyMem_Malloc((sizeof(llhttp_settings_t)))); /* "httptools/parser/parser.pyx":52 * self._csettings = <cparser.llhttp_settings_t*> \ * PyMem_Malloc(sizeof(cparser.llhttp_settings_t)) * if self._csettings is NULL: # <<<<<<<<<<<<<< * raise MemoryError() * */ __pyx_t_1 = ((__pyx_v_self->_csettings == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "httptools/parser/parser.pyx":53 * PyMem_Malloc(sizeof(cparser.llhttp_settings_t)) * if self._csettings is NULL: * raise MemoryError() # <<<<<<<<<<<<<< * * def __dealloc__(self): */ PyErr_NoMemory(); __PYX_ERR(0, 53, __pyx_L1_error) /* "httptools/parser/parser.pyx":52 * self._csettings = <cparser.llhttp_settings_t*> \ * PyMem_Malloc(sizeof(cparser.llhttp_settings_t)) * if self._csettings is NULL: # <<<<<<<<<<<<<< * raise MemoryError() * */ } /* "httptools/parser/parser.pyx":44 * Py_buffer py_buf * * def __cinit__(self): # <<<<<<<<<<<<<< * self._cparser = <cparser.llhttp_t*> \ * PyMem_Malloc(sizeof(cparser.llhttp_t)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("httptools.parser.parser.HttpParser.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":55 * raise MemoryError() * * def __dealloc__(self): # <<<<<<<<<<<<<< * PyMem_Free(self._cparser) * PyMem_Free(self._csettings) */ /* Python wrapper */ static void __pyx_pw_9httptools_6parser_6parser_10HttpParser_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_9httptools_6parser_6parser_10HttpParser_3__dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_pf_9httptools_6parser_6parser_10HttpParser_2__dealloc__(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_9httptools_6parser_6parser_10HttpParser_2__dealloc__(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "httptools/parser/parser.pyx":56 * * def __dealloc__(self): * PyMem_Free(self._cparser) # <<<<<<<<<<<<<< * PyMem_Free(self._csettings) * */ PyMem_Free(__pyx_v_self->_cparser); /* "httptools/parser/parser.pyx":57 * def __dealloc__(self): * PyMem_Free(self._cparser) * PyMem_Free(self._csettings) # <<<<<<<<<<<<<< * * cdef _init(self, protocol, cparser.llhttp_type_t mode): */ PyMem_Free(__pyx_v_self->_csettings); /* "httptools/parser/parser.pyx":55 * raise MemoryError() * * def __dealloc__(self): # <<<<<<<<<<<<<< * PyMem_Free(self._cparser) * PyMem_Free(self._csettings) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "httptools/parser/parser.pyx":59 * PyMem_Free(self._csettings) * * cdef _init(self, protocol, 
cparser.llhttp_type_t mode): # <<<<<<<<<<<<<< * cparser.llhttp_settings_init(self._csettings) * */ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__init(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_protocol, llhttp_type_t __pyx_v_mode) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_init", 0); /* "httptools/parser/parser.pyx":60 * * cdef _init(self, protocol, cparser.llhttp_type_t mode): * cparser.llhttp_settings_init(self._csettings) # <<<<<<<<<<<<<< * * cparser.llhttp_init(self._cparser, mode, self._csettings) */ llhttp_settings_init(__pyx_v_self->_csettings); /* "httptools/parser/parser.pyx":62 * cparser.llhttp_settings_init(self._csettings) * * cparser.llhttp_init(self._cparser, mode, self._csettings) # <<<<<<<<<<<<<< * self._cparser.data = <void*>self * */ llhttp_init(__pyx_v_self->_cparser, __pyx_v_mode, __pyx_v_self->_csettings); /* "httptools/parser/parser.pyx":63 * * cparser.llhttp_init(self._cparser, mode, self._csettings) * self._cparser.data = <void*>self # <<<<<<<<<<<<<< * * self._current_header_name = None */ __pyx_v_self->_cparser->data = ((void *)__pyx_v_self); /* "httptools/parser/parser.pyx":65 * self._cparser.data = <void*>self * * self._current_header_name = None # <<<<<<<<<<<<<< * self._current_header_value = None * */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_self->_current_header_name); __Pyx_DECREF(__pyx_v_self->_current_header_name); __pyx_v_self->_current_header_name = ((PyObject*)Py_None); /* "httptools/parser/parser.pyx":66 * * self._current_header_name = None * self._current_header_value = None # <<<<<<<<<<<<<< * * self._proto_on_header = getattr(protocol, 'on_header', None) */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_self->_current_header_value); __Pyx_DECREF(__pyx_v_self->_current_header_value); __pyx_v_self->_current_header_value = ((PyObject*)Py_None); /* "httptools/parser/parser.pyx":68 * self._current_header_value = None * * self._proto_on_header = getattr(protocol, 'on_header', None) # <<<<<<<<<<<<<< * if self._proto_on_header is not None: * self._csettings.on_header_field = cb_on_header_field */ __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_header, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->_proto_on_header); __Pyx_DECREF(__pyx_v_self->_proto_on_header); __pyx_v_self->_proto_on_header = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":69 * * self._proto_on_header = getattr(protocol, 'on_header', None) * if self._proto_on_header is not None: # <<<<<<<<<<<<<< * self._csettings.on_header_field = cb_on_header_field * self._csettings.on_header_value = cb_on_header_value */ __pyx_t_2 = (__pyx_v_self->_proto_on_header != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "httptools/parser/parser.pyx":70 * self._proto_on_header = getattr(protocol, 'on_header', None) * if self._proto_on_header is not None: * self._csettings.on_header_field = cb_on_header_field # <<<<<<<<<<<<<< * self._csettings.on_header_value = cb_on_header_value * self._proto_on_headers_complete = getattr( */ __pyx_v_self->_csettings->on_header_field = __pyx_f_9httptools_6parser_6parser_cb_on_header_field; /* "httptools/parser/parser.pyx":71 * if self._proto_on_header is not 
None: * self._csettings.on_header_field = cb_on_header_field * self._csettings.on_header_value = cb_on_header_value # <<<<<<<<<<<<<< * self._proto_on_headers_complete = getattr( * protocol, 'on_headers_complete', None) */ __pyx_v_self->_csettings->on_header_value = __pyx_f_9httptools_6parser_6parser_cb_on_header_value; /* "httptools/parser/parser.pyx":69 * * self._proto_on_header = getattr(protocol, 'on_header', None) * if self._proto_on_header is not None: # <<<<<<<<<<<<<< * self._csettings.on_header_field = cb_on_header_field * self._csettings.on_header_value = cb_on_header_value */ } /* "httptools/parser/parser.pyx":72 * self._csettings.on_header_field = cb_on_header_field * self._csettings.on_header_value = cb_on_header_value * self._proto_on_headers_complete = getattr( # <<<<<<<<<<<<<< * protocol, 'on_headers_complete', None) * self._csettings.on_headers_complete = cb_on_headers_complete */ __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_headers_complete, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 72, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->_proto_on_headers_complete); __Pyx_DECREF(__pyx_v_self->_proto_on_headers_complete); __pyx_v_self->_proto_on_headers_complete = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":74 * self._proto_on_headers_complete = getattr( * protocol, 'on_headers_complete', None) * self._csettings.on_headers_complete = cb_on_headers_complete # <<<<<<<<<<<<<< * * self._proto_on_body = getattr(protocol, 'on_body', None) */ __pyx_v_self->_csettings->on_headers_complete = __pyx_f_9httptools_6parser_6parser_cb_on_headers_complete; /* "httptools/parser/parser.pyx":76 * self._csettings.on_headers_complete = cb_on_headers_complete * * self._proto_on_body = getattr(protocol, 'on_body', None) # <<<<<<<<<<<<<< * if self._proto_on_body is not None: * self._csettings.on_body = cb_on_body */ __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_body, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->_proto_on_body); __Pyx_DECREF(__pyx_v_self->_proto_on_body); __pyx_v_self->_proto_on_body = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":77 * * self._proto_on_body = getattr(protocol, 'on_body', None) * if self._proto_on_body is not None: # <<<<<<<<<<<<<< * self._csettings.on_body = cb_on_body * */ __pyx_t_3 = (__pyx_v_self->_proto_on_body != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "httptools/parser/parser.pyx":78 * self._proto_on_body = getattr(protocol, 'on_body', None) * if self._proto_on_body is not None: * self._csettings.on_body = cb_on_body # <<<<<<<<<<<<<< * * self._proto_on_message_begin = getattr( */ __pyx_v_self->_csettings->on_body = __pyx_f_9httptools_6parser_6parser_cb_on_body; /* "httptools/parser/parser.pyx":77 * * self._proto_on_body = getattr(protocol, 'on_body', None) * if self._proto_on_body is not None: # <<<<<<<<<<<<<< * self._csettings.on_body = cb_on_body * */ } /* "httptools/parser/parser.pyx":80 * self._csettings.on_body = cb_on_body * * self._proto_on_message_begin = getattr( # <<<<<<<<<<<<<< * protocol, 'on_message_begin', None) * if self._proto_on_message_begin is not None: */ __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_message_begin, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->_proto_on_message_begin); 
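/* Editor's note: each attribute store in _init() uses Cython's generated
 * swap idiom -- INCREF the incoming object before DECREF'ing the value it
 * replaces (safe even when old and new are the same object); the
 * __Pyx_GIVEREF/__Pyx_GOTREF calls are RefNanny bookkeeping macros that
 * compile to no-ops unless CYTHON_REFNANNY is enabled. */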
__Pyx_DECREF(__pyx_v_self->_proto_on_message_begin); __pyx_v_self->_proto_on_message_begin = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":82 * self._proto_on_message_begin = getattr( * protocol, 'on_message_begin', None) * if self._proto_on_message_begin is not None: # <<<<<<<<<<<<<< * self._csettings.on_message_begin = cb_on_message_begin * */ __pyx_t_2 = (__pyx_v_self->_proto_on_message_begin != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "httptools/parser/parser.pyx":83 * protocol, 'on_message_begin', None) * if self._proto_on_message_begin is not None: * self._csettings.on_message_begin = cb_on_message_begin # <<<<<<<<<<<<<< * * self._proto_on_message_complete = getattr( */ __pyx_v_self->_csettings->on_message_begin = __pyx_f_9httptools_6parser_6parser_cb_on_message_begin; /* "httptools/parser/parser.pyx":82 * self._proto_on_message_begin = getattr( * protocol, 'on_message_begin', None) * if self._proto_on_message_begin is not None: # <<<<<<<<<<<<<< * self._csettings.on_message_begin = cb_on_message_begin * */ } /* "httptools/parser/parser.pyx":85 * self._csettings.on_message_begin = cb_on_message_begin * * self._proto_on_message_complete = getattr( # <<<<<<<<<<<<<< * protocol, 'on_message_complete', None) * if self._proto_on_message_complete is not None: */ __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_message_complete, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->_proto_on_message_complete); __Pyx_DECREF(__pyx_v_self->_proto_on_message_complete); __pyx_v_self->_proto_on_message_complete = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":87 * self._proto_on_message_complete = getattr( * protocol, 'on_message_complete', None) * if self._proto_on_message_complete is not None: # <<<<<<<<<<<<<< * self._csettings.on_message_complete = cb_on_message_complete * */ __pyx_t_3 = (__pyx_v_self->_proto_on_message_complete != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "httptools/parser/parser.pyx":88 * protocol, 'on_message_complete', None) * if self._proto_on_message_complete is not None: * self._csettings.on_message_complete = cb_on_message_complete # <<<<<<<<<<<<<< * * self._proto_on_chunk_header = getattr( */ __pyx_v_self->_csettings->on_message_complete = __pyx_f_9httptools_6parser_6parser_cb_on_message_complete; /* "httptools/parser/parser.pyx":87 * self._proto_on_message_complete = getattr( * protocol, 'on_message_complete', None) * if self._proto_on_message_complete is not None: # <<<<<<<<<<<<<< * self._csettings.on_message_complete = cb_on_message_complete * */ } /* "httptools/parser/parser.pyx":90 * self._csettings.on_message_complete = cb_on_message_complete * * self._proto_on_chunk_header = getattr( # <<<<<<<<<<<<<< * protocol, 'on_chunk_header', None) * self._csettings.on_chunk_header = cb_on_chunk_header */ __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_chunk_header, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->_proto_on_chunk_header); __Pyx_DECREF(__pyx_v_self->_proto_on_chunk_header); __pyx_v_self->_proto_on_chunk_header = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":92 * self._proto_on_chunk_header = getattr( * protocol, 'on_chunk_header', None) * self._csettings.on_chunk_header = cb_on_chunk_header # <<<<<<<<<<<<<< * * self._proto_on_chunk_complete = getattr( */ 
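/* Editor's note: _init() resolves every protocol hook with
 * getattr(protocol, name, None). A minimal sketch of the duck-typed
 * protocol shape, reconstructed from the getattr() calls in this file
 * (illustrative only; argument names are assumptions):
 *
 *     class Protocol:
 *         def on_message_begin(self): ...
 *         def on_header(self, name: bytes, value: bytes): ...
 *         def on_headers_complete(self): ...
 *         def on_body(self, body: bytes): ...
 *         def on_message_complete(self): ...
 *         def on_chunk_header(self): ...
 *         def on_chunk_complete(self): ...
 *
 * Most llhttp callbacks are registered only when the matching hook
 * exists, but on_headers_complete above and the chunk callbacks below
 * are installed unconditionally: the parser itself relies on them to
 * flush or validate buffered header state via _maybe_call_on_header(). */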
__pyx_v_self->_csettings->on_chunk_header = __pyx_f_9httptools_6parser_6parser_cb_on_chunk_header; /* "httptools/parser/parser.pyx":94 * self._csettings.on_chunk_header = cb_on_chunk_header * * self._proto_on_chunk_complete = getattr( # <<<<<<<<<<<<<< * protocol, 'on_chunk_complete', None) * self._csettings.on_chunk_complete = cb_on_chunk_complete */ __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_chunk_complete, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->_proto_on_chunk_complete); __Pyx_DECREF(__pyx_v_self->_proto_on_chunk_complete); __pyx_v_self->_proto_on_chunk_complete = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":96 * self._proto_on_chunk_complete = getattr( * protocol, 'on_chunk_complete', None) * self._csettings.on_chunk_complete = cb_on_chunk_complete # <<<<<<<<<<<<<< * * self._last_error = None */ __pyx_v_self->_csettings->on_chunk_complete = __pyx_f_9httptools_6parser_6parser_cb_on_chunk_complete; /* "httptools/parser/parser.pyx":98 * self._csettings.on_chunk_complete = cb_on_chunk_complete * * self._last_error = None # <<<<<<<<<<<<<< * * cdef _maybe_call_on_header(self): */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_self->_last_error); __Pyx_DECREF(__pyx_v_self->_last_error); __pyx_v_self->_last_error = Py_None; /* "httptools/parser/parser.pyx":59 * PyMem_Free(self._csettings) * * cdef _init(self, protocol, cparser.llhttp_type_t mode): # <<<<<<<<<<<<<< * cparser.llhttp_settings_init(self._csettings) * */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpParser._init", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":100 * self._last_error = None * * cdef _maybe_call_on_header(self): # <<<<<<<<<<<<<< * if self._current_header_value is not None: * current_header_name = self._current_header_name */ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__maybe_call_on_header(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { PyObject *__pyx_v_current_header_name = NULL; PyObject *__pyx_v_current_header_value = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_maybe_call_on_header", 0); /* "httptools/parser/parser.pyx":101 * * cdef _maybe_call_on_header(self): * if self._current_header_value is not None: # <<<<<<<<<<<<<< * current_header_name = self._current_header_name * current_header_value = self._current_header_value */ __pyx_t_1 = (__pyx_v_self->_current_header_value != ((PyObject*)Py_None)); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "httptools/parser/parser.pyx":102 * cdef _maybe_call_on_header(self): * if self._current_header_value is not None: * current_header_name = self._current_header_name # <<<<<<<<<<<<<< * current_header_value = self._current_header_value * */ __pyx_t_3 = __pyx_v_self->_current_header_name; __Pyx_INCREF(__pyx_t_3); __pyx_v_current_header_name = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "httptools/parser/parser.pyx":103 * if 
self._current_header_value is not None: * current_header_name = self._current_header_name * current_header_value = self._current_header_value # <<<<<<<<<<<<<< * * self._current_header_name = self._current_header_value = None */ __pyx_t_3 = __pyx_v_self->_current_header_value; __Pyx_INCREF(__pyx_t_3); __pyx_v_current_header_value = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "httptools/parser/parser.pyx":105 * current_header_value = self._current_header_value * * self._current_header_name = self._current_header_value = None # <<<<<<<<<<<<<< * * if self._proto_on_header is not None: */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_self->_current_header_name); __Pyx_DECREF(__pyx_v_self->_current_header_name); __pyx_v_self->_current_header_name = ((PyObject*)Py_None); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_self->_current_header_value); __Pyx_DECREF(__pyx_v_self->_current_header_value); __pyx_v_self->_current_header_value = ((PyObject*)Py_None); /* "httptools/parser/parser.pyx":107 * self._current_header_name = self._current_header_value = None * * if self._proto_on_header is not None: # <<<<<<<<<<<<<< * self._proto_on_header(current_header_name, * current_header_value) */ __pyx_t_2 = (__pyx_v_self->_proto_on_header != Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "httptools/parser/parser.pyx":109 * if self._proto_on_header is not None: * self._proto_on_header(current_header_name, * current_header_value) # <<<<<<<<<<<<<< * * cdef _on_header_field(self, bytes field): */ __Pyx_INCREF(__pyx_v_self->_proto_on_header); __pyx_t_4 = __pyx_v_self->_proto_on_header; __pyx_t_5 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_v_current_header_name, __pyx_v_current_header_value}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 108, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_v_current_header_name, __pyx_v_current_header_value}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 108, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_v_current_header_name); __Pyx_GIVEREF(__pyx_v_current_header_name); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_v_current_header_name); __Pyx_INCREF(__pyx_v_current_header_value); __Pyx_GIVEREF(__pyx_v_current_header_value); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_v_current_header_value); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); 
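/* Editor's note: the branches above are Cython's generated call dispatch
 * for self._proto_on_header(name, value): a bound method is unpacked so
 * its 'self' can be prepended as an extra positional argument, plain
 * Python functions take the __Pyx_PyFunction_FastCall path, METH_FASTCALL
 * C functions take __Pyx_PyCFunction_FastCall, and anything else falls
 * back to packing a temporary tuple for PyObject_Call. */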
__pyx_t_3 = 0; /* "httptools/parser/parser.pyx":107 * self._current_header_name = self._current_header_value = None * * if self._proto_on_header is not None: # <<<<<<<<<<<<<< * self._proto_on_header(current_header_name, * current_header_value) */ } /* "httptools/parser/parser.pyx":101 * * cdef _maybe_call_on_header(self): * if self._current_header_value is not None: # <<<<<<<<<<<<<< * current_header_name = self._current_header_name * current_header_value = self._current_header_value */ } /* "httptools/parser/parser.pyx":100 * self._last_error = None * * cdef _maybe_call_on_header(self): # <<<<<<<<<<<<<< * if self._current_header_value is not None: * current_header_name = self._current_header_name */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("httptools.parser.parser.HttpParser._maybe_call_on_header", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_current_header_name); __Pyx_XDECREF(__pyx_v_current_header_value); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":111 * current_header_value) * * cdef _on_header_field(self, bytes field): # <<<<<<<<<<<<<< * self._maybe_call_on_header() * if self._current_header_name is None: */ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_header_field(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_field) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_on_header_field", 0); /* "httptools/parser/parser.pyx":112 * * cdef _on_header_field(self, bytes field): * self._maybe_call_on_header() # <<<<<<<<<<<<<< * if self._current_header_name is None: * self._current_header_name = field */ __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_self->__pyx_vtab)->_maybe_call_on_header(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":113 * cdef _on_header_field(self, bytes field): * self._maybe_call_on_header() * if self._current_header_name is None: # <<<<<<<<<<<<<< * self._current_header_name = field * else: */ __pyx_t_2 = (__pyx_v_self->_current_header_name == ((PyObject*)Py_None)); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "httptools/parser/parser.pyx":114 * self._maybe_call_on_header() * if self._current_header_name is None: * self._current_header_name = field # <<<<<<<<<<<<<< * else: * self._current_header_name += field */ __Pyx_INCREF(__pyx_v_field); __Pyx_GIVEREF(__pyx_v_field); __Pyx_GOTREF(__pyx_v_self->_current_header_name); __Pyx_DECREF(__pyx_v_self->_current_header_name); __pyx_v_self->_current_header_name = __pyx_v_field; /* "httptools/parser/parser.pyx":113 * cdef _on_header_field(self, bytes field): * self._maybe_call_on_header() * if self._current_header_name is None: # <<<<<<<<<<<<<< * self._current_header_name = field * else: */ goto __pyx_L3; } /* "httptools/parser/parser.pyx":116 * self._current_header_name = field * else: * self._current_header_name += field # <<<<<<<<<<<<<< * * cdef _on_header_value(self, bytes val): */ /*else*/ { __pyx_t_1 = 
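/* Editor's note: llhttp may deliver one header name across several
 * on_header_field spans, so the PyNumber_InPlaceAdd below (the '+=' from
 * the .pyx source) appends each new fragment to the bytes already
 * buffered, until a header value or headers-complete event flushes the
 * name/value pair. */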
PyNumber_InPlaceAdd(__pyx_v_self->_current_header_name, __pyx_v_field); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 116, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->_current_header_name); __Pyx_DECREF(__pyx_v_self->_current_header_name); __pyx_v_self->_current_header_name = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; } __pyx_L3:; /* "httptools/parser/parser.pyx":111 * current_header_value) * * cdef _on_header_field(self, bytes field): # <<<<<<<<<<<<<< * self._maybe_call_on_header() * if self._current_header_name is None: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpParser._on_header_field", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":118 * self._current_header_name += field * * cdef _on_header_value(self, bytes val): # <<<<<<<<<<<<<< * if self._current_header_value is None: * self._current_header_value = val */ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_header_value(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_val) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_on_header_value", 0); /* "httptools/parser/parser.pyx":119 * * cdef _on_header_value(self, bytes val): * if self._current_header_value is None: # <<<<<<<<<<<<<< * self._current_header_value = val * else: */ __pyx_t_1 = (__pyx_v_self->_current_header_value == ((PyObject*)Py_None)); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "httptools/parser/parser.pyx":120 * cdef _on_header_value(self, bytes val): * if self._current_header_value is None: * self._current_header_value = val # <<<<<<<<<<<<<< * else: * # This is unlikely, as mostly HTTP headers are one-line */ __Pyx_INCREF(__pyx_v_val); __Pyx_GIVEREF(__pyx_v_val); __Pyx_GOTREF(__pyx_v_self->_current_header_value); __Pyx_DECREF(__pyx_v_self->_current_header_value); __pyx_v_self->_current_header_value = __pyx_v_val; /* "httptools/parser/parser.pyx":119 * * cdef _on_header_value(self, bytes val): * if self._current_header_value is None: # <<<<<<<<<<<<<< * self._current_header_value = val * else: */ goto __pyx_L3; } /* "httptools/parser/parser.pyx":123 * else: * # This is unlikely, as mostly HTTP headers are one-line * self._current_header_value += val # <<<<<<<<<<<<<< * * cdef _on_headers_complete(self): */ /*else*/ { __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_self->_current_header_value, __pyx_v_val); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_current_header_value); __Pyx_DECREF(__pyx_v_self->_current_header_value); __pyx_v_self->_current_header_value = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "httptools/parser/parser.pyx":118 * self._current_header_name += field * * cdef _on_header_value(self, bytes val): # <<<<<<<<<<<<<< * if self._current_header_value is None: * self._current_header_value = val */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("httptools.parser.parser.HttpParser._on_header_value", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":125 * self._current_header_value += val * * cdef _on_headers_complete(self): # <<<<<<<<<<<<<< * self._maybe_call_on_header() * */ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_headers_complete(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_on_headers_complete", 0); /* "httptools/parser/parser.pyx":126 * * cdef _on_headers_complete(self): * self._maybe_call_on_header() # <<<<<<<<<<<<<< * * if self._proto_on_headers_complete is not None: */ __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_self->__pyx_vtab)->_maybe_call_on_header(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":128 * self._maybe_call_on_header() * * if self._proto_on_headers_complete is not None: # <<<<<<<<<<<<<< * self._proto_on_headers_complete() * */ __pyx_t_2 = (__pyx_v_self->_proto_on_headers_complete != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "httptools/parser/parser.pyx":129 * * if self._proto_on_headers_complete is not None: * self._proto_on_headers_complete() # <<<<<<<<<<<<<< * * cdef _on_chunk_header(self): */ __Pyx_INCREF(__pyx_v_self->_proto_on_headers_complete); __pyx_t_4 = __pyx_v_self->_proto_on_headers_complete; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_1 = (__pyx_t_5) ? 
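/* Editor's note: in the call that follows, __pyx_t_5 is non-NULL only
 * when a bound method was unpacked just above, in which case its 'self'
 * is passed as the lone argument; otherwise the callable is invoked with
 * no arguments. */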
__Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 129, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":128 * self._maybe_call_on_header() * * if self._proto_on_headers_complete is not None: # <<<<<<<<<<<<<< * self._proto_on_headers_complete() * */ } /* "httptools/parser/parser.pyx":125 * self._current_header_value += val * * cdef _on_headers_complete(self): # <<<<<<<<<<<<<< * self._maybe_call_on_header() * */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("httptools.parser.parser.HttpParser._on_headers_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":131 * self._proto_on_headers_complete() * * cdef _on_chunk_header(self): # <<<<<<<<<<<<<< * if (self._current_header_value is not None or * self._current_header_name is not None): */ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_chunk_header(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_on_chunk_header", 0); /* "httptools/parser/parser.pyx":132 * * cdef _on_chunk_header(self): * if (self._current_header_value is not None or # <<<<<<<<<<<<<< * self._current_header_name is not None): * raise HttpParserError('invalid headers state') */ __pyx_t_2 = (__pyx_v_self->_current_header_value != ((PyObject*)Py_None)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "httptools/parser/parser.pyx":133 * cdef _on_chunk_header(self): * if (self._current_header_value is not None or * self._current_header_name is not None): # <<<<<<<<<<<<<< * raise HttpParserError('invalid headers state') * */ __pyx_t_3 = (__pyx_v_self->_current_header_name != ((PyObject*)Py_None)); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; /* "httptools/parser/parser.pyx":132 * * cdef _on_chunk_header(self): * if (self._current_header_value is not None or # <<<<<<<<<<<<<< * self._current_header_name is not None): * raise HttpParserError('invalid headers state') */ if (unlikely(__pyx_t_1)) { /* "httptools/parser/parser.pyx":134 * if (self._current_header_value is not None or * self._current_header_name is not None): * raise HttpParserError('invalid headers state') # <<<<<<<<<<<<<< * * if self._proto_on_chunk_header is not None: */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_HttpParserError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_4 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_kp_u_invalid_headers_state) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_kp_u_invalid_headers_state); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 134, __pyx_L1_error) /* "httptools/parser/parser.pyx":132 * * cdef _on_chunk_header(self): * if (self._current_header_value is not None or # <<<<<<<<<<<<<< * self._current_header_name is not None): * raise HttpParserError('invalid headers state') */ } /* "httptools/parser/parser.pyx":136 * raise HttpParserError('invalid headers state') * * if self._proto_on_chunk_header is not None: # <<<<<<<<<<<<<< * self._proto_on_chunk_header() * */ __pyx_t_1 = (__pyx_v_self->_proto_on_chunk_header != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "httptools/parser/parser.pyx":137 * * if self._proto_on_chunk_header is not None: * self._proto_on_chunk_header() # <<<<<<<<<<<<<< * * cdef _on_chunk_complete(self): */ __Pyx_INCREF(__pyx_v_self->_proto_on_chunk_header); __pyx_t_5 = __pyx_v_self->_proto_on_chunk_header; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_4 = (__pyx_t_6) ? __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6) : __Pyx_PyObject_CallNoArg(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "httptools/parser/parser.pyx":136 * raise HttpParserError('invalid headers state') * * if self._proto_on_chunk_header is not None: # <<<<<<<<<<<<<< * self._proto_on_chunk_header() * */ } /* "httptools/parser/parser.pyx":131 * self._proto_on_headers_complete() * * cdef _on_chunk_header(self): # <<<<<<<<<<<<<< * if (self._current_header_value is not None or * self._current_header_name is not None): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("httptools.parser.parser.HttpParser._on_chunk_header", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":139 * self._proto_on_chunk_header() * * cdef _on_chunk_complete(self): # <<<<<<<<<<<<<< * self._maybe_call_on_header() * */ static PyObject *__pyx_f_9httptools_6parser_6parser_10HttpParser__on_chunk_complete(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_on_chunk_complete", 0); /* "httptools/parser/parser.pyx":140 * * cdef _on_chunk_complete(self): * self._maybe_call_on_header() # <<<<<<<<<<<<<< * * if self._proto_on_chunk_complete is not None: */ __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser 
*)__pyx_v_self->__pyx_vtab)->_maybe_call_on_header(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 140, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":142 * self._maybe_call_on_header() * * if self._proto_on_chunk_complete is not None: # <<<<<<<<<<<<<< * self._proto_on_chunk_complete() * */ __pyx_t_2 = (__pyx_v_self->_proto_on_chunk_complete != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "httptools/parser/parser.pyx":143 * * if self._proto_on_chunk_complete is not None: * self._proto_on_chunk_complete() # <<<<<<<<<<<<<< * * ### Public API ### */ __Pyx_INCREF(__pyx_v_self->_proto_on_chunk_complete); __pyx_t_4 = __pyx_v_self->_proto_on_chunk_complete; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":142 * self._maybe_call_on_header() * * if self._proto_on_chunk_complete is not None: # <<<<<<<<<<<<<< * self._proto_on_chunk_complete() * */ } /* "httptools/parser/parser.pyx":139 * self._proto_on_chunk_header() * * cdef _on_chunk_complete(self): # <<<<<<<<<<<<<< * self._maybe_call_on_header() * */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("httptools.parser.parser.HttpParser._on_chunk_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":147 * ### Public API ### * * def get_http_version(self): # <<<<<<<<<<<<<< * cdef cparser.llhttp_t* parser = self._cparser * return '{}.{}'.format(parser.http_major, parser.http_minor) */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_5get_http_version(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_5get_http_version(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("get_http_version (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_10HttpParser_4get_http_version(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_4get_http_version(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { llhttp_t *__pyx_v_parser; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations llhttp_t *__pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_http_version", 0); /* 
"httptools/parser/parser.pyx":148 * * def get_http_version(self): * cdef cparser.llhttp_t* parser = self._cparser # <<<<<<<<<<<<<< * return '{}.{}'.format(parser.http_major, parser.http_minor) * */ __pyx_t_1 = __pyx_v_self->_cparser; __pyx_v_parser = __pyx_t_1; /* "httptools/parser/parser.pyx":149 * def get_http_version(self): * cdef cparser.llhttp_t* parser = self._cparser * return '{}.{}'.format(parser.http_major, parser.http_minor) # <<<<<<<<<<<<<< * * def should_keep_alive(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_, __pyx_n_s_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_From_uint8_t(__pyx_v_parser->http_major); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_uint8_t(__pyx_v_parser->http_minor); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_4, __pyx_t_5}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 149, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_4, __pyx_t_5}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 149, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "httptools/parser/parser.pyx":147 * ### Public API ### * * def get_http_version(self): # <<<<<<<<<<<<<< * cdef cparser.llhttp_t* parser = self._cparser * return '{}.{}'.format(parser.http_major, parser.http_minor) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("httptools.parser.parser.HttpParser.get_http_version", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":151 * return 
'{}.{}'.format(parser.http_major, parser.http_minor) * * def should_keep_alive(self): # <<<<<<<<<<<<<< * return bool(cparser.llhttp_should_keep_alive(self._cparser)) * */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_7should_keep_alive(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_7should_keep_alive(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("should_keep_alive (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_10HttpParser_6should_keep_alive(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_6should_keep_alive(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("should_keep_alive", 0); /* "httptools/parser/parser.pyx":152 * * def should_keep_alive(self): * return bool(cparser.llhttp_should_keep_alive(self._cparser)) # <<<<<<<<<<<<<< * * def should_upgrade(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(llhttp_should_keep_alive(__pyx_v_self->_cparser)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyBool_FromLong((!(!__pyx_t_2))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "httptools/parser/parser.pyx":151 * return '{}.{}'.format(parser.http_major, parser.http_minor) * * def should_keep_alive(self): # <<<<<<<<<<<<<< * return bool(cparser.llhttp_should_keep_alive(self._cparser)) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpParser.should_keep_alive", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":154 * return bool(cparser.llhttp_should_keep_alive(self._cparser)) * * def should_upgrade(self): # <<<<<<<<<<<<<< * cdef cparser.llhttp_t* parser = self._cparser * return bool(parser.upgrade) */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_9should_upgrade(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_9should_upgrade(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("should_upgrade (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_10HttpParser_8should_upgrade(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_8should_upgrade(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { llhttp_t *__pyx_v_parser; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations llhttp_t 
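/* should_keep_alive() above wraps llhttp_should_keep_alive(), and
 * should_upgrade() below reads the parser's `upgrade` flag; both
 * coerce the C result to a Python bool. A minimal usage sketch
 * (assumes the public httptools API; not part of the generated code):
 *
 *     import httptools
 *     parser = httptools.HttpRequestParser(protocol)
 *     parser.feed_data(data)
 *     if parser.should_upgrade():
 *         ...  # e.g. switch the connection to a WebSocket protocol
 */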
*__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("should_upgrade", 0); /* "httptools/parser/parser.pyx":155 * * def should_upgrade(self): * cdef cparser.llhttp_t* parser = self._cparser # <<<<<<<<<<<<<< * return bool(parser.upgrade) * */ __pyx_t_1 = __pyx_v_self->_cparser; __pyx_v_parser = __pyx_t_1; /* "httptools/parser/parser.pyx":156 * def should_upgrade(self): * cdef cparser.llhttp_t* parser = self._cparser * return bool(parser.upgrade) # <<<<<<<<<<<<<< * * def feed_data(self, data): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_uint8_t(__pyx_v_parser->upgrade); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyBool_FromLong((!(!__pyx_t_3))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "httptools/parser/parser.pyx":154 * return bool(cparser.llhttp_should_keep_alive(self._cparser)) * * def should_upgrade(self): # <<<<<<<<<<<<<< * cdef cparser.llhttp_t* parser = self._cparser * return bool(parser.upgrade) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("httptools.parser.parser.HttpParser.should_upgrade", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":158 * return bool(parser.upgrade) * * def feed_data(self, data): # <<<<<<<<<<<<<< * cdef: * size_t data_len */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_11feed_data(PyObject *__pyx_v_self, PyObject *__pyx_v_data); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_11feed_data(PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("feed_data (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_10HttpParser_10feed_data(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self), ((PyObject *)__pyx_v_data)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_10feed_data(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_data) { size_t __pyx_v_data_len; llhttp_errno_t __pyx_v_err; Py_buffer *__pyx_v_buf; int __pyx_v_owning_buf; char *__pyx_v_err_pos; PyObject *__pyx_v_ex = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; char const *__pyx_t_9; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; PyObject *__pyx_t_13 = NULL; PyObject *__pyx_t_14 = NULL; PyObject *__pyx_t_15 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("feed_data", 0); /* "httptools/parser/parser.pyx":163 * cparser.llhttp_errno_t err * Py_buffer *buf * bint owning_buf = False # <<<<<<<<<<<<<< * char* err_pos * */ __pyx_v_owning_buf = 0; /* "httptools/parser/parser.pyx":166 * char* err_pos * * if PyMemoryView_Check(data): # 
<<<<<<<<<<<<<< * buf = PyMemoryView_GET_BUFFER(data) * data_len = <size_t>buf.len */ __pyx_t_1 = (PyMemoryView_Check(__pyx_v_data) != 0); if (__pyx_t_1) { /* "httptools/parser/parser.pyx":167 * * if PyMemoryView_Check(data): * buf = PyMemoryView_GET_BUFFER(data) # <<<<<<<<<<<<<< * data_len = <size_t>buf.len * err = cparser.llhttp_execute( */ __pyx_v_buf = PyMemoryView_GET_BUFFER(__pyx_v_data); /* "httptools/parser/parser.pyx":168 * if PyMemoryView_Check(data): * buf = PyMemoryView_GET_BUFFER(data) * data_len = <size_t>buf.len # <<<<<<<<<<<<<< * err = cparser.llhttp_execute( * self._cparser, */ __pyx_v_data_len = ((size_t)__pyx_v_buf->len); /* "httptools/parser/parser.pyx":169 * buf = PyMemoryView_GET_BUFFER(data) * data_len = <size_t>buf.len * err = cparser.llhttp_execute( # <<<<<<<<<<<<<< * self._cparser, * <char*>buf.buf, */ __pyx_v_err = llhttp_execute(__pyx_v_self->_cparser, ((char *)__pyx_v_buf->buf), __pyx_v_data_len); /* "httptools/parser/parser.pyx":166 * char* err_pos * * if PyMemoryView_Check(data): # <<<<<<<<<<<<<< * buf = PyMemoryView_GET_BUFFER(data) * data_len = <size_t>buf.len */ goto __pyx_L3; } /* "httptools/parser/parser.pyx":175 * * else: * buf = &self.py_buf # <<<<<<<<<<<<<< * PyObject_GetBuffer(data, buf, PyBUF_SIMPLE) * owning_buf = True */ /*else*/ { __pyx_v_buf = (&__pyx_v_self->py_buf); /* "httptools/parser/parser.pyx":176 * else: * buf = &self.py_buf * PyObject_GetBuffer(data, buf, PyBUF_SIMPLE) # <<<<<<<<<<<<<< * owning_buf = True * data_len = <size_t>buf.len */ __pyx_t_2 = PyObject_GetBuffer(__pyx_v_data, __pyx_v_buf, PyBUF_SIMPLE); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 176, __pyx_L1_error) /* "httptools/parser/parser.pyx":177 * buf = &self.py_buf * PyObject_GetBuffer(data, buf, PyBUF_SIMPLE) * owning_buf = True # <<<<<<<<<<<<<< * data_len = <size_t>buf.len * */ __pyx_v_owning_buf = 1; /* "httptools/parser/parser.pyx":178 * PyObject_GetBuffer(data, buf, PyBUF_SIMPLE) * owning_buf = True * data_len = <size_t>buf.len # <<<<<<<<<<<<<< * * err = cparser.llhttp_execute( */ __pyx_v_data_len = ((size_t)__pyx_v_buf->len); /* "httptools/parser/parser.pyx":180 * data_len = <size_t>buf.len * * err = cparser.llhttp_execute( # <<<<<<<<<<<<<< * self._cparser, * <char*>buf.buf, */ __pyx_v_err = llhttp_execute(__pyx_v_self->_cparser, ((char *)__pyx_v_buf->buf), __pyx_v_data_len); } __pyx_L3:; /* "httptools/parser/parser.pyx":185 * data_len) * * try: # <<<<<<<<<<<<<< * if self._cparser.upgrade == 1 and err == cparser.HPE_PAUSED_UPGRADE: * err_pos = cparser.llhttp_get_error_pos(self._cparser) */ /*try:*/ { /* "httptools/parser/parser.pyx":186 * * try: * if self._cparser.upgrade == 1 and err == cparser.HPE_PAUSED_UPGRADE: # <<<<<<<<<<<<<< * err_pos = cparser.llhttp_get_error_pos(self._cparser) * */ __pyx_t_3 = ((__pyx_v_self->_cparser->upgrade == 1) != 0); if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L8_bool_binop_done; } __pyx_t_3 = ((__pyx_v_err == HPE_PAUSED_UPGRADE) != 0); __pyx_t_1 = __pyx_t_3; __pyx_L8_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "httptools/parser/parser.pyx":187 * try: * if self._cparser.upgrade == 1 and err == cparser.HPE_PAUSED_UPGRADE: * err_pos = cparser.llhttp_get_error_pos(self._cparser) # <<<<<<<<<<<<<< * * # Immediately free the parser from "error" state, simulating */ __pyx_v_err_pos = llhttp_get_error_pos(__pyx_v_self->_cparser); /* "httptools/parser/parser.pyx":193 * # allow users manually "resume after upgrade", and 2) the use * # case for resuming parsing is very rare. 
* cparser.llhttp_resume_after_upgrade(self._cparser) # <<<<<<<<<<<<<< * * # The err_pos here is specific for the input buf. So if we ever */ llhttp_resume_after_upgrade(__pyx_v_self->_cparser); /* "httptools/parser/parser.pyx":199 * # successive calls to feed_data() until resume_after_upgrade is * # called), we have to store the result and keep our own state. * raise HttpParserUpgrade(err_pos - <char*>buf.buf) # <<<<<<<<<<<<<< * finally: * if owning_buf: */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_HttpParserUpgrade); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 199, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyInt_From_ptrdiff_t((__pyx_v_err_pos - ((char *)__pyx_v_buf->buf))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 199, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_4 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_7, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 199, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 199, __pyx_L5_error) /* "httptools/parser/parser.pyx":186 * * try: * if self._cparser.upgrade == 1 and err == cparser.HPE_PAUSED_UPGRADE: # <<<<<<<<<<<<<< * err_pos = cparser.llhttp_get_error_pos(self._cparser) * */ } } /* "httptools/parser/parser.pyx":201 * raise HttpParserUpgrade(err_pos - <char*>buf.buf) * finally: * if owning_buf: # <<<<<<<<<<<<<< * PyBuffer_Release(buf) * */ /*finally:*/ { /*normal exit:*/{ __pyx_t_1 = (__pyx_v_owning_buf != 0); if (__pyx_t_1) { /* "httptools/parser/parser.pyx":202 * finally: * if owning_buf: * PyBuffer_Release(buf) # <<<<<<<<<<<<<< * * if err != cparser.HPE_OK: */ PyBuffer_Release(__pyx_v_buf); /* "httptools/parser/parser.pyx":201 * raise HttpParserUpgrade(err_pos - <char*>buf.buf) * finally: * if owning_buf: # <<<<<<<<<<<<<< * PyBuffer_Release(buf) * */ } goto __pyx_L6; } __pyx_L5_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_13, &__pyx_t_14, &__pyx_t_15); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12) < 0)) __Pyx_ErrFetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __Pyx_XGOTREF(__pyx_t_13); __Pyx_XGOTREF(__pyx_t_14); __Pyx_XGOTREF(__pyx_t_15); __pyx_t_2 = __pyx_lineno; __pyx_t_8 = __pyx_clineno; __pyx_t_9 = __pyx_filename; { __pyx_t_1 = (__pyx_v_owning_buf != 0); if (__pyx_t_1) { /* "httptools/parser/parser.pyx":202 * finally: * if owning_buf: * PyBuffer_Release(buf) # <<<<<<<<<<<<<< * * if err != cparser.HPE_OK: */ PyBuffer_Release(__pyx_v_buf); /* "httptools/parser/parser.pyx":201 * raise HttpParserUpgrade(err_pos - <char*>buf.buf) * finally: * if owning_buf: # <<<<<<<<<<<<<< * 
PyBuffer_Release(buf) * */ } } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_13); __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_15); __Pyx_ExceptionReset(__pyx_t_13, __pyx_t_14, __pyx_t_15); } __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ErrRestore(__pyx_t_10, __pyx_t_11, __pyx_t_12); __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_lineno = __pyx_t_2; __pyx_clineno = __pyx_t_8; __pyx_filename = __pyx_t_9; goto __pyx_L1_error; } __pyx_L6:; } /* "httptools/parser/parser.pyx":204 * PyBuffer_Release(buf) * * if err != cparser.HPE_OK: # <<<<<<<<<<<<<< * ex = parser_error_from_errno( * self._cparser, */ __pyx_t_1 = ((__pyx_v_err != HPE_OK) != 0); if (__pyx_t_1) { /* "httptools/parser/parser.pyx":205 * * if err != cparser.HPE_OK: * ex = parser_error_from_errno( # <<<<<<<<<<<<<< * self._cparser, * <cparser.llhttp_errno_t> self._cparser.error) */ __pyx_t_4 = __pyx_f_9httptools_6parser_6parser_parser_error_from_errno(__pyx_v_self->_cparser, ((llhttp_errno_t)__pyx_v_self->_cparser->error)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 205, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_ex = __pyx_t_4; __pyx_t_4 = 0; /* "httptools/parser/parser.pyx":208 * self._cparser, * <cparser.llhttp_errno_t> self._cparser.error) * if isinstance(ex, HttpParserCallbackError): # <<<<<<<<<<<<<< * if self._last_error is not None: * ex.__context__ = self._last_error */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_HttpParserCallbackError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyObject_IsInstance(__pyx_v_ex, __pyx_t_4); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 208, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = (__pyx_t_1 != 0); if (__pyx_t_3) { /* "httptools/parser/parser.pyx":209 * <cparser.llhttp_errno_t> self._cparser.error) * if isinstance(ex, HttpParserCallbackError): * if self._last_error is not None: # <<<<<<<<<<<<<< * ex.__context__ = self._last_error * self._last_error = None */ __pyx_t_3 = (__pyx_v_self->_last_error != Py_None); __pyx_t_1 = (__pyx_t_3 != 0); if (__pyx_t_1) { /* "httptools/parser/parser.pyx":210 * if isinstance(ex, HttpParserCallbackError): * if self._last_error is not None: * ex.__context__ = self._last_error # <<<<<<<<<<<<<< * self._last_error = None * raise ex */ __pyx_t_4 = __pyx_v_self->_last_error; __Pyx_INCREF(__pyx_t_4); if (__Pyx_PyObject_SetAttrStr(__pyx_v_ex, __pyx_n_s_context, __pyx_t_4) < 0) __PYX_ERR(0, 210, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "httptools/parser/parser.pyx":211 * if self._last_error is not None: * ex.__context__ = self._last_error * self._last_error = None # <<<<<<<<<<<<<< * raise ex * */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_self->_last_error); __Pyx_DECREF(__pyx_v_self->_last_error); __pyx_v_self->_last_error = Py_None; /* "httptools/parser/parser.pyx":209 * <cparser.llhttp_errno_t> self._cparser.error) * if isinstance(ex, HttpParserCallbackError): * if self._last_error is not None: # <<<<<<<<<<<<<< * ex.__context__ = self._last_error * self._last_error = None */ } /* "httptools/parser/parser.pyx":208 * self._cparser, * <cparser.llhttp_errno_t> self._cparser.error) * if isinstance(ex, HttpParserCallbackError): # <<<<<<<<<<<<<< * if self._last_error is not None: * ex.__context__ = self._last_error */ } /* "httptools/parser/parser.pyx":212 * ex.__context__ = self._last_error * self._last_error = None * raise ex # 
<<<<<<<<<<<<<< * * */ __Pyx_Raise(__pyx_v_ex, 0, 0, 0); __PYX_ERR(0, 212, __pyx_L1_error) /* "httptools/parser/parser.pyx":204 * PyBuffer_Release(buf) * * if err != cparser.HPE_OK: # <<<<<<<<<<<<<< * ex = parser_error_from_errno( * self._cparser, */ } /* "httptools/parser/parser.pyx":158 * return bool(parser.upgrade) * * def feed_data(self, data): # <<<<<<<<<<<<<< * cdef: * size_t data_len */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("httptools.parser.parser.HttpParser.feed_data", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_ex); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_13__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_13__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_10HttpParser_12__reduce_cython__(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_12__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpParser.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_10HttpParser_15__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject 
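/* Recap of feed_data() above: a memoryview argument is parsed straight
 * from its existing Py_buffer, while any other bytes-like object is
 * acquired with PyObject_GetBuffer(..., PyBUF_SIMPLE) and released in
 * the finally block. If llhttp reports HPE_PAUSED_UPGRADE with the
 * upgrade flag set, the parser is resumed immediately and
 * HttpParserUpgrade is raised carrying the byte offset of the upgrade
 * point within the input buffer. Any other non-HPE_OK result becomes
 * an exception via parser_error_from_errno(); for callback errors the
 * original exception stashed in _last_error is chained in as
 * __context__ before the raise. */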
*__pyx_pw_9httptools_6parser_6parser_10HttpParser_15__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_10HttpParser_14__setstate_cython__(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_10HttpParser_14__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpParser.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":217 * cdef class HttpRequestParser(HttpParser): * * def __init__(self, protocol): # <<<<<<<<<<<<<< * self._init(protocol, cparser.HTTP_REQUEST) * */ /* Python wrapper */ static int __pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_protocol = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_protocol,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_protocol)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 217, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } 
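/* HttpRequestParser.__init__ (below) looks up `on_url` on the wrapped
 * protocol object once with getattr and installs the on_url C callback
 * only if the method exists, so protocols that do not care about URLs
 * pay no per-request C-to-Python call overhead. HttpResponseParser
 * applies the same strategy to `on_status` further down. */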
__pyx_v_protocol = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 217, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("httptools.parser.parser.HttpRequestParser.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_9httptools_6parser_6parser_17HttpRequestParser___init__(((struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *)__pyx_v_self), __pyx_v_protocol); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_9httptools_6parser_6parser_17HttpRequestParser___init__(struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self, PyObject *__pyx_v_protocol) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "httptools/parser/parser.pyx":218 * * def __init__(self, protocol): * self._init(protocol, cparser.HTTP_REQUEST) # <<<<<<<<<<<<<< * * self._proto_on_url = getattr(protocol, 'on_url', None) */ __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpRequestParser *)__pyx_v_self->__pyx_base.__pyx_vtab)->__pyx_base._init(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self), __pyx_v_protocol, HTTP_REQUEST); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":220 * self._init(protocol, cparser.HTTP_REQUEST) * * self._proto_on_url = getattr(protocol, 'on_url', None) # <<<<<<<<<<<<<< * if self._proto_on_url is not None: * self._csettings.on_url = cb_on_url */ __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_url, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->__pyx_base._proto_on_url); __Pyx_DECREF(__pyx_v_self->__pyx_base._proto_on_url); __pyx_v_self->__pyx_base._proto_on_url = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":221 * * self._proto_on_url = getattr(protocol, 'on_url', None) * if self._proto_on_url is not None: # <<<<<<<<<<<<<< * self._csettings.on_url = cb_on_url * */ __pyx_t_2 = (__pyx_v_self->__pyx_base._proto_on_url != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "httptools/parser/parser.pyx":222 * self._proto_on_url = getattr(protocol, 'on_url', None) * if self._proto_on_url is not None: * self._csettings.on_url = cb_on_url # <<<<<<<<<<<<<< * * def get_method(self): */ __pyx_v_self->__pyx_base._csettings->on_url = __pyx_f_9httptools_6parser_6parser_cb_on_url; /* "httptools/parser/parser.pyx":221 * * self._proto_on_url = getattr(protocol, 'on_url', None) * if self._proto_on_url is not None: # <<<<<<<<<<<<<< * self._csettings.on_url = cb_on_url * */ } /* "httptools/parser/parser.pyx":217 * cdef class HttpRequestParser(HttpParser): * * def __init__(self, protocol): # <<<<<<<<<<<<<< * self._init(protocol, cparser.HTTP_REQUEST) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpRequestParser.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":224 * 
self._csettings.on_url = cb_on_url * * def get_method(self): # <<<<<<<<<<<<<< * cdef cparser.llhttp_t* parser = self._cparser * return cparser.llhttp_method_name(<cparser.llhttp_method_t> parser.method) */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_3get_method(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_3get_method(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("get_method (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_2get_method(((struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_2get_method(struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self) { llhttp_t *__pyx_v_parser; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations llhttp_t *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_method", 0); /* "httptools/parser/parser.pyx":225 * * def get_method(self): * cdef cparser.llhttp_t* parser = self._cparser # <<<<<<<<<<<<<< * return cparser.llhttp_method_name(<cparser.llhttp_method_t> parser.method) * */ __pyx_t_1 = __pyx_v_self->__pyx_base._cparser; __pyx_v_parser = __pyx_t_1; /* "httptools/parser/parser.pyx":226 * def get_method(self): * cdef cparser.llhttp_t* parser = self._cparser * return cparser.llhttp_method_name(<cparser.llhttp_method_t> parser.method) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBytes_FromString(llhttp_method_name(((llhttp_method_t)__pyx_v_parser->method))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "httptools/parser/parser.pyx":224 * self._csettings.on_url = cb_on_url * * def get_method(self): # <<<<<<<<<<<<<< * cdef cparser.llhttp_t* parser = self._cparser * return cparser.llhttp_method_name(<cparser.llhttp_method_t> parser.method) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("httptools.parser.parser.HttpRequestParser.get_method", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_4__reduce_cython__(((struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_4__reduce_cython__(CYTHON_UNUSED 
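/* get_method() above returns the llhttp method name as bytes, e.g.
 * b'GET'. Sketch (assumes the public httptools API; not part of the
 * generated code):
 *
 *     parser = httptools.HttpRequestParser(protocol)
 *     parser.feed_data(b'GET / HTTP/1.1\r\n\r\n')
 *     assert parser.get_method() == b'GET'
 */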
struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpRequestParser.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_6__setstate_cython__(((struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_17HttpRequestParser_6__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; 
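/* The __reduce_cython__/__setstate_cython__ stubs in this file all
 * raise TypeError because the parser types have a non-trivial
 * __cinit__ (they own llhttp C state), so instances cannot be pickled
 * or copied with the default protocol. */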
__Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpRequestParser.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":231 * cdef class HttpResponseParser(HttpParser): * * def __init__(self, protocol): # <<<<<<<<<<<<<< * self._init(protocol, cparser.HTTP_RESPONSE) * */ /* Python wrapper */ static int __pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_protocol = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_protocol,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_protocol)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 231, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_protocol = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 231, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("httptools.parser.parser.HttpResponseParser.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser___init__(((struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *)__pyx_v_self), __pyx_v_protocol); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser___init__(struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self, PyObject *__pyx_v_protocol) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "httptools/parser/parser.pyx":232 * * def __init__(self, protocol): * self._init(protocol, cparser.HTTP_RESPONSE) # <<<<<<<<<<<<<< * * self._proto_on_status = getattr(protocol, 'on_status', None) */ __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpResponseParser *)__pyx_v_self->__pyx_base.__pyx_vtab)->__pyx_base._init(((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_v_self), __pyx_v_protocol, HTTP_RESPONSE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":234 * self._init(protocol, 
cparser.HTTP_RESPONSE) * * self._proto_on_status = getattr(protocol, 'on_status', None) # <<<<<<<<<<<<<< * if self._proto_on_status is not None: * self._csettings.on_status = cb_on_status */ __pyx_t_1 = __Pyx_GetAttr3(__pyx_v_protocol, __pyx_n_u_on_status, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->__pyx_base._proto_on_status); __Pyx_DECREF(__pyx_v_self->__pyx_base._proto_on_status); __pyx_v_self->__pyx_base._proto_on_status = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":235 * * self._proto_on_status = getattr(protocol, 'on_status', None) * if self._proto_on_status is not None: # <<<<<<<<<<<<<< * self._csettings.on_status = cb_on_status * */ __pyx_t_2 = (__pyx_v_self->__pyx_base._proto_on_status != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "httptools/parser/parser.pyx":236 * self._proto_on_status = getattr(protocol, 'on_status', None) * if self._proto_on_status is not None: * self._csettings.on_status = cb_on_status # <<<<<<<<<<<<<< * * def get_status_code(self): */ __pyx_v_self->__pyx_base._csettings->on_status = __pyx_f_9httptools_6parser_6parser_cb_on_status; /* "httptools/parser/parser.pyx":235 * * self._proto_on_status = getattr(protocol, 'on_status', None) * if self._proto_on_status is not None: # <<<<<<<<<<<<<< * self._csettings.on_status = cb_on_status * */ } /* "httptools/parser/parser.pyx":231 * cdef class HttpResponseParser(HttpParser): * * def __init__(self, protocol): # <<<<<<<<<<<<<< * self._init(protocol, cparser.HTTP_RESPONSE) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpResponseParser.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":238 * self._csettings.on_status = cb_on_status * * def get_status_code(self): # <<<<<<<<<<<<<< * cdef cparser.llhttp_t* parser = self._cparser * return parser.status_code */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_3get_status_code(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_3get_status_code(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("get_status_code (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_2get_status_code(((struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_2get_status_code(struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self) { llhttp_t *__pyx_v_parser; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations llhttp_t *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_status_code", 0); /* "httptools/parser/parser.pyx":239 * * def get_status_code(self): * cdef cparser.llhttp_t* parser = self._cparser # <<<<<<<<<<<<<< * return parser.status_code * */ __pyx_t_1 = __pyx_v_self->__pyx_base._cparser; __pyx_v_parser = __pyx_t_1; /* "httptools/parser/parser.pyx":240 * def get_status_code(self): * cdef cparser.llhttp_t* parser = 
self._cparser * return parser.status_code # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_uint16_t(__pyx_v_parser->status_code); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "httptools/parser/parser.pyx":238 * self._csettings.on_status = cb_on_status * * def get_status_code(self): # <<<<<<<<<<<<<< * cdef cparser.llhttp_t* parser = self._cparser * return parser.status_code */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("httptools.parser.parser.HttpResponseParser.get_status_code", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_4__reduce_cython__(((struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_4__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpResponseParser.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject 
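/* get_status_code() above exposes llhttp's uint16 status_code field as
 * a Python int. Sketch (assumes the public httptools API; not part of
 * the generated code):
 *
 *     parser = httptools.HttpResponseParser(protocol)
 *     parser.feed_data(b'HTTP/1.1 204 No Content\r\n\r\n')
 *     assert parser.get_status_code() == 204
 */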
*__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_6__setstate_cython__(((struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9httptools_6parser_6parser_18HttpResponseParser_6__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("httptools.parser.parser.HttpResponseParser.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":243 * * * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< * cdef HttpParser pyparser = <HttpParser>parser.data * try: */ static int __pyx_f_9httptools_6parser_6parser_cb_on_message_begin(llhttp_t *__pyx_v_parser) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; PyObject *__pyx_v_ex = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cb_on_message_begin", 0); /* "httptools/parser/parser.pyx":244 * * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< * try: * pyparser._proto_on_message_begin() */ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); __Pyx_INCREF(__pyx_t_1); __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":245 * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_message_begin() * except BaseException as ex: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); 
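/* cb_on_message_begin and the other cb_on_* trampolines that follow
 * share one error convention: the Python callback runs inside a
 * try/except, any BaseException is stored on pyparser._last_error and
 * -1 is returned so llhttp aborts parsing; feed_data() then converts
 * the resulting HPE_* error into HttpParserCallbackError and chains
 * the stored exception in as __context__. */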
__Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "httptools/parser/parser.pyx":246 * cdef HttpParser pyparser = <HttpParser>parser.data * try: * pyparser._proto_on_message_begin() # <<<<<<<<<<<<<< * except BaseException as ex: * pyparser._last_error = ex */ __Pyx_INCREF(__pyx_v_pyparser->_proto_on_message_begin); __pyx_t_5 = __pyx_v_pyparser->_proto_on_message_begin; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_1 = (__pyx_t_6) ? __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6) : __Pyx_PyObject_CallNoArg(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 246, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":245 * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_message_begin() * except BaseException as ex: */ } /* "httptools/parser/parser.pyx":251 * return -1 * else: * return 0 # <<<<<<<<<<<<<< * * */ /*else:*/ { __pyx_r = 0; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "httptools/parser/parser.pyx":247 * try: * pyparser._proto_on_message_begin() * except BaseException as ex: # <<<<<<<<<<<<<< * pyparser._last_error = ex * return -1 */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); if (__pyx_t_7) { __Pyx_AddTraceback("httptools.parser.parser.cb_on_message_begin", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_6) < 0) __PYX_ERR(0, 247, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __pyx_v_ex = __pyx_t_5; /*try:*/ { /* "httptools/parser/parser.pyx":248 * pyparser._proto_on_message_begin() * except BaseException as ex: * pyparser._last_error = ex # <<<<<<<<<<<<<< * return -1 * else: */ __Pyx_INCREF(__pyx_v_ex); __Pyx_GIVEREF(__pyx_v_ex); __Pyx_GOTREF(__pyx_v_pyparser->_last_error); __Pyx_DECREF(__pyx_v_pyparser->_last_error); __pyx_v_pyparser->_last_error = __pyx_v_ex; /* "httptools/parser/parser.pyx":249 * except BaseException as ex: * pyparser._last_error = ex * return -1 # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = -1; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L13_return; } /* "httptools/parser/parser.pyx":247 * try: * pyparser._proto_on_message_begin() * except BaseException as ex: # <<<<<<<<<<<<<< * pyparser._last_error = ex * return -1 */ /*finally:*/ { __pyx_L13_return: { __pyx_t_7 = __pyx_r; __Pyx_DECREF(__pyx_v_ex); __pyx_v_ex = NULL; __pyx_r = __pyx_t_7; goto __pyx_L6_except_return; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "httptools/parser/parser.pyx":245 * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_message_begin() * except BaseException as ex: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); 
__Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "httptools/parser/parser.pyx":243 * * * cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< * cdef HttpParser pyparser = <HttpParser>parser.data * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("httptools.parser.parser.cb_on_message_begin", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); __Pyx_XDECREF(__pyx_v_ex); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":254 * * * cdef int cb_on_url(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data */ static int __pyx_f_9httptools_6parser_6parser_cb_on_url(llhttp_t *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; PyObject *__pyx_v_ex = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cb_on_url", 0); /* "httptools/parser/parser.pyx":256 * cdef int cb_on_url(cparser.llhttp_t* parser, * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< * try: * pyparser._proto_on_url(at[:length]) */ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); __Pyx_INCREF(__pyx_t_1); __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":257 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_url(at[:length]) * except BaseException as ex: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "httptools/parser/parser.pyx":258 * cdef HttpParser pyparser = <HttpParser>parser.data * try: * pyparser._proto_on_url(at[:length]) # <<<<<<<<<<<<<< * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") */ __pyx_t_5 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 258, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_pyparser->_proto_on_url); __pyx_t_6 = __pyx_v_pyparser->_proto_on_url; __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_1 = (__pyx_t_7) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 258, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":257 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_url(at[:length]) * except BaseException as ex: */ } /* "httptools/parser/parser.pyx":264 * return cparser.HPE_USER * else: * return 0 # <<<<<<<<<<<<<< * * */ /*else:*/ { __pyx_r = 0; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "httptools/parser/parser.pyx":259 * try: * pyparser._proto_on_url(at[:length]) * except BaseException as ex: # <<<<<<<<<<<<<< * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") * pyparser._last_error = ex */ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); if (__pyx_t_8) { __Pyx_AddTraceback("httptools.parser.parser.cb_on_url", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_5) < 0) __PYX_ERR(0, 259, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __pyx_v_ex = __pyx_t_6; /*try:*/ { /* "httptools/parser/parser.pyx":260 * pyparser._proto_on_url(at[:length]) * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") # <<<<<<<<<<<<<< * pyparser._last_error = ex * return cparser.HPE_USER */ llhttp_set_error_reason(__pyx_v_parser, ((char const *)"`on_url` callback error")); /* "httptools/parser/parser.pyx":261 * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") * pyparser._last_error = ex # <<<<<<<<<<<<<< * return cparser.HPE_USER * else: */ __Pyx_INCREF(__pyx_v_ex); __Pyx_GIVEREF(__pyx_v_ex); __Pyx_GOTREF(__pyx_v_pyparser->_last_error); __Pyx_DECREF(__pyx_v_pyparser->_last_error); __pyx_v_pyparser->_last_error = __pyx_v_ex; /* "httptools/parser/parser.pyx":262 * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") * pyparser._last_error = ex * return cparser.HPE_USER # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = HPE_USER; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L13_return; } /* "httptools/parser/parser.pyx":259 * try: * pyparser._proto_on_url(at[:length]) * except BaseException as ex: # <<<<<<<<<<<<<< * cparser.llhttp_set_error_reason(parser, "`on_url` callback error") * pyparser._last_error = ex */ /*finally:*/ { __pyx_L13_return: { __pyx_t_8 = __pyx_r; __Pyx_DECREF(__pyx_v_ex); __pyx_v_ex = NULL; __pyx_r = __pyx_t_8; goto __pyx_L6_except_return; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "httptools/parser/parser.pyx":257 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_url(at[:length]) * except BaseException as ex: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; 
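/* __pyx_L6_except_return is the normal exit of cb_on_url's try/except: the
 * exception state saved on entry is handed back via __Pyx_ExceptionReset
 * before the status code (0 on success, HPE_USER after a callback error)
 * is returned to llhttp. */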
__Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "httptools/parser/parser.pyx":254 * * * cdef int cb_on_url(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("httptools.parser.parser.cb_on_url", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); __Pyx_XDECREF(__pyx_v_ex); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":267 * * * cdef int cb_on_status(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data */ static int __pyx_f_9httptools_6parser_6parser_cb_on_status(llhttp_t *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; PyObject *__pyx_v_ex = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cb_on_status", 0); /* "httptools/parser/parser.pyx":269 * cdef int cb_on_status(cparser.llhttp_t* parser, * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< * try: * pyparser._proto_on_status(at[:length]) */ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); __Pyx_INCREF(__pyx_t_1); __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":270 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_status(at[:length]) * except BaseException as ex: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "httptools/parser/parser.pyx":271 * cdef HttpParser pyparser = <HttpParser>parser.data * try: * pyparser._proto_on_status(at[:length]) # <<<<<<<<<<<<<< * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") */ __pyx_t_5 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 271, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_pyparser->_proto_on_status); __pyx_t_6 = __pyx_v_pyparser->_proto_on_status; __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_1 = (__pyx_t_7) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 271, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":270 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_status(at[:length]) * except BaseException as ex: */ } /* "httptools/parser/parser.pyx":277 * return cparser.HPE_USER * else: * return 0 # <<<<<<<<<<<<<< * * */ /*else:*/ { __pyx_r = 0; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "httptools/parser/parser.pyx":272 * try: * pyparser._proto_on_status(at[:length]) * except BaseException as ex: # <<<<<<<<<<<<<< * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") * pyparser._last_error = ex */ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); if (__pyx_t_8) { __Pyx_AddTraceback("httptools.parser.parser.cb_on_status", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_5) < 0) __PYX_ERR(0, 272, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __pyx_v_ex = __pyx_t_6; /*try:*/ { /* "httptools/parser/parser.pyx":273 * pyparser._proto_on_status(at[:length]) * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") # <<<<<<<<<<<<<< * pyparser._last_error = ex * return cparser.HPE_USER */ llhttp_set_error_reason(__pyx_v_parser, ((char const *)"`on_status` callback error")); /* "httptools/parser/parser.pyx":274 * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") * pyparser._last_error = ex # <<<<<<<<<<<<<< * return cparser.HPE_USER * else: */ __Pyx_INCREF(__pyx_v_ex); __Pyx_GIVEREF(__pyx_v_ex); __Pyx_GOTREF(__pyx_v_pyparser->_last_error); __Pyx_DECREF(__pyx_v_pyparser->_last_error); __pyx_v_pyparser->_last_error = __pyx_v_ex; /* "httptools/parser/parser.pyx":275 * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") * pyparser._last_error = ex * return cparser.HPE_USER # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = HPE_USER; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L13_return; } /* "httptools/parser/parser.pyx":272 * try: * pyparser._proto_on_status(at[:length]) * except BaseException as ex: # <<<<<<<<<<<<<< * cparser.llhttp_set_error_reason(parser, "`on_status` callback error") * pyparser._last_error = ex */ /*finally:*/ { __pyx_L13_return: { __pyx_t_8 = __pyx_r; __Pyx_DECREF(__pyx_v_ex); __pyx_v_ex = NULL; __pyx_r = __pyx_t_8; goto __pyx_L6_except_return; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "httptools/parser/parser.pyx":270 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_status(at[:length]) * except BaseException as ex: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; 
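/* cb_on_status has two distinct exits: __pyx_L5_except_error (above) routes
 * unexpected failures to __pyx_L1_error, which returns -1, while
 * __pyx_L6_except_return (below) returns 0 or HPE_USER; in the HPE_USER case
 * the original Python exception is already stored on pyparser._last_error. */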
__pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "httptools/parser/parser.pyx":267 * * * cdef int cb_on_status(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("httptools.parser.parser.cb_on_status", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); __Pyx_XDECREF(__pyx_v_ex); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":280 * * * cdef int cb_on_header_field(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data */ static int __pyx_f_9httptools_6parser_6parser_cb_on_header_field(llhttp_t *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; PyObject *__pyx_v_ex = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cb_on_header_field", 0); /* "httptools/parser/parser.pyx":282 * cdef int cb_on_header_field(cparser.llhttp_t* parser, * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< * try: * pyparser._on_header_field(at[:length]) */ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); __Pyx_INCREF(__pyx_t_1); __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":283 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_header_field(at[:length]) * except BaseException as ex: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "httptools/parser/parser.pyx":284 * cdef HttpParser pyparser = <HttpParser>parser.data * try: * pyparser._on_header_field(at[:length]) # <<<<<<<<<<<<<< * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 284, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_header_field(__pyx_v_pyparser, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 284, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "httptools/parser/parser.pyx":283 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_header_field(at[:length]) * except BaseException as ex: */ } /* "httptools/parser/parser.pyx":290 * return cparser.HPE_USER * else: * return 0 # 
<<<<<<<<<<<<<< * * */ /*else:*/ { __pyx_r = 0; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; /* "httptools/parser/parser.pyx":285 * try: * pyparser._on_header_field(at[:length]) * except BaseException as ex: # <<<<<<<<<<<<<< * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") * pyparser._last_error = ex */ __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); if (__pyx_t_6) { __Pyx_AddTraceback("httptools.parser.parser.cb_on_header_field", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_1, &__pyx_t_7) < 0) __PYX_ERR(0, 285, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_t_1); __pyx_v_ex = __pyx_t_1; /*try:*/ { /* "httptools/parser/parser.pyx":286 * pyparser._on_header_field(at[:length]) * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") # <<<<<<<<<<<<<< * pyparser._last_error = ex * return cparser.HPE_USER */ llhttp_set_error_reason(__pyx_v_parser, ((char const *)"`on_header_field` callback error")); /* "httptools/parser/parser.pyx":287 * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") * pyparser._last_error = ex # <<<<<<<<<<<<<< * return cparser.HPE_USER * else: */ __Pyx_INCREF(__pyx_v_ex); __Pyx_GIVEREF(__pyx_v_ex); __Pyx_GOTREF(__pyx_v_pyparser->_last_error); __Pyx_DECREF(__pyx_v_pyparser->_last_error); __pyx_v_pyparser->_last_error = __pyx_v_ex; /* "httptools/parser/parser.pyx":288 * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") * pyparser._last_error = ex * return cparser.HPE_USER # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = HPE_USER; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L13_return; } /* "httptools/parser/parser.pyx":285 * try: * pyparser._on_header_field(at[:length]) * except BaseException as ex: # <<<<<<<<<<<<<< * cparser.llhttp_set_error_reason(parser, "`on_header_field` callback error") * pyparser._last_error = ex */ /*finally:*/ { __pyx_L13_return: { __pyx_t_6 = __pyx_r; __Pyx_DECREF(__pyx_v_ex); __pyx_v_ex = NULL; __pyx_r = __pyx_t_6; goto __pyx_L6_except_return; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "httptools/parser/parser.pyx":283 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_header_field(at[:length]) * except BaseException as ex: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "httptools/parser/parser.pyx":280 * * * cdef int cb_on_header_field(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("httptools.parser.parser.cb_on_header_field", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); 
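/* cb_on_header_field and cb_on_header_value pass the raw byte spans to cdef
 * helpers (_on_header_field/_on_header_value) dispatched through the
 * HttpParser vtable, letting HttpParser buffer header names and values,
 * which llhttp may deliver across multiple callback invocations. */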
__Pyx_XDECREF(__pyx_v_ex); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":293 * * * cdef int cb_on_header_value(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data */ static int __pyx_f_9httptools_6parser_6parser_cb_on_header_value(llhttp_t *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; PyObject *__pyx_v_ex = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cb_on_header_value", 0); /* "httptools/parser/parser.pyx":295 * cdef int cb_on_header_value(cparser.llhttp_t* parser, * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< * try: * pyparser._on_header_value(at[:length]) */ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); __Pyx_INCREF(__pyx_t_1); __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":296 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_header_value(at[:length]) * except BaseException as ex: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "httptools/parser/parser.pyx":297 * cdef HttpParser pyparser = <HttpParser>parser.data * try: * pyparser._on_header_value(at[:length]) # <<<<<<<<<<<<<< * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 297, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_header_value(__pyx_v_pyparser, ((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 297, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "httptools/parser/parser.pyx":296 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_header_value(at[:length]) * except BaseException as ex: */ } /* "httptools/parser/parser.pyx":303 * return cparser.HPE_USER * else: * return 0 # <<<<<<<<<<<<<< * * */ /*else:*/ { __pyx_r = 0; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; /* "httptools/parser/parser.pyx":298 * try: * pyparser._on_header_value(at[:length]) * except BaseException as ex: # <<<<<<<<<<<<<< * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") * pyparser._last_error = ex */ __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); if (__pyx_t_6) { __Pyx_AddTraceback("httptools.parser.parser.cb_on_header_value", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_1, &__pyx_t_7) < 0) __PYX_ERR(0, 298, 
__pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_t_1); __pyx_v_ex = __pyx_t_1; /*try:*/ { /* "httptools/parser/parser.pyx":299 * pyparser._on_header_value(at[:length]) * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") # <<<<<<<<<<<<<< * pyparser._last_error = ex * return cparser.HPE_USER */ llhttp_set_error_reason(__pyx_v_parser, ((char const *)"`on_header_value` callback error")); /* "httptools/parser/parser.pyx":300 * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") * pyparser._last_error = ex # <<<<<<<<<<<<<< * return cparser.HPE_USER * else: */ __Pyx_INCREF(__pyx_v_ex); __Pyx_GIVEREF(__pyx_v_ex); __Pyx_GOTREF(__pyx_v_pyparser->_last_error); __Pyx_DECREF(__pyx_v_pyparser->_last_error); __pyx_v_pyparser->_last_error = __pyx_v_ex; /* "httptools/parser/parser.pyx":301 * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") * pyparser._last_error = ex * return cparser.HPE_USER # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = HPE_USER; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L13_return; } /* "httptools/parser/parser.pyx":298 * try: * pyparser._on_header_value(at[:length]) * except BaseException as ex: # <<<<<<<<<<<<<< * cparser.llhttp_set_error_reason(parser, "`on_header_value` callback error") * pyparser._last_error = ex */ /*finally:*/ { __pyx_L13_return: { __pyx_t_6 = __pyx_r; __Pyx_DECREF(__pyx_v_ex); __pyx_v_ex = NULL; __pyx_r = __pyx_t_6; goto __pyx_L6_except_return; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "httptools/parser/parser.pyx":296 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_header_value(at[:length]) * except BaseException as ex: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "httptools/parser/parser.pyx":293 * * * cdef int cb_on_header_value(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("httptools.parser.parser.cb_on_header_value", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); __Pyx_XDECREF(__pyx_v_ex); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":306 * * * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< * cdef HttpParser pyparser = <HttpParser>parser.data * try: */ static int __pyx_f_9httptools_6parser_6parser_cb_on_headers_complete(llhttp_t *__pyx_v_parser) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; PyObject *__pyx_v_ex = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int 
__pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cb_on_headers_complete", 0); /* "httptools/parser/parser.pyx":307 * * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< * try: * pyparser._on_headers_complete() */ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); __Pyx_INCREF(__pyx_t_1); __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":308 * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_headers_complete() * except BaseException as ex: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "httptools/parser/parser.pyx":309 * cdef HttpParser pyparser = <HttpParser>parser.data * try: * pyparser._on_headers_complete() # <<<<<<<<<<<<<< * except BaseException as ex: * pyparser._last_error = ex */ __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_headers_complete(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 309, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":308 * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_headers_complete() * except BaseException as ex: */ } /* "httptools/parser/parser.pyx":314 * return -1 * else: * if pyparser._cparser.upgrade: # <<<<<<<<<<<<<< * return 1 * else: */ /*else:*/ { __pyx_t_5 = (__pyx_v_pyparser->_cparser->upgrade != 0); if (__pyx_t_5) { /* "httptools/parser/parser.pyx":315 * else: * if pyparser._cparser.upgrade: * return 1 # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = 1; goto __pyx_L6_except_return; /* "httptools/parser/parser.pyx":314 * return -1 * else: * if pyparser._cparser.upgrade: # <<<<<<<<<<<<<< * return 1 * else: */ } /* "httptools/parser/parser.pyx":317 * return 1 * else: * return 0 # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_r = 0; goto __pyx_L6_except_return; } } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":310 * try: * pyparser._on_headers_complete() * except BaseException as ex: # <<<<<<<<<<<<<< * pyparser._last_error = ex * return -1 */ __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); if (__pyx_t_6) { __Pyx_AddTraceback("httptools.parser.parser.cb_on_headers_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_7, &__pyx_t_8) < 0) __PYX_ERR(0, 310, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_t_7); __pyx_v_ex = __pyx_t_7; /*try:*/ { /* "httptools/parser/parser.pyx":311 * pyparser._on_headers_complete() * except BaseException as ex: * pyparser._last_error = ex # <<<<<<<<<<<<<< * return -1 * else: */ __Pyx_INCREF(__pyx_v_ex); __Pyx_GIVEREF(__pyx_v_ex); __Pyx_GOTREF(__pyx_v_pyparser->_last_error); __Pyx_DECREF(__pyx_v_pyparser->_last_error); __pyx_v_pyparser->_last_error = __pyx_v_ex; /* "httptools/parser/parser.pyx":312 * except BaseException as ex: * pyparser._last_error = ex * return -1 # <<<<<<<<<<<<<< * else: * if 
pyparser._cparser.upgrade: */ __pyx_r = -1; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L14_return; } /* "httptools/parser/parser.pyx":310 * try: * pyparser._on_headers_complete() * except BaseException as ex: # <<<<<<<<<<<<<< * pyparser._last_error = ex * return -1 */ /*finally:*/ { __pyx_L14_return: { __pyx_t_6 = __pyx_r; __Pyx_DECREF(__pyx_v_ex); __pyx_v_ex = NULL; __pyx_r = __pyx_t_6; goto __pyx_L6_except_return; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "httptools/parser/parser.pyx":308 * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_headers_complete() * except BaseException as ex: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "httptools/parser/parser.pyx":306 * * * cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< * cdef HttpParser pyparser = <HttpParser>parser.data * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("httptools.parser.parser.cb_on_headers_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); __Pyx_XDECREF(__pyx_v_ex); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":320 * * * cdef int cb_on_body(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data */ static int __pyx_f_9httptools_6parser_6parser_cb_on_body(llhttp_t *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; PyObject *__pyx_v_ex = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cb_on_body", 0); /* "httptools/parser/parser.pyx":322 * cdef int cb_on_body(cparser.llhttp_t* parser, * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< * try: * pyparser._proto_on_body(at[:length]) */ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); __Pyx_INCREF(__pyx_t_1); __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":323 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_body(at[:length]) * except BaseException as ex: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "httptools/parser/parser.pyx":324 * cdef HttpParser pyparser = <HttpParser>parser.data * try: * pyparser._proto_on_body(at[:length]) # 
<<<<<<<<<<<<<< * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") */ __pyx_t_5 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 324, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_pyparser->_proto_on_body); __pyx_t_6 = __pyx_v_pyparser->_proto_on_body; __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_1 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 324, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":323 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_body(at[:length]) * except BaseException as ex: */ } /* "httptools/parser/parser.pyx":330 * return cparser.HPE_USER * else: * return 0 # <<<<<<<<<<<<<< * * */ /*else:*/ { __pyx_r = 0; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "httptools/parser/parser.pyx":325 * try: * pyparser._proto_on_body(at[:length]) * except BaseException as ex: # <<<<<<<<<<<<<< * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") * pyparser._last_error = ex */ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); if (__pyx_t_8) { __Pyx_AddTraceback("httptools.parser.parser.cb_on_body", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_5) < 0) __PYX_ERR(0, 325, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __pyx_v_ex = __pyx_t_6; /*try:*/ { /* "httptools/parser/parser.pyx":326 * pyparser._proto_on_body(at[:length]) * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") # <<<<<<<<<<<<<< * pyparser._last_error = ex * return cparser.HPE_USER */ llhttp_set_error_reason(__pyx_v_parser, ((char const *)"`on_body` callback error")); /* "httptools/parser/parser.pyx":327 * except BaseException as ex: * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") * pyparser._last_error = ex # <<<<<<<<<<<<<< * return cparser.HPE_USER * else: */ __Pyx_INCREF(__pyx_v_ex); __Pyx_GIVEREF(__pyx_v_ex); __Pyx_GOTREF(__pyx_v_pyparser->_last_error); __Pyx_DECREF(__pyx_v_pyparser->_last_error); __pyx_v_pyparser->_last_error = __pyx_v_ex; /* "httptools/parser/parser.pyx":328 * cparser.llhttp_set_error_reason(parser, "`on_body` callback error") * pyparser._last_error = ex * return cparser.HPE_USER # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = HPE_USER; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L13_return; } /* "httptools/parser/parser.pyx":325 * try: * pyparser._proto_on_body(at[:length]) * except BaseException as ex: # <<<<<<<<<<<<<< * 
cparser.llhttp_set_error_reason(parser, "`on_body` callback error") * pyparser._last_error = ex */ /*finally:*/ { __pyx_L13_return: { __pyx_t_8 = __pyx_r; __Pyx_DECREF(__pyx_v_ex); __pyx_v_ex = NULL; __pyx_r = __pyx_t_8; goto __pyx_L6_except_return; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "httptools/parser/parser.pyx":323 * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_body(at[:length]) * except BaseException as ex: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "httptools/parser/parser.pyx":320 * * * cdef int cb_on_body(cparser.llhttp_t* parser, # <<<<<<<<<<<<<< * const char *at, size_t length) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("httptools.parser.parser.cb_on_body", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); __Pyx_XDECREF(__pyx_v_ex); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":333 * * * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< * cdef HttpParser pyparser = <HttpParser>parser.data * try: */ static int __pyx_f_9httptools_6parser_6parser_cb_on_message_complete(llhttp_t *__pyx_v_parser) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; PyObject *__pyx_v_ex = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cb_on_message_complete", 0); /* "httptools/parser/parser.pyx":334 * * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< * try: * pyparser._proto_on_message_complete() */ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); __Pyx_INCREF(__pyx_t_1); __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":335 * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_message_complete() * except BaseException as ex: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "httptools/parser/parser.pyx":336 * cdef HttpParser pyparser = <HttpParser>parser.data * try: * pyparser._proto_on_message_complete() # <<<<<<<<<<<<<< * except BaseException as ex: * pyparser._last_error = ex */ __Pyx_INCREF(__pyx_v_pyparser->_proto_on_message_complete); __pyx_t_5 = __pyx_v_pyparser->_proto_on_message_complete; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = 
PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_1 = (__pyx_t_6) ? __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6) : __Pyx_PyObject_CallNoArg(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 336, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":335 * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_message_complete() * except BaseException as ex: */ } /* "httptools/parser/parser.pyx":341 * return -1 * else: * return 0 # <<<<<<<<<<<<<< * * */ /*else:*/ { __pyx_r = 0; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "httptools/parser/parser.pyx":337 * try: * pyparser._proto_on_message_complete() * except BaseException as ex: # <<<<<<<<<<<<<< * pyparser._last_error = ex * return -1 */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); if (__pyx_t_7) { __Pyx_AddTraceback("httptools.parser.parser.cb_on_message_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_6) < 0) __PYX_ERR(0, 337, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __pyx_v_ex = __pyx_t_5; /*try:*/ { /* "httptools/parser/parser.pyx":338 * pyparser._proto_on_message_complete() * except BaseException as ex: * pyparser._last_error = ex # <<<<<<<<<<<<<< * return -1 * else: */ __Pyx_INCREF(__pyx_v_ex); __Pyx_GIVEREF(__pyx_v_ex); __Pyx_GOTREF(__pyx_v_pyparser->_last_error); __Pyx_DECREF(__pyx_v_pyparser->_last_error); __pyx_v_pyparser->_last_error = __pyx_v_ex; /* "httptools/parser/parser.pyx":339 * except BaseException as ex: * pyparser._last_error = ex * return -1 # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = -1; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L13_return; } /* "httptools/parser/parser.pyx":337 * try: * pyparser._proto_on_message_complete() * except BaseException as ex: # <<<<<<<<<<<<<< * pyparser._last_error = ex * return -1 */ /*finally:*/ { __pyx_L13_return: { __pyx_t_7 = __pyx_r; __Pyx_DECREF(__pyx_v_ex); __pyx_v_ex = NULL; __pyx_r = __pyx_t_7; goto __pyx_L6_except_return; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "httptools/parser/parser.pyx":335 * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._proto_on_message_complete() * except BaseException as ex: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "httptools/parser/parser.pyx":333 * * * cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< * cdef HttpParser pyparser = <HttpParser>parser.data * try: */ /* function exit code */ 
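/* A -1 return from this callback is surfaced by llhttp as a callback
 * failure; parser_error_from_errno() further below maps the whole
 * HPE_CB_* family (and HPE_USER) to HttpParserCallbackError. */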
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("httptools.parser.parser.cb_on_message_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); __Pyx_XDECREF(__pyx_v_ex); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":344 * * * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< * cdef HttpParser pyparser = <HttpParser>parser.data * try: */ static int __pyx_f_9httptools_6parser_6parser_cb_on_chunk_header(llhttp_t *__pyx_v_parser) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; PyObject *__pyx_v_ex = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cb_on_chunk_header", 0); /* "httptools/parser/parser.pyx":345 * * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< * try: * pyparser._on_chunk_header() */ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); __Pyx_INCREF(__pyx_t_1); __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":346 * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_chunk_header() * except BaseException as ex: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "httptools/parser/parser.pyx":347 * cdef HttpParser pyparser = <HttpParser>parser.data * try: * pyparser._on_chunk_header() # <<<<<<<<<<<<<< * except BaseException as ex: * pyparser._last_error = ex */ __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_chunk_header(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 347, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":346 * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_chunk_header() * except BaseException as ex: */ } /* "httptools/parser/parser.pyx":352 * return -1 * else: * return 0 # <<<<<<<<<<<<<< * * */ /*else:*/ { __pyx_r = 0; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":348 * try: * pyparser._on_chunk_header() * except BaseException as ex: # <<<<<<<<<<<<<< * pyparser._last_error = ex * return -1 */ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); if (__pyx_t_5) { __Pyx_AddTraceback("httptools.parser.parser.cb_on_chunk_header", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 348, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_t_6); __pyx_v_ex = __pyx_t_6; /*try:*/ { /* "httptools/parser/parser.pyx":349 * pyparser._on_chunk_header() * except 
BaseException as ex: * pyparser._last_error = ex # <<<<<<<<<<<<<< * return -1 * else: */ __Pyx_INCREF(__pyx_v_ex); __Pyx_GIVEREF(__pyx_v_ex); __Pyx_GOTREF(__pyx_v_pyparser->_last_error); __Pyx_DECREF(__pyx_v_pyparser->_last_error); __pyx_v_pyparser->_last_error = __pyx_v_ex; /* "httptools/parser/parser.pyx":350 * except BaseException as ex: * pyparser._last_error = ex * return -1 # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = -1; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L13_return; } /* "httptools/parser/parser.pyx":348 * try: * pyparser._on_chunk_header() * except BaseException as ex: # <<<<<<<<<<<<<< * pyparser._last_error = ex * return -1 */ /*finally:*/ { __pyx_L13_return: { __pyx_t_5 = __pyx_r; __Pyx_DECREF(__pyx_v_ex); __pyx_v_ex = NULL; __pyx_r = __pyx_t_5; goto __pyx_L6_except_return; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "httptools/parser/parser.pyx":346 * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_chunk_header() * except BaseException as ex: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "httptools/parser/parser.pyx":344 * * * cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< * cdef HttpParser pyparser = <HttpParser>parser.data * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("httptools.parser.parser.cb_on_chunk_header", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); __Pyx_XDECREF(__pyx_v_ex); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":355 * * * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< * cdef HttpParser pyparser = <HttpParser>parser.data * try: */ static int __pyx_f_9httptools_6parser_6parser_cb_on_chunk_complete(llhttp_t *__pyx_v_parser) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *__pyx_v_pyparser = 0; PyObject *__pyx_v_ex = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cb_on_chunk_complete", 0); /* "httptools/parser/parser.pyx":356 * * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<< * try: * pyparser._on_chunk_complete() */ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data); __Pyx_INCREF(__pyx_t_1); __pyx_v_pyparser = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":357 * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_chunk_complete() * except BaseException as ex: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign 
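/* Same trampoline shape as cb_on_chunk_header: _on_chunk_complete is
 * dispatched through the HttpParser vtable (a cdef method rather than a
 * stored Python callable), with any exception parked on
 * pyparser._last_error and -1 returned to llhttp. */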
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "httptools/parser/parser.pyx":358 * cdef HttpParser pyparser = <HttpParser>parser.data * try: * pyparser._on_chunk_complete() # <<<<<<<<<<<<<< * except BaseException as ex: * pyparser._last_error = ex */ __pyx_t_1 = ((struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_chunk_complete(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 358, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":357 * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_chunk_complete() * except BaseException as ex: */ } /* "httptools/parser/parser.pyx":363 * return -1 * else: * return 0 # <<<<<<<<<<<<<< * * */ /*else:*/ { __pyx_r = 0; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":359 * try: * pyparser._on_chunk_complete() * except BaseException as ex: # <<<<<<<<<<<<<< * pyparser._last_error = ex * return -1 */ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException); if (__pyx_t_5) { __Pyx_AddTraceback("httptools.parser.parser.cb_on_chunk_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 359, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_t_6); __pyx_v_ex = __pyx_t_6; /*try:*/ { /* "httptools/parser/parser.pyx":360 * pyparser._on_chunk_complete() * except BaseException as ex: * pyparser._last_error = ex # <<<<<<<<<<<<<< * return -1 * else: */ __Pyx_INCREF(__pyx_v_ex); __Pyx_GIVEREF(__pyx_v_ex); __Pyx_GOTREF(__pyx_v_pyparser->_last_error); __Pyx_DECREF(__pyx_v_pyparser->_last_error); __pyx_v_pyparser->_last_error = __pyx_v_ex; /* "httptools/parser/parser.pyx":361 * except BaseException as ex: * pyparser._last_error = ex * return -1 # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = -1; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L13_return; } /* "httptools/parser/parser.pyx":359 * try: * pyparser._on_chunk_complete() * except BaseException as ex: # <<<<<<<<<<<<<< * pyparser._last_error = ex * return -1 */ /*finally:*/ { __pyx_L13_return: { __pyx_t_5 = __pyx_r; __Pyx_DECREF(__pyx_v_ex); __pyx_v_ex = NULL; __pyx_r = __pyx_t_5; goto __pyx_L6_except_return; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "httptools/parser/parser.pyx":357 * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: * cdef HttpParser pyparser = <HttpParser>parser.data * try: # <<<<<<<<<<<<<< * pyparser._on_chunk_complete() * except BaseException as ex: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "httptools/parser/parser.pyx":355 * * * cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1: # <<<<<<<<<<<<<< * cdef HttpParser pyparser = <HttpParser>parser.data * try: */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("httptools.parser.parser.cb_on_chunk_complete", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pyparser); __Pyx_XDECREF(__pyx_v_ex); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "httptools/parser/parser.pyx":366 * * * cdef parser_error_from_errno(cparser.llhttp_t* parser, cparser.llhttp_errno_t errno): # <<<<<<<<<<<<<< * cdef bytes reason = cparser.llhttp_get_error_reason(parser) * */ static PyObject *__pyx_f_9httptools_6parser_6parser_parser_error_from_errno(llhttp_t *__pyx_v_parser, llhttp_errno_t __pyx_v_errno) { PyObject *__pyx_v_reason = 0; PyObject *__pyx_v_cls = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("parser_error_from_errno", 0); /* "httptools/parser/parser.pyx":367 * * cdef parser_error_from_errno(cparser.llhttp_t* parser, cparser.llhttp_errno_t errno): * cdef bytes reason = cparser.llhttp_get_error_reason(parser) # <<<<<<<<<<<<<< * * if errno in (cparser.HPE_CB_MESSAGE_BEGIN, */ __pyx_t_1 = __Pyx_PyBytes_FromString(llhttp_get_error_reason(__pyx_v_parser)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 367, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_reason = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":369 * cdef bytes reason = cparser.llhttp_get_error_reason(parser) * * if errno in (cparser.HPE_CB_MESSAGE_BEGIN, # <<<<<<<<<<<<<< * cparser.HPE_CB_HEADERS_COMPLETE, * cparser.HPE_CB_MESSAGE_COMPLETE, */ switch (__pyx_v_errno) { case HPE_CB_MESSAGE_BEGIN: case HPE_CB_HEADERS_COMPLETE: /* "httptools/parser/parser.pyx":370 * * if errno in (cparser.HPE_CB_MESSAGE_BEGIN, * cparser.HPE_CB_HEADERS_COMPLETE, # <<<<<<<<<<<<<< * cparser.HPE_CB_MESSAGE_COMPLETE, * cparser.HPE_CB_CHUNK_HEADER, */ case HPE_CB_MESSAGE_COMPLETE: /* "httptools/parser/parser.pyx":371 * if errno in (cparser.HPE_CB_MESSAGE_BEGIN, * cparser.HPE_CB_HEADERS_COMPLETE, * cparser.HPE_CB_MESSAGE_COMPLETE, # <<<<<<<<<<<<<< * cparser.HPE_CB_CHUNK_HEADER, * cparser.HPE_CB_CHUNK_COMPLETE, */ case HPE_CB_CHUNK_HEADER: /* "httptools/parser/parser.pyx":372 * cparser.HPE_CB_HEADERS_COMPLETE, * cparser.HPE_CB_MESSAGE_COMPLETE, * cparser.HPE_CB_CHUNK_HEADER, # <<<<<<<<<<<<<< * cparser.HPE_CB_CHUNK_COMPLETE, * cparser.HPE_USER): */ case HPE_CB_CHUNK_COMPLETE: /* "httptools/parser/parser.pyx":373 * cparser.HPE_CB_MESSAGE_COMPLETE, * cparser.HPE_CB_CHUNK_HEADER, * cparser.HPE_CB_CHUNK_COMPLETE, # <<<<<<<<<<<<<< * cparser.HPE_USER): * cls = HttpParserCallbackError */ case HPE_USER: /* "httptools/parser/parser.pyx":375 * cparser.HPE_CB_CHUNK_COMPLETE, * cparser.HPE_USER): * cls = HttpParserCallbackError # <<<<<<<<<<<<<< * * elif errno == cparser.HPE_INVALID_STATUS: */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpParserCallbackError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 375, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cls = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":369 * cdef bytes reason = cparser.llhttp_get_error_reason(parser) * * if errno in (cparser.HPE_CB_MESSAGE_BEGIN, # <<<<<<<<<<<<<< * cparser.HPE_CB_HEADERS_COMPLETE, * cparser.HPE_CB_MESSAGE_COMPLETE, */ break; case HPE_INVALID_STATUS: /* "httptools/parser/parser.pyx":378 * * elif errno == cparser.HPE_INVALID_STATUS: * cls = 
HttpParserInvalidStatusError # <<<<<<<<<<<<<< * * elif errno == cparser.HPE_INVALID_METHOD: */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpParserInvalidStatusError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 378, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cls = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":377 * cls = HttpParserCallbackError * * elif errno == cparser.HPE_INVALID_STATUS: # <<<<<<<<<<<<<< * cls = HttpParserInvalidStatusError * */ break; case HPE_INVALID_METHOD: /* "httptools/parser/parser.pyx":381 * * elif errno == cparser.HPE_INVALID_METHOD: * cls = HttpParserInvalidMethodError # <<<<<<<<<<<<<< * * elif errno == cparser.HPE_INVALID_URL: */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpParserInvalidMethodError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cls = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":380 * cls = HttpParserInvalidStatusError * * elif errno == cparser.HPE_INVALID_METHOD: # <<<<<<<<<<<<<< * cls = HttpParserInvalidMethodError * */ break; case HPE_INVALID_URL: /* "httptools/parser/parser.pyx":384 * * elif errno == cparser.HPE_INVALID_URL: * cls = HttpParserInvalidURLError # <<<<<<<<<<<<<< * * else: */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpParserInvalidURLError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 384, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cls = __pyx_t_1; __pyx_t_1 = 0; /* "httptools/parser/parser.pyx":383 * cls = HttpParserInvalidMethodError * * elif errno == cparser.HPE_INVALID_URL: # <<<<<<<<<<<<<< * cls = HttpParserInvalidURLError * */ break; default: /* "httptools/parser/parser.pyx":387 * * else: * cls = HttpParserError # <<<<<<<<<<<<<< * * return cls(reason.decode('latin-1')) */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpParserError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cls = __pyx_t_1; __pyx_t_1 = 0; break; } /* "httptools/parser/parser.pyx":389 * cls = HttpParserError * * return cls(reason.decode('latin-1')) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_decode_bytes(__pyx_v_reason, 0, PY_SSIZE_T_MAX, NULL, NULL, PyUnicode_DecodeLatin1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_cls); __pyx_t_3 = __pyx_v_cls; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_4) ? 
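/* Recap of the errno-to-exception mapping implemented by the switch above
 * (per the quoted parser.pyx lines 369-387):
 *   HPE_CB_* callback errors and HPE_USER -> HttpParserCallbackError
 *   HPE_INVALID_STATUS                    -> HttpParserInvalidStatusError
 *   HPE_INVALID_METHOD                    -> HttpParserInvalidMethodError
 *   HPE_INVALID_URL                       -> HttpParserInvalidURLError
 *   anything else                         -> HttpParserError
 * The llhttp reason string is decoded as latin-1 before the chosen class is
 * instantiated (the call being assembled immediately below). */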
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "httptools/parser/parser.pyx":366 * * * cdef parser_error_from_errno(cparser.llhttp_t* parser, cparser.llhttp_errno_t errno): # <<<<<<<<<<<<<< * cdef bytes reason = cparser.llhttp_get_error_reason(parser) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("httptools.parser.parser.parser_error_from_errno", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_reason); __Pyx_XDECREF(__pyx_v_cls); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser __pyx_vtable_9httptools_6parser_6parser_HttpParser; static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpParser(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)o); p->__pyx_vtab = __pyx_vtabptr_9httptools_6parser_6parser_HttpParser; p->_current_header_name = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_current_header_value = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_proto_on_url = Py_None; Py_INCREF(Py_None); p->_proto_on_status = Py_None; Py_INCREF(Py_None); p->_proto_on_body = Py_None; Py_INCREF(Py_None); p->_proto_on_header = Py_None; Py_INCREF(Py_None); p->_proto_on_headers_complete = Py_None; Py_INCREF(Py_None); p->_proto_on_message_complete = Py_None; Py_INCREF(Py_None); p->_proto_on_chunk_header = Py_None; Py_INCREF(Py_None); p->_proto_on_chunk_complete = Py_None; Py_INCREF(Py_None); p->_proto_on_message_begin = Py_None; Py_INCREF(Py_None); p->_last_error = Py_None; Py_INCREF(Py_None); p->py_buf.obj = NULL; if (unlikely(__pyx_pw_9httptools_6parser_6parser_10HttpParser_1__cinit__(o, __pyx_empty_tuple, NULL) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_9httptools_6parser_6parser_HttpParser(PyObject *o) { struct __pyx_obj_9httptools_6parser_6parser_HttpParser *p = (struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_pw_9httptools_6parser_6parser_10HttpParser_3__dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->_current_header_name); Py_CLEAR(p->_current_header_value); Py_CLEAR(p->_proto_on_url); Py_CLEAR(p->_proto_on_status); Py_CLEAR(p->_proto_on_body); Py_CLEAR(p->_proto_on_header); Py_CLEAR(p->_proto_on_headers_complete); Py_CLEAR(p->_proto_on_message_complete); Py_CLEAR(p->_proto_on_chunk_header); 
Py_CLEAR(p->_proto_on_chunk_complete); Py_CLEAR(p->_proto_on_message_begin); Py_CLEAR(p->_last_error); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_9httptools_6parser_6parser_HttpParser(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_9httptools_6parser_6parser_HttpParser *p = (struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)o; if (p->_proto_on_url) { e = (*v)(p->_proto_on_url, a); if (e) return e; } if (p->_proto_on_status) { e = (*v)(p->_proto_on_status, a); if (e) return e; } if (p->_proto_on_body) { e = (*v)(p->_proto_on_body, a); if (e) return e; } if (p->_proto_on_header) { e = (*v)(p->_proto_on_header, a); if (e) return e; } if (p->_proto_on_headers_complete) { e = (*v)(p->_proto_on_headers_complete, a); if (e) return e; } if (p->_proto_on_message_complete) { e = (*v)(p->_proto_on_message_complete, a); if (e) return e; } if (p->_proto_on_chunk_header) { e = (*v)(p->_proto_on_chunk_header, a); if (e) return e; } if (p->_proto_on_chunk_complete) { e = (*v)(p->_proto_on_chunk_complete, a); if (e) return e; } if (p->_proto_on_message_begin) { e = (*v)(p->_proto_on_message_begin, a); if (e) return e; } if (p->_last_error) { e = (*v)(p->_last_error, a); if (e) return e; } if (p->py_buf.obj) { e = (*v)(p->py_buf.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_9httptools_6parser_6parser_HttpParser(PyObject *o) { PyObject* tmp; struct __pyx_obj_9httptools_6parser_6parser_HttpParser *p = (struct __pyx_obj_9httptools_6parser_6parser_HttpParser *)o; tmp = ((PyObject*)p->_proto_on_url); p->_proto_on_url = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_proto_on_status); p->_proto_on_status = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_proto_on_body); p->_proto_on_body = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_proto_on_header); p->_proto_on_header = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_proto_on_headers_complete); p->_proto_on_headers_complete = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_proto_on_message_complete); p->_proto_on_message_complete = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_proto_on_chunk_header); p->_proto_on_chunk_header = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_proto_on_chunk_complete); p->_proto_on_chunk_complete = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_proto_on_message_begin); p->_proto_on_message_begin = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_last_error); p->_last_error = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->py_buf.obj); return 0; } static PyMethodDef __pyx_methods_9httptools_6parser_6parser_HttpParser[] = { {"get_http_version", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_5get_http_version, METH_NOARGS, 0}, {"should_keep_alive", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_7should_keep_alive, METH_NOARGS, 0}, {"should_upgrade", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_9should_upgrade, METH_NOARGS, 0}, {"feed_data", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_11feed_data, METH_O, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_13__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_10HttpParser_15__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject 
__pyx_type_9httptools_6parser_6parser_HttpParser = {
  PyVarObject_HEAD_INIT(0, 0)
  "httptools.parser.parser.HttpParser", /*tp_name*/
  sizeof(struct __pyx_obj_9httptools_6parser_6parser_HttpParser), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_9httptools_6parser_6parser_HttpParser, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
  0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
  0, /*tp_vectorcall_offset*/
#endif
  0, /*tp_getattr*/
  0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
#endif
  0, /*tp_repr*/
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_9httptools_6parser_6parser_HttpParser, /*tp_traverse*/
  __pyx_tp_clear_9httptools_6parser_6parser_HttpParser, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_9httptools_6parser_6parser_HttpParser, /*tp_methods*/
  0, /*tp_members*/
  0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_9httptools_6parser_6parser_HttpParser, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
  0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
  0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
  0, /*tp_pypy_flags*/
#endif
};

static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpRequestParser __pyx_vtable_9httptools_6parser_6parser_HttpRequestParser;

static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpRequestParser(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *p;
  PyObject *o = __pyx_tp_new_9httptools_6parser_6parser_HttpParser(t, a, k);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser *)o);
  p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser*)__pyx_vtabptr_9httptools_6parser_6parser_HttpRequestParser;
  return o;
}

static PyMethodDef __pyx_methods_9httptools_6parser_6parser_HttpRequestParser[] = {
  {"get_method", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_3get_method, METH_NOARGS, 0},
  {"__reduce_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_5__reduce_cython__, METH_NOARGS, 0},
  {"__setstate_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_7__setstate_cython__, METH_O, 0},
  {0, 0, 0, 0}
};

static PyTypeObject __pyx_type_9httptools_6parser_6parser_HttpRequestParser = {
  PyVarObject_HEAD_INIT(0, 0)
  "httptools.parser.parser.HttpRequestParser", /*tp_name*/
  sizeof(struct __pyx_obj_9httptools_6parser_6parser_HttpRequestParser), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_9httptools_6parser_6parser_HttpParser, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
  0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
  0, /*tp_vectorcall_offset*/
#endif
  0, /*tp_getattr*/
  0,
/*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_9httptools_6parser_6parser_HttpParser, /*tp_traverse*/ __pyx_tp_clear_9httptools_6parser_6parser_HttpParser, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_9httptools_6parser_6parser_HttpRequestParser, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_pw_9httptools_6parser_6parser_17HttpRequestParser_1__init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_9httptools_6parser_6parser_HttpRequestParser, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif }; static struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpResponseParser __pyx_vtable_9httptools_6parser_6parser_HttpResponseParser; static PyObject *__pyx_tp_new_9httptools_6parser_6parser_HttpResponseParser(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *p; PyObject *o = __pyx_tp_new_9httptools_6parser_6parser_HttpParser(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_9httptools_6parser_6parser_HttpParser*)__pyx_vtabptr_9httptools_6parser_6parser_HttpResponseParser; return o; } static PyMethodDef __pyx_methods_9httptools_6parser_6parser_HttpResponseParser[] = { {"get_status_code", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_3get_status_code, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_5__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_7__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type_9httptools_6parser_6parser_HttpResponseParser = { PyVarObject_HEAD_INIT(0, 0) "httptools.parser.parser.HttpResponseParser", /*tp_name*/ sizeof(struct __pyx_obj_9httptools_6parser_6parser_HttpResponseParser), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_9httptools_6parser_6parser_HttpParser, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ 
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_9httptools_6parser_6parser_HttpParser, /*tp_traverse*/ __pyx_tp_clear_9httptools_6parser_6parser_HttpParser, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_9httptools_6parser_6parser_HttpResponseParser, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_pw_9httptools_6parser_6parser_18HttpResponseParser_1__init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_9httptools_6parser_6parser_HttpResponseParser, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_parser(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_parser}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "parser", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_, __pyx_k_, sizeof(__pyx_k_), 0, 1, 0, 0}, {&__pyx_n_s_BaseException, __pyx_k_BaseException, sizeof(__pyx_k_BaseException), 0, 0, 1, 1}, {&__pyx_n_s_HttpParserCallbackError, __pyx_k_HttpParserCallbackError, sizeof(__pyx_k_HttpParserCallbackError), 0, 0, 1, 1}, {&__pyx_n_s_HttpParserError, __pyx_k_HttpParserError, sizeof(__pyx_k_HttpParserError), 0, 0, 1, 1}, {&__pyx_n_s_HttpParserInvalidMethodError, __pyx_k_HttpParserInvalidMethodError, sizeof(__pyx_k_HttpParserInvalidMethodError), 0, 0, 1, 1}, {&__pyx_n_s_HttpParserInvalidStatusError, __pyx_k_HttpParserInvalidStatusError, sizeof(__pyx_k_HttpParserInvalidStatusError), 0, 0, 1, 1}, {&__pyx_n_s_HttpParserInvalidURLError, __pyx_k_HttpParserInvalidURLError, sizeof(__pyx_k_HttpParserInvalidURLError), 0, 0, 1, 1}, {&__pyx_n_s_HttpParserUpgrade, __pyx_k_HttpParserUpgrade, sizeof(__pyx_k_HttpParserUpgrade), 0, 0, 1, 1}, {&__pyx_n_s_HttpRequestParser, __pyx_k_HttpRequestParser, sizeof(__pyx_k_HttpRequestParser), 0, 0, 1, 1}, {&__pyx_n_u_HttpRequestParser, __pyx_k_HttpRequestParser, sizeof(__pyx_k_HttpRequestParser), 0, 1, 0, 1}, {&__pyx_n_s_HttpResponseParser, __pyx_k_HttpResponseParser, 
sizeof(__pyx_k_HttpResponseParser), 0, 0, 1, 1}, {&__pyx_n_u_HttpResponseParser, __pyx_k_HttpResponseParser, sizeof(__pyx_k_HttpResponseParser), 0, 1, 0, 1}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_context, __pyx_k_context, sizeof(__pyx_k_context), 0, 0, 1, 1}, {&__pyx_n_s_errors, __pyx_k_errors, sizeof(__pyx_k_errors), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_kp_u_invalid_headers_state, __pyx_k_invalid_headers_state, sizeof(__pyx_k_invalid_headers_state), 0, 1, 0, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_u_on_body, __pyx_k_on_body, sizeof(__pyx_k_on_body), 0, 1, 0, 1}, {&__pyx_n_u_on_chunk_complete, __pyx_k_on_chunk_complete, sizeof(__pyx_k_on_chunk_complete), 0, 1, 0, 1}, {&__pyx_n_u_on_chunk_header, __pyx_k_on_chunk_header, sizeof(__pyx_k_on_chunk_header), 0, 1, 0, 1}, {&__pyx_n_u_on_header, __pyx_k_on_header, sizeof(__pyx_k_on_header), 0, 1, 0, 1}, {&__pyx_n_u_on_headers_complete, __pyx_k_on_headers_complete, sizeof(__pyx_k_on_headers_complete), 0, 1, 0, 1}, {&__pyx_n_u_on_message_begin, __pyx_k_on_message_begin, sizeof(__pyx_k_on_message_begin), 0, 1, 0, 1}, {&__pyx_n_u_on_message_complete, __pyx_k_on_message_complete, sizeof(__pyx_k_on_message_complete), 0, 1, 0, 1}, {&__pyx_n_u_on_status, __pyx_k_on_status, sizeof(__pyx_k_on_status), 0, 1, 0, 1}, {&__pyx_n_u_on_url, __pyx_k_on_url, sizeof(__pyx_k_on_url), 0, 1, 0, 1}, {&__pyx_n_s_protocol, __pyx_k_protocol, sizeof(__pyx_k_protocol), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(0, 48, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_BaseException = __Pyx_GetBuiltinName(__pyx_n_s_BaseException); if (!__pyx_builtin_BaseException) __PYX_ERR(0, 247, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no 
default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "httptools/parser/parser.pyx":22 * * * __all__ = ('HttpRequestParser', 'HttpResponseParser') # <<<<<<<<<<<<<< * * */ __pyx_tuple__8 = PyTuple_Pack(2, __pyx_n_u_HttpRequestParser, __pyx_n_u_HttpResponseParser); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); 
/*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_9httptools_6parser_6parser_HttpParser = &__pyx_vtable_9httptools_6parser_6parser_HttpParser; __pyx_vtable_9httptools_6parser_6parser_HttpParser._init = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *, llhttp_type_t))__pyx_f_9httptools_6parser_6parser_10HttpParser__init; __pyx_vtable_9httptools_6parser_6parser_HttpParser._maybe_call_on_header = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *))__pyx_f_9httptools_6parser_6parser_10HttpParser__maybe_call_on_header; __pyx_vtable_9httptools_6parser_6parser_HttpParser._on_header_field = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *))__pyx_f_9httptools_6parser_6parser_10HttpParser__on_header_field; __pyx_vtable_9httptools_6parser_6parser_HttpParser._on_header_value = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *, PyObject *))__pyx_f_9httptools_6parser_6parser_10HttpParser__on_header_value; __pyx_vtable_9httptools_6parser_6parser_HttpParser._on_headers_complete = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *))__pyx_f_9httptools_6parser_6parser_10HttpParser__on_headers_complete; __pyx_vtable_9httptools_6parser_6parser_HttpParser._on_chunk_header = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *))__pyx_f_9httptools_6parser_6parser_10HttpParser__on_chunk_header; __pyx_vtable_9httptools_6parser_6parser_HttpParser._on_chunk_complete = (PyObject *(*)(struct __pyx_obj_9httptools_6parser_6parser_HttpParser *))__pyx_f_9httptools_6parser_6parser_10HttpParser__on_chunk_complete; if (PyType_Ready(&__pyx_type_9httptools_6parser_6parser_HttpParser) < 0) __PYX_ERR(0, 26, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_9httptools_6parser_6parser_HttpParser.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_9httptools_6parser_6parser_HttpParser.tp_dictoffset && __pyx_type_9httptools_6parser_6parser_HttpParser.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_9httptools_6parser_6parser_HttpParser.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type_9httptools_6parser_6parser_HttpParser.tp_dict, __pyx_vtabptr_9httptools_6parser_6parser_HttpParser) < 0) __PYX_ERR(0, 26, __pyx_L1_error) if 
(__Pyx_setup_reduce((PyObject*)&__pyx_type_9httptools_6parser_6parser_HttpParser) < 0) __PYX_ERR(0, 26, __pyx_L1_error) __pyx_ptype_9httptools_6parser_6parser_HttpParser = &__pyx_type_9httptools_6parser_6parser_HttpParser; __pyx_vtabptr_9httptools_6parser_6parser_HttpRequestParser = &__pyx_vtable_9httptools_6parser_6parser_HttpRequestParser; __pyx_vtable_9httptools_6parser_6parser_HttpRequestParser.__pyx_base = *__pyx_vtabptr_9httptools_6parser_6parser_HttpParser; __pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_base = __pyx_ptype_9httptools_6parser_6parser_HttpParser; if (PyType_Ready(&__pyx_type_9httptools_6parser_6parser_HttpRequestParser) < 0) __PYX_ERR(0, 215, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_dictoffset && __pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type_9httptools_6parser_6parser_HttpRequestParser.tp_dict, __pyx_vtabptr_9httptools_6parser_6parser_HttpRequestParser) < 0) __PYX_ERR(0, 215, __pyx_L1_error) if (PyObject_SetAttr(__pyx_m, __pyx_n_s_HttpRequestParser, (PyObject *)&__pyx_type_9httptools_6parser_6parser_HttpRequestParser) < 0) __PYX_ERR(0, 215, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_9httptools_6parser_6parser_HttpRequestParser) < 0) __PYX_ERR(0, 215, __pyx_L1_error) __pyx_ptype_9httptools_6parser_6parser_HttpRequestParser = &__pyx_type_9httptools_6parser_6parser_HttpRequestParser; __pyx_vtabptr_9httptools_6parser_6parser_HttpResponseParser = &__pyx_vtable_9httptools_6parser_6parser_HttpResponseParser; __pyx_vtable_9httptools_6parser_6parser_HttpResponseParser.__pyx_base = *__pyx_vtabptr_9httptools_6parser_6parser_HttpParser; __pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_base = __pyx_ptype_9httptools_6parser_6parser_HttpParser; if (PyType_Ready(&__pyx_type_9httptools_6parser_6parser_HttpResponseParser) < 0) __PYX_ERR(0, 229, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_dictoffset && __pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type_9httptools_6parser_6parser_HttpResponseParser.tp_dict, __pyx_vtabptr_9httptools_6parser_6parser_HttpResponseParser) < 0) __PYX_ERR(0, 229, __pyx_L1_error) if (PyObject_SetAttr(__pyx_m, __pyx_n_s_HttpResponseParser, (PyObject *)&__pyx_type_9httptools_6parser_6parser_HttpResponseParser) < 0) __PYX_ERR(0, 229, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_9httptools_6parser_6parser_HttpResponseParser) < 0) __PYX_ERR(0, 229, __pyx_L1_error) __pyx_ptype_9httptools_6parser_6parser_HttpResponseParser = &__pyx_type_9httptools_6parser_6parser_HttpResponseParser; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = 
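/* The __Pyx_modinit_type_init_code() routine above applies the standard
 * Cython recipe to each of the three extension types: fill in the C-level
 * vtable, PyType_Ready() the type, attach the vtable to tp_dict via
 * __Pyx_SetVtable(), and register __reduce__ handling.  Only
 * HttpRequestParser and HttpResponseParser are published on the module via
 * PyObject_SetAttr; the HttpParser base type stays internal, matching
 * __all__ in parser.pyx. */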
NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4bool_bool) __PYX_ERR(3, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(4, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_7complex_complex) __PYX_ERR(4, 15, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initparser(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initparser(void) #else __Pyx_PyMODINIT_FUNC PyInit_parser(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_parser(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? 
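/* Note: __Pyx_check_single_interpreter() pins this extension to the first
 * interpreter that imports it and raises ImportError from any other one,
 * because the generated code keeps C-level global state (cached constants
 * and tuples, the static type objects, the module dict pointer) that cannot
 * safely be duplicated across sub-interpreters. */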
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_parser(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'parser' has already been imported. 
Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_parser(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS PyEval_InitThreads(); #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("parser", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_httptools__parser__parser) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "httptools.parser.parser")) { if (unlikely(PyDict_SetItemString(modules, "httptools.parser.parser", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "httptools/parser/parser.pyx":11 * * * from .errors import (HttpParserError, # <<<<<<<<<<<<<< * HttpParserCallbackError, * HttpParserInvalidStatusError, */ __pyx_t_1 = PyList_New(6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_HttpParserError); __Pyx_GIVEREF(__pyx_n_s_HttpParserError); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_HttpParserError); __Pyx_INCREF(__pyx_n_s_HttpParserCallbackError); __Pyx_GIVEREF(__pyx_n_s_HttpParserCallbackError); PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_HttpParserCallbackError); __Pyx_INCREF(__pyx_n_s_HttpParserInvalidStatusError); __Pyx_GIVEREF(__pyx_n_s_HttpParserInvalidStatusError); PyList_SET_ITEM(__pyx_t_1, 2, __pyx_n_s_HttpParserInvalidStatusError); __Pyx_INCREF(__pyx_n_s_HttpParserInvalidMethodError); __Pyx_GIVEREF(__pyx_n_s_HttpParserInvalidMethodError); PyList_SET_ITEM(__pyx_t_1, 3, __pyx_n_s_HttpParserInvalidMethodError); __Pyx_INCREF(__pyx_n_s_HttpParserInvalidURLError); __Pyx_GIVEREF(__pyx_n_s_HttpParserInvalidURLError); PyList_SET_ITEM(__pyx_t_1, 4, __pyx_n_s_HttpParserInvalidURLError); __Pyx_INCREF(__pyx_n_s_HttpParserUpgrade); __Pyx_GIVEREF(__pyx_n_s_HttpParserUpgrade); PyList_SET_ITEM(__pyx_t_1, 5, __pyx_n_s_HttpParserUpgrade); __pyx_t_2 = __Pyx_Import(__pyx_n_s_errors, __pyx_t_1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserError, __pyx_t_1) < 0) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserCallbackError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserCallbackError, __pyx_t_1) < 0) __PYX_ERR(0, 12, __pyx_L1_error) 
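/* Each of the names imported from .errors is stored into this module's dict
 * as it is imported (the block continues below); that is what lets
 * parser_error_from_errno() resolve the exception classes at runtime via
 * the __Pyx_GetModuleGlobalName() lookups seen earlier in this file. */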
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserInvalidStatusError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserInvalidStatusError, __pyx_t_1) < 0) __PYX_ERR(0, 13, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserInvalidMethodError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserInvalidMethodError, __pyx_t_1) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserInvalidURLError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserInvalidURLError, __pyx_t_1) < 0) __PYX_ERR(0, 15, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_HttpParserUpgrade); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpParserUpgrade, __pyx_t_1) < 0) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "httptools/parser/parser.pyx":22 * * * __all__ = ('HttpRequestParser', 'HttpResponseParser') # <<<<<<<<<<<<<< * * */ if (PyDict_SetItem(__pyx_d, __pyx_n_s_all, __pyx_tuple__8) < 0) __PYX_ERR(0, 22, __pyx_L1_error) /* "httptools/parser/parser.pyx":1 * #cython: language_level=3 # <<<<<<<<<<<<<< * * from __future__ import print_function */ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init httptools.parser.parser", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init httptools.parser.parser"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* KeywordStringCheck */ static int __Pyx_CheckKeywordStrings( PyObject *kwdict, const char* function_name, int kw_allowed) { PyObject* key = 0; Py_ssize_t pos = 0; #if CYTHON_COMPILING_IN_PYPY if (!kw_allowed && PyDict_Next(kwdict, &pos, &key, 0)) goto invalid_keyword; return 1; #else while (PyDict_Next(kwdict, &pos, &key, 0)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyString_Check(key))) #endif if (unlikely(!PyUnicode_Check(key))) goto invalid_keyword_type; } if ((!kw_allowed) && unlikely(key)) goto invalid_keyword; return 1; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); return 0; #endif invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif return 0; } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); } /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != 
NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = Py_TYPE(func)->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) #else if (likely(PyCFunction_Check(func))) #endif { if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (__Pyx_PyFastCFunction_Check(func)) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? 
__PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { 
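/* "raise instance" form: the instance becomes the exception value and its
   class the exception type; a class that does not derive from BaseException
   is rejected below, matching CPython's own rule for raise. */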
PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION 
>= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); 
Py_XDECREF(tmp_tb); } #endif /* PyObjectSetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_setattro)) return tp->tp_setattro(obj, attr_name, value); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_setattr)) return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); #endif return PyObject_SetAttr(obj, attr_name, value); } #endif /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* decode_c_bytes */ static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes( const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { if (unlikely((start < 0) | (stop < 0))) { if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (stop > length) stop = length; if (unlikely(stop <= start)) return __Pyx_NewRef(__pyx_empty_unicode); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) 
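/* Only a pending AttributeError is swallowed here; any other exception is
   deliberately left set so the caller can still observe it. */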
__Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { __Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", 
((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size) { PyObject *result = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif result = PyObject_GetAttrString(module, class_name); if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if ((size_t)basicsize < size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. 
" "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(result); return NULL; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = NULL; PyObject *py_funcname = NULL; #if PY_MAJOR_VERSION < 3 PyObject *py_srcfile = NULL; py_srcfile = PyString_FromString(filename); if (!py_srcfile) goto bad; #endif if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); if (!py_funcname) goto bad; #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); if (!py_funcname) goto bad; funcname = PyUnicode_AsUTF8(py_funcname); if 
(!funcname) goto bad; #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); if (!py_funcname) goto bad; #endif } #if PY_MAJOR_VERSION < 3 py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); #else py_code = PyCode_NewEmpty(filename, funcname, py_line); #endif Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline return py_code; bad: Py_XDECREF(py_funcname); #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_srcfile); #endif return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint8_t(uint8_t value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const uint8_t neg_one = (uint8_t) -1, const_zero = (uint8_t) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(uint8_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(uint8_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint8_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(uint8_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint8_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(uint8_t), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } 
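/* Signed path: pick the narrowest CPython constructor that can represent the
   value -- PyInt_FromLong() when a C long suffices (on Python 3, Cython
   aliases this to PyLong_FromLong), otherwise PyLong_FromLongLong() -- with
   the byte-array fallback at the end covering any remaining exotic integer
   sizes. Illustrative: __Pyx_PyInt_From_int(-7) takes this branch. */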
else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_ptrdiff_t(ptrdiff_t value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const ptrdiff_t neg_one = (ptrdiff_t) -1, const_zero = (ptrdiff_t) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(ptrdiff_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(ptrdiff_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(ptrdiff_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(ptrdiff_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(ptrdiff_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(ptrdiff_t), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint16_t(uint16_t value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const uint16_t neg_one = (uint16_t) -1, const_zero = (uint16_t) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(uint16_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(uint16_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint16_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(uint16_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint16_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(uint16_t), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else 
if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= 
sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } 
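/* Fallback for values too wide for the fast paths above: round-trip the
   PyLong through _PyLong_AsByteArray() into the native representation.
   PyPy lacks that private API, so a RuntimeError is raised there instead. */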
{ #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { 
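/* __PYX_VERIFY_RETURN_INT_EXC converts through a wider C type and raises
   OverflowError if narrowing back to the target type would lose bits. */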
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) 
PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return 
NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); #if PY_MAJOR_VERSION < 3 } else if (likely(PyInt_CheckExact(o))) { return PyInt_AS_LONG(o); #endif } else { Py_ssize_t ival; PyObject *x; x = PyNumber_Index(o); if (!x) return -1; ival = PyInt_AsLong(x); Py_DECREF(x); return ival; } } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
400,467
C
37.425254
310
0.588341
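The generated __Pyx_PyInt_As_int / __Pyx_PyInt_As_long helpers in the file above all follow one pattern: coerce the object to a Python integer, then range-check it against the width of the target C type, raising OverflowError when it does not fit. A minimal Python sketch of that narrowing step, assuming a 32-bit target for illustration (the width and the function name here are not part of the generated API):

def py_int_as_c_int(x, bits=32, unsigned=False):
    # Mirrors the __Pyx_PyNumber_IntOrLong coercion step.
    val = int(x)
    if unsigned:
        if val < 0:
            raise OverflowError("can't convert negative value to int")
        lo, hi = 0, 2 ** bits - 1
    else:
        lo, hi = -(2 ** (bits - 1)), 2 ** (bits - 1) - 1
    if not lo <= val <= hi:
        raise OverflowError("value too large to convert to int")
    return val

assert py_int_as_c_int(2 ** 31 - 1) == 2147483647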
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/_build_tables.py
#-----------------------------------------------------------------
# pycparser: _build_tables.py
#
# A dummy for generating the lexing/parsing tables and
# compiling them into .pyc for faster execution in optimized mode.
# Also generates AST code from the configuration file.
# Should be called from the pycparser directory.
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------

# Insert '.' and '..' as first entries to the search path for modules.
# Restricted environments like embeddable python do not include the
# current working directory on startup.
import sys
sys.path[0:0] = ['.', '..']

# Generate c_ast.py
from _ast_gen import ASTCodeGenerator
ast_gen = ASTCodeGenerator('_c_ast.cfg')
ast_gen.generate(open('c_ast.py', 'w'))

from pycparser import c_parser

# Generates the tables
#
c_parser.CParser(
    lex_optimize=True,
    yacc_debug=False,
    yacc_optimize=True)

# Load to compile into .pyc
#
import lextab
import yacctab
import c_ast
1,039
Python
26.36842
70
0.639076
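_build_tables.py works purely by side effect: it leaves lextab.py and yacctab.py (and the regenerated c_ast.py) on disk so that later parser construction skips table generation. A small sketch, assuming the pycparser package is importable, of the construction that reuses those pregenerated tables:

from pycparser import c_parser

# Same flags as _build_tables.py: PLY loads lextab/yacctab from disk
# instead of rebuilding the tables from the grammar on every start.
parser = c_parser.CParser(lex_optimize=True, yacc_optimize=True,
                          yacc_debug=False)
parser.parse("int x = 1;").show()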
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/_ast_gen.py
#----------------------------------------------------------------- # _ast_gen.py # # Generates the AST Node classes from a specification given in # a configuration file # # The design of this module was inspired by astgen.py from the # Python 2.5 code-base. # # Eli Bendersky [https://eli.thegreenplace.net/] # License: BSD #----------------------------------------------------------------- from string import Template class ASTCodeGenerator(object): def __init__(self, cfg_filename='_c_ast.cfg'): """ Initialize the code generator from a configuration file. """ self.cfg_filename = cfg_filename self.node_cfg = [NodeCfg(name, contents) for (name, contents) in self.parse_cfgfile(cfg_filename)] def generate(self, file=None): """ Generates the code into file, an open file buffer. """ src = Template(_PROLOGUE_COMMENT).substitute( cfg_filename=self.cfg_filename) src += _PROLOGUE_CODE for node_cfg in self.node_cfg: src += node_cfg.generate_source() + '\n\n' file.write(src) def parse_cfgfile(self, filename): """ Parse the configuration file and yield pairs of (name, contents) for each node. """ with open(filename, "r") as f: for line in f: line = line.strip() if not line or line.startswith('#'): continue colon_i = line.find(':') lbracket_i = line.find('[') rbracket_i = line.find(']') if colon_i < 1 or lbracket_i <= colon_i or rbracket_i <= lbracket_i: raise RuntimeError("Invalid line in %s:\n%s\n" % (filename, line)) name = line[:colon_i] val = line[lbracket_i + 1:rbracket_i] vallist = [v.strip() for v in val.split(',')] if val else [] yield name, vallist class NodeCfg(object): """ Node configuration. name: node name contents: a list of contents - attributes and child nodes See comment at the top of the configuration file for details. """ def __init__(self, name, contents): self.name = name self.all_entries = [] self.attr = [] self.child = [] self.seq_child = [] for entry in contents: clean_entry = entry.rstrip('*') self.all_entries.append(clean_entry) if entry.endswith('**'): self.seq_child.append(clean_entry) elif entry.endswith('*'): self.child.append(clean_entry) else: self.attr.append(entry) def generate_source(self): src = self._gen_init() src += '\n' + self._gen_children() src += '\n' + self._gen_iter() src += '\n' + self._gen_attr_names() return src def _gen_init(self): src = "class %s(Node):\n" % self.name if self.all_entries: args = ', '.join(self.all_entries) slots = ', '.join("'{0}'".format(e) for e in self.all_entries) slots += ", 'coord', '__weakref__'" arglist = '(self, %s, coord=None)' % args else: slots = "'coord', '__weakref__'" arglist = '(self, coord=None)' src += " __slots__ = (%s)\n" % slots src += " def __init__%s:\n" % arglist for name in self.all_entries + ['coord']: src += " self.%s = %s\n" % (name, name) return src def _gen_children(self): src = ' def children(self):\n' if self.all_entries: src += ' nodelist = []\n' for child in self.child: src += ( ' if self.%(child)s is not None:' + ' nodelist.append(("%(child)s", self.%(child)s))\n') % ( dict(child=child)) for seq_child in self.seq_child: src += ( ' for i, child in enumerate(self.%(child)s or []):\n' ' nodelist.append(("%(child)s[%%d]" %% i, child))\n') % ( dict(child=seq_child)) src += ' return tuple(nodelist)\n' else: src += ' return ()\n' return src def _gen_iter(self): src = ' def __iter__(self):\n' if self.all_entries: for child in self.child: src += ( ' if self.%(child)s is not None:\n' + ' yield self.%(child)s\n') % (dict(child=child)) for seq_child in self.seq_child: src += ( ' for child in (self.%(child)s or 
[]):\n' ' yield child\n') % (dict(child=seq_child)) if not (self.child or self.seq_child): # Empty generator src += ( ' return\n' + ' yield\n') else: # Empty generator src += ( ' return\n' + ' yield\n') return src def _gen_attr_names(self): src = " attr_names = (" + ''.join("%r, " % nm for nm in self.attr) + ')' return src _PROLOGUE_COMMENT = \ r'''#----------------------------------------------------------------- # ** ATTENTION ** # This code was automatically generated from the file: # $cfg_filename # # Do not modify it directly. Modify the configuration file and # run the generator again. # ** ** *** ** ** # # pycparser: c_ast.py # # AST Node classes. # # Eli Bendersky [https://eli.thegreenplace.net/] # License: BSD #----------------------------------------------------------------- ''' _PROLOGUE_CODE = r''' import sys def _repr(obj): """ Get the representation of an object, with dedicated pprint-like format for lists. """ if isinstance(obj, list): return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]' else: return repr(obj) class Node(object): __slots__ = () """ Abstract base class for AST nodes. """ def __repr__(self): """ Generates a python representation of the current node """ result = self.__class__.__name__ + '(' indent = '' separator = '' for name in self.__slots__[:-2]: result += separator result += indent result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__))))) separator = ',' indent = '\n ' + (' ' * len(self.__class__.__name__)) result += indent + ')' return result def children(self): """ A sequence of all children that are Nodes """ pass def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None): """ Pretty print the Node and all its attributes and children (recursively) to a buffer. buf: Open IO buffer into which the Node is printed. offset: Initial offset (amount of leading spaces) attrnames: True if you want to see the attribute names in name=value pairs. False to only see the values. nodenames: True if you want to see the actual node names within their parents. showcoord: Do you want the coordinates of each Node to be displayed. """ lead = ' ' * offset if nodenames and _my_node_name is not None: buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ') else: buf.write(lead + self.__class__.__name__+ ': ') if self.attr_names: if attrnames: nvlist = [(n, getattr(self,n)) for n in self.attr_names] attrstr = ', '.join('%s=%s' % nv for nv in nvlist) else: vlist = [getattr(self, n) for n in self.attr_names] attrstr = ', '.join('%s' % v for v in vlist) buf.write(attrstr) if showcoord: buf.write(' (at %s)' % self.coord) buf.write('\n') for (child_name, child) in self.children(): child.show( buf, offset=offset + 2, attrnames=attrnames, nodenames=nodenames, showcoord=showcoord, _my_node_name=child_name) class NodeVisitor(object): """ A base NodeVisitor class for visiting c_ast nodes. Subclass it and define your own visit_XXX methods, where XXX is the class name you want to visit with these methods. For example: class ConstantVisitor(NodeVisitor): def __init__(self): self.values = [] def visit_Constant(self, node): self.values.append(node.value) Creates a list of values of all the constant nodes encountered below the given node. To use it: cv = ConstantVisitor() cv.visit(node) Notes: * generic_visit() will be called for AST nodes for which no visit_XXX method was defined. 
* The children of nodes for which a visit_XXX was defined will not be visited - if you need this, call generic_visit() on the node. You can use: NodeVisitor.generic_visit(self, node) * Modeled after Python's own AST visiting facilities (the ast module of Python 3.0) """ _method_cache = None def visit(self, node): """ Visit a node. """ if self._method_cache is None: self._method_cache = {} visitor = self._method_cache.get(node.__class__.__name__, None) if visitor is None: method = 'visit_' + node.__class__.__name__ visitor = getattr(self, method, self.generic_visit) self._method_cache[node.__class__.__name__] = visitor return visitor(node) def generic_visit(self, node): """ Called if no explicit visitor function exists for a node. Implements preorder visiting of the node. """ for c in node: self.visit(c) '''
10,555
Python
30.323442
138
0.484889
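A toy run of the generator above, assuming it is executed from the pycparser source tree where _ast_gen is importable: a single configuration entry becomes a complete Node subclass.

from _ast_gen import NodeCfg

# 'op' is an attribute, 'left*' and 'right*' are single child nodes,
# exactly as the configuration-file syntax parsed by parse_cfgfile.
cfg = NodeCfg('BinaryOp', ['op', 'left*', 'right*'])
print(cfg.generate_source())  # prints "class BinaryOp(Node): ..." source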
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/c_ast.py
#----------------------------------------------------------------- # ** ATTENTION ** # This code was automatically generated from the file: # _c_ast.cfg # # Do not modify it directly. Modify the configuration file and # run the generator again. # ** ** *** ** ** # # pycparser: c_ast.py # # AST Node classes. # # Eli Bendersky [https://eli.thegreenplace.net/] # License: BSD #----------------------------------------------------------------- import sys def _repr(obj): """ Get the representation of an object, with dedicated pprint-like format for lists. """ if isinstance(obj, list): return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]' else: return repr(obj) class Node(object): __slots__ = () """ Abstract base class for AST nodes. """ def __repr__(self): """ Generates a python representation of the current node """ result = self.__class__.__name__ + '(' indent = '' separator = '' for name in self.__slots__[:-2]: result += separator result += indent result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__))))) separator = ',' indent = '\n ' + (' ' * len(self.__class__.__name__)) result += indent + ')' return result def children(self): """ A sequence of all children that are Nodes """ pass def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None): """ Pretty print the Node and all its attributes and children (recursively) to a buffer. buf: Open IO buffer into which the Node is printed. offset: Initial offset (amount of leading spaces) attrnames: True if you want to see the attribute names in name=value pairs. False to only see the values. nodenames: True if you want to see the actual node names within their parents. showcoord: Do you want the coordinates of each Node to be displayed. """ lead = ' ' * offset if nodenames and _my_node_name is not None: buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ') else: buf.write(lead + self.__class__.__name__+ ': ') if self.attr_names: if attrnames: nvlist = [(n, getattr(self,n)) for n in self.attr_names] attrstr = ', '.join('%s=%s' % nv for nv in nvlist) else: vlist = [getattr(self, n) for n in self.attr_names] attrstr = ', '.join('%s' % v for v in vlist) buf.write(attrstr) if showcoord: buf.write(' (at %s)' % self.coord) buf.write('\n') for (child_name, child) in self.children(): child.show( buf, offset=offset + 2, attrnames=attrnames, nodenames=nodenames, showcoord=showcoord, _my_node_name=child_name) class NodeVisitor(object): """ A base NodeVisitor class for visiting c_ast nodes. Subclass it and define your own visit_XXX methods, where XXX is the class name you want to visit with these methods. For example: class ConstantVisitor(NodeVisitor): def __init__(self): self.values = [] def visit_Constant(self, node): self.values.append(node.value) Creates a list of values of all the constant nodes encountered below the given node. To use it: cv = ConstantVisitor() cv.visit(node) Notes: * generic_visit() will be called for AST nodes for which no visit_XXX method was defined. * The children of nodes for which a visit_XXX was defined will not be visited - if you need this, call generic_visit() on the node. You can use: NodeVisitor.generic_visit(self, node) * Modeled after Python's own AST visiting facilities (the ast module of Python 3.0) """ _method_cache = None def visit(self, node): """ Visit a node. 
""" if self._method_cache is None: self._method_cache = {} visitor = self._method_cache.get(node.__class__.__name__, None) if visitor is None: method = 'visit_' + node.__class__.__name__ visitor = getattr(self, method, self.generic_visit) self._method_cache[node.__class__.__name__] = visitor return visitor(node) def generic_visit(self, node): """ Called if no explicit visitor function exists for a node. Implements preorder visiting of the node. """ for c in node: self.visit(c) class ArrayDecl(Node): __slots__ = ('type', 'dim', 'dim_quals', 'coord', '__weakref__') def __init__(self, type, dim, dim_quals, coord=None): self.type = type self.dim = dim self.dim_quals = dim_quals self.coord = coord def children(self): nodelist = [] if self.type is not None: nodelist.append(("type", self.type)) if self.dim is not None: nodelist.append(("dim", self.dim)) return tuple(nodelist) def __iter__(self): if self.type is not None: yield self.type if self.dim is not None: yield self.dim attr_names = ('dim_quals', ) class ArrayRef(Node): __slots__ = ('name', 'subscript', 'coord', '__weakref__') def __init__(self, name, subscript, coord=None): self.name = name self.subscript = subscript self.coord = coord def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.subscript is not None: nodelist.append(("subscript", self.subscript)) return tuple(nodelist) def __iter__(self): if self.name is not None: yield self.name if self.subscript is not None: yield self.subscript attr_names = () class Assignment(Node): __slots__ = ('op', 'lvalue', 'rvalue', 'coord', '__weakref__') def __init__(self, op, lvalue, rvalue, coord=None): self.op = op self.lvalue = lvalue self.rvalue = rvalue self.coord = coord def children(self): nodelist = [] if self.lvalue is not None: nodelist.append(("lvalue", self.lvalue)) if self.rvalue is not None: nodelist.append(("rvalue", self.rvalue)) return tuple(nodelist) def __iter__(self): if self.lvalue is not None: yield self.lvalue if self.rvalue is not None: yield self.rvalue attr_names = ('op', ) class Alignas(Node): __slots__ = ('alignment', 'coord', '__weakref__') def __init__(self, alignment, coord=None): self.alignment = alignment self.coord = coord def children(self): nodelist = [] if self.alignment is not None: nodelist.append(("alignment", self.alignment)) return tuple(nodelist) def __iter__(self): if self.alignment is not None: yield self.alignment attr_names = () class BinaryOp(Node): __slots__ = ('op', 'left', 'right', 'coord', '__weakref__') def __init__(self, op, left, right, coord=None): self.op = op self.left = left self.right = right self.coord = coord def children(self): nodelist = [] if self.left is not None: nodelist.append(("left", self.left)) if self.right is not None: nodelist.append(("right", self.right)) return tuple(nodelist) def __iter__(self): if self.left is not None: yield self.left if self.right is not None: yield self.right attr_names = ('op', ) class Break(Node): __slots__ = ('coord', '__weakref__') def __init__(self, coord=None): self.coord = coord def children(self): return () def __iter__(self): return yield attr_names = () class Case(Node): __slots__ = ('expr', 'stmts', 'coord', '__weakref__') def __init__(self, expr, stmts, coord=None): self.expr = expr self.stmts = stmts self.coord = coord def children(self): nodelist = [] if self.expr is not None: nodelist.append(("expr", self.expr)) for i, child in enumerate(self.stmts or []): nodelist.append(("stmts[%d]" % i, child)) return tuple(nodelist) def 
__iter__(self): if self.expr is not None: yield self.expr for child in (self.stmts or []): yield child attr_names = () class Cast(Node): __slots__ = ('to_type', 'expr', 'coord', '__weakref__') def __init__(self, to_type, expr, coord=None): self.to_type = to_type self.expr = expr self.coord = coord def children(self): nodelist = [] if self.to_type is not None: nodelist.append(("to_type", self.to_type)) if self.expr is not None: nodelist.append(("expr", self.expr)) return tuple(nodelist) def __iter__(self): if self.to_type is not None: yield self.to_type if self.expr is not None: yield self.expr attr_names = () class Compound(Node): __slots__ = ('block_items', 'coord', '__weakref__') def __init__(self, block_items, coord=None): self.block_items = block_items self.coord = coord def children(self): nodelist = [] for i, child in enumerate(self.block_items or []): nodelist.append(("block_items[%d]" % i, child)) return tuple(nodelist) def __iter__(self): for child in (self.block_items or []): yield child attr_names = () class CompoundLiteral(Node): __slots__ = ('type', 'init', 'coord', '__weakref__') def __init__(self, type, init, coord=None): self.type = type self.init = init self.coord = coord def children(self): nodelist = [] if self.type is not None: nodelist.append(("type", self.type)) if self.init is not None: nodelist.append(("init", self.init)) return tuple(nodelist) def __iter__(self): if self.type is not None: yield self.type if self.init is not None: yield self.init attr_names = () class Constant(Node): __slots__ = ('type', 'value', 'coord', '__weakref__') def __init__(self, type, value, coord=None): self.type = type self.value = value self.coord = coord def children(self): nodelist = [] return tuple(nodelist) def __iter__(self): return yield attr_names = ('type', 'value', ) class Continue(Node): __slots__ = ('coord', '__weakref__') def __init__(self, coord=None): self.coord = coord def children(self): return () def __iter__(self): return yield attr_names = () class Decl(Node): __slots__ = ('name', 'quals', 'align', 'storage', 'funcspec', 'type', 'init', 'bitsize', 'coord', '__weakref__') def __init__(self, name, quals, align, storage, funcspec, type, init, bitsize, coord=None): self.name = name self.quals = quals self.align = align self.storage = storage self.funcspec = funcspec self.type = type self.init = init self.bitsize = bitsize self.coord = coord def children(self): nodelist = [] if self.type is not None: nodelist.append(("type", self.type)) if self.init is not None: nodelist.append(("init", self.init)) if self.bitsize is not None: nodelist.append(("bitsize", self.bitsize)) return tuple(nodelist) def __iter__(self): if self.type is not None: yield self.type if self.init is not None: yield self.init if self.bitsize is not None: yield self.bitsize attr_names = ('name', 'quals', 'align', 'storage', 'funcspec', ) class DeclList(Node): __slots__ = ('decls', 'coord', '__weakref__') def __init__(self, decls, coord=None): self.decls = decls self.coord = coord def children(self): nodelist = [] for i, child in enumerate(self.decls or []): nodelist.append(("decls[%d]" % i, child)) return tuple(nodelist) def __iter__(self): for child in (self.decls or []): yield child attr_names = () class Default(Node): __slots__ = ('stmts', 'coord', '__weakref__') def __init__(self, stmts, coord=None): self.stmts = stmts self.coord = coord def children(self): nodelist = [] for i, child in enumerate(self.stmts or []): nodelist.append(("stmts[%d]" % i, child)) return tuple(nodelist) def __iter__(self): for 
child in (self.stmts or []): yield child attr_names = () class DoWhile(Node): __slots__ = ('cond', 'stmt', 'coord', '__weakref__') def __init__(self, cond, stmt, coord=None): self.cond = cond self.stmt = stmt self.coord = coord def children(self): nodelist = [] if self.cond is not None: nodelist.append(("cond", self.cond)) if self.stmt is not None: nodelist.append(("stmt", self.stmt)) return tuple(nodelist) def __iter__(self): if self.cond is not None: yield self.cond if self.stmt is not None: yield self.stmt attr_names = () class EllipsisParam(Node): __slots__ = ('coord', '__weakref__') def __init__(self, coord=None): self.coord = coord def children(self): return () def __iter__(self): return yield attr_names = () class EmptyStatement(Node): __slots__ = ('coord', '__weakref__') def __init__(self, coord=None): self.coord = coord def children(self): return () def __iter__(self): return yield attr_names = () class Enum(Node): __slots__ = ('name', 'values', 'coord', '__weakref__') def __init__(self, name, values, coord=None): self.name = name self.values = values self.coord = coord def children(self): nodelist = [] if self.values is not None: nodelist.append(("values", self.values)) return tuple(nodelist) def __iter__(self): if self.values is not None: yield self.values attr_names = ('name', ) class Enumerator(Node): __slots__ = ('name', 'value', 'coord', '__weakref__') def __init__(self, name, value, coord=None): self.name = name self.value = value self.coord = coord def children(self): nodelist = [] if self.value is not None: nodelist.append(("value", self.value)) return tuple(nodelist) def __iter__(self): if self.value is not None: yield self.value attr_names = ('name', ) class EnumeratorList(Node): __slots__ = ('enumerators', 'coord', '__weakref__') def __init__(self, enumerators, coord=None): self.enumerators = enumerators self.coord = coord def children(self): nodelist = [] for i, child in enumerate(self.enumerators or []): nodelist.append(("enumerators[%d]" % i, child)) return tuple(nodelist) def __iter__(self): for child in (self.enumerators or []): yield child attr_names = () class ExprList(Node): __slots__ = ('exprs', 'coord', '__weakref__') def __init__(self, exprs, coord=None): self.exprs = exprs self.coord = coord def children(self): nodelist = [] for i, child in enumerate(self.exprs or []): nodelist.append(("exprs[%d]" % i, child)) return tuple(nodelist) def __iter__(self): for child in (self.exprs or []): yield child attr_names = () class FileAST(Node): __slots__ = ('ext', 'coord', '__weakref__') def __init__(self, ext, coord=None): self.ext = ext self.coord = coord def children(self): nodelist = [] for i, child in enumerate(self.ext or []): nodelist.append(("ext[%d]" % i, child)) return tuple(nodelist) def __iter__(self): for child in (self.ext or []): yield child attr_names = () class For(Node): __slots__ = ('init', 'cond', 'next', 'stmt', 'coord', '__weakref__') def __init__(self, init, cond, next, stmt, coord=None): self.init = init self.cond = cond self.next = next self.stmt = stmt self.coord = coord def children(self): nodelist = [] if self.init is not None: nodelist.append(("init", self.init)) if self.cond is not None: nodelist.append(("cond", self.cond)) if self.next is not None: nodelist.append(("next", self.next)) if self.stmt is not None: nodelist.append(("stmt", self.stmt)) return tuple(nodelist) def __iter__(self): if self.init is not None: yield self.init if self.cond is not None: yield self.cond if self.next is not None: yield self.next if self.stmt is not None: 
yield self.stmt attr_names = () class FuncCall(Node): __slots__ = ('name', 'args', 'coord', '__weakref__') def __init__(self, name, args, coord=None): self.name = name self.args = args self.coord = coord def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.args is not None: nodelist.append(("args", self.args)) return tuple(nodelist) def __iter__(self): if self.name is not None: yield self.name if self.args is not None: yield self.args attr_names = () class FuncDecl(Node): __slots__ = ('args', 'type', 'coord', '__weakref__') def __init__(self, args, type, coord=None): self.args = args self.type = type self.coord = coord def children(self): nodelist = [] if self.args is not None: nodelist.append(("args", self.args)) if self.type is not None: nodelist.append(("type", self.type)) return tuple(nodelist) def __iter__(self): if self.args is not None: yield self.args if self.type is not None: yield self.type attr_names = () class FuncDef(Node): __slots__ = ('decl', 'param_decls', 'body', 'coord', '__weakref__') def __init__(self, decl, param_decls, body, coord=None): self.decl = decl self.param_decls = param_decls self.body = body self.coord = coord def children(self): nodelist = [] if self.decl is not None: nodelist.append(("decl", self.decl)) if self.body is not None: nodelist.append(("body", self.body)) for i, child in enumerate(self.param_decls or []): nodelist.append(("param_decls[%d]" % i, child)) return tuple(nodelist) def __iter__(self): if self.decl is not None: yield self.decl if self.body is not None: yield self.body for child in (self.param_decls or []): yield child attr_names = () class Goto(Node): __slots__ = ('name', 'coord', '__weakref__') def __init__(self, name, coord=None): self.name = name self.coord = coord def children(self): nodelist = [] return tuple(nodelist) def __iter__(self): return yield attr_names = ('name', ) class ID(Node): __slots__ = ('name', 'coord', '__weakref__') def __init__(self, name, coord=None): self.name = name self.coord = coord def children(self): nodelist = [] return tuple(nodelist) def __iter__(self): return yield attr_names = ('name', ) class IdentifierType(Node): __slots__ = ('names', 'coord', '__weakref__') def __init__(self, names, coord=None): self.names = names self.coord = coord def children(self): nodelist = [] return tuple(nodelist) def __iter__(self): return yield attr_names = ('names', ) class If(Node): __slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__') def __init__(self, cond, iftrue, iffalse, coord=None): self.cond = cond self.iftrue = iftrue self.iffalse = iffalse self.coord = coord def children(self): nodelist = [] if self.cond is not None: nodelist.append(("cond", self.cond)) if self.iftrue is not None: nodelist.append(("iftrue", self.iftrue)) if self.iffalse is not None: nodelist.append(("iffalse", self.iffalse)) return tuple(nodelist) def __iter__(self): if self.cond is not None: yield self.cond if self.iftrue is not None: yield self.iftrue if self.iffalse is not None: yield self.iffalse attr_names = () class InitList(Node): __slots__ = ('exprs', 'coord', '__weakref__') def __init__(self, exprs, coord=None): self.exprs = exprs self.coord = coord def children(self): nodelist = [] for i, child in enumerate(self.exprs or []): nodelist.append(("exprs[%d]" % i, child)) return tuple(nodelist) def __iter__(self): for child in (self.exprs or []): yield child attr_names = () class Label(Node): __slots__ = ('name', 'stmt', 'coord', '__weakref__') def __init__(self, name, stmt, 
coord=None): self.name = name self.stmt = stmt self.coord = coord def children(self): nodelist = [] if self.stmt is not None: nodelist.append(("stmt", self.stmt)) return tuple(nodelist) def __iter__(self): if self.stmt is not None: yield self.stmt attr_names = ('name', ) class NamedInitializer(Node): __slots__ = ('name', 'expr', 'coord', '__weakref__') def __init__(self, name, expr, coord=None): self.name = name self.expr = expr self.coord = coord def children(self): nodelist = [] if self.expr is not None: nodelist.append(("expr", self.expr)) for i, child in enumerate(self.name or []): nodelist.append(("name[%d]" % i, child)) return tuple(nodelist) def __iter__(self): if self.expr is not None: yield self.expr for child in (self.name or []): yield child attr_names = () class ParamList(Node): __slots__ = ('params', 'coord', '__weakref__') def __init__(self, params, coord=None): self.params = params self.coord = coord def children(self): nodelist = [] for i, child in enumerate(self.params or []): nodelist.append(("params[%d]" % i, child)) return tuple(nodelist) def __iter__(self): for child in (self.params or []): yield child attr_names = () class PtrDecl(Node): __slots__ = ('quals', 'type', 'coord', '__weakref__') def __init__(self, quals, type, coord=None): self.quals = quals self.type = type self.coord = coord def children(self): nodelist = [] if self.type is not None: nodelist.append(("type", self.type)) return tuple(nodelist) def __iter__(self): if self.type is not None: yield self.type attr_names = ('quals', ) class Return(Node): __slots__ = ('expr', 'coord', '__weakref__') def __init__(self, expr, coord=None): self.expr = expr self.coord = coord def children(self): nodelist = [] if self.expr is not None: nodelist.append(("expr", self.expr)) return tuple(nodelist) def __iter__(self): if self.expr is not None: yield self.expr attr_names = () class StaticAssert(Node): __slots__ = ('cond', 'message', 'coord', '__weakref__') def __init__(self, cond, message, coord=None): self.cond = cond self.message = message self.coord = coord def children(self): nodelist = [] if self.cond is not None: nodelist.append(("cond", self.cond)) if self.message is not None: nodelist.append(("message", self.message)) return tuple(nodelist) def __iter__(self): if self.cond is not None: yield self.cond if self.message is not None: yield self.message attr_names = () class Struct(Node): __slots__ = ('name', 'decls', 'coord', '__weakref__') def __init__(self, name, decls, coord=None): self.name = name self.decls = decls self.coord = coord def children(self): nodelist = [] for i, child in enumerate(self.decls or []): nodelist.append(("decls[%d]" % i, child)) return tuple(nodelist) def __iter__(self): for child in (self.decls or []): yield child attr_names = ('name', ) class StructRef(Node): __slots__ = ('name', 'type', 'field', 'coord', '__weakref__') def __init__(self, name, type, field, coord=None): self.name = name self.type = type self.field = field self.coord = coord def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.field is not None: nodelist.append(("field", self.field)) return tuple(nodelist) def __iter__(self): if self.name is not None: yield self.name if self.field is not None: yield self.field attr_names = ('type', ) class Switch(Node): __slots__ = ('cond', 'stmt', 'coord', '__weakref__') def __init__(self, cond, stmt, coord=None): self.cond = cond self.stmt = stmt self.coord = coord def children(self): nodelist = [] if self.cond is not None: 
nodelist.append(("cond", self.cond)) if self.stmt is not None: nodelist.append(("stmt", self.stmt)) return tuple(nodelist) def __iter__(self): if self.cond is not None: yield self.cond if self.stmt is not None: yield self.stmt attr_names = () class TernaryOp(Node): __slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__') def __init__(self, cond, iftrue, iffalse, coord=None): self.cond = cond self.iftrue = iftrue self.iffalse = iffalse self.coord = coord def children(self): nodelist = [] if self.cond is not None: nodelist.append(("cond", self.cond)) if self.iftrue is not None: nodelist.append(("iftrue", self.iftrue)) if self.iffalse is not None: nodelist.append(("iffalse", self.iffalse)) return tuple(nodelist) def __iter__(self): if self.cond is not None: yield self.cond if self.iftrue is not None: yield self.iftrue if self.iffalse is not None: yield self.iffalse attr_names = () class TypeDecl(Node): __slots__ = ('declname', 'quals', 'align', 'type', 'coord', '__weakref__') def __init__(self, declname, quals, align, type, coord=None): self.declname = declname self.quals = quals self.align = align self.type = type self.coord = coord def children(self): nodelist = [] if self.type is not None: nodelist.append(("type", self.type)) return tuple(nodelist) def __iter__(self): if self.type is not None: yield self.type attr_names = ('declname', 'quals', 'align', ) class Typedef(Node): __slots__ = ('name', 'quals', 'storage', 'type', 'coord', '__weakref__') def __init__(self, name, quals, storage, type, coord=None): self.name = name self.quals = quals self.storage = storage self.type = type self.coord = coord def children(self): nodelist = [] if self.type is not None: nodelist.append(("type", self.type)) return tuple(nodelist) def __iter__(self): if self.type is not None: yield self.type attr_names = ('name', 'quals', 'storage', ) class Typename(Node): __slots__ = ('name', 'quals', 'align', 'type', 'coord', '__weakref__') def __init__(self, name, quals, align, type, coord=None): self.name = name self.quals = quals self.align = align self.type = type self.coord = coord def children(self): nodelist = [] if self.type is not None: nodelist.append(("type", self.type)) return tuple(nodelist) def __iter__(self): if self.type is not None: yield self.type attr_names = ('name', 'quals', 'align', ) class UnaryOp(Node): __slots__ = ('op', 'expr', 'coord', '__weakref__') def __init__(self, op, expr, coord=None): self.op = op self.expr = expr self.coord = coord def children(self): nodelist = [] if self.expr is not None: nodelist.append(("expr", self.expr)) return tuple(nodelist) def __iter__(self): if self.expr is not None: yield self.expr attr_names = ('op', ) class Union(Node): __slots__ = ('name', 'decls', 'coord', '__weakref__') def __init__(self, name, decls, coord=None): self.name = name self.decls = decls self.coord = coord def children(self): nodelist = [] for i, child in enumerate(self.decls or []): nodelist.append(("decls[%d]" % i, child)) return tuple(nodelist) def __iter__(self): for child in (self.decls or []): yield child attr_names = ('name', ) class While(Node): __slots__ = ('cond', 'stmt', 'coord', '__weakref__') def __init__(self, cond, stmt, coord=None): self.cond = cond self.stmt = stmt self.coord = coord def children(self): nodelist = [] if self.cond is not None: nodelist.append(("cond", self.cond)) if self.stmt is not None: nodelist.append(("stmt", self.stmt)) return tuple(nodelist) def __iter__(self): if self.cond is not None: yield self.cond if self.stmt is not None: yield 
self.stmt attr_names = () class Pragma(Node): __slots__ = ('string', 'coord', '__weakref__') def __init__(self, string, coord=None): self.string = string self.coord = coord def children(self): nodelist = [] return tuple(nodelist) def __iter__(self): return yield attr_names = ('string', )
31,445
Python
26.927176
138
0.536747
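The NodeVisitor docstring above can be exercised directly, since the node classes take plain constructor arguments; a runnable sketch, assuming the pycparser package is installed:

from pycparser import c_ast

class IDCollector(c_ast.NodeVisitor):
    def __init__(self):
        self.names = []

    def visit_ID(self, node):
        self.names.append(node.name)

# Build the expression x + y directly from the node classes above.
expr = c_ast.BinaryOp('+', c_ast.ID('x'), c_ast.ID('y'))
collector = IDCollector()
collector.visit(expr)          # generic_visit walks into both children
assert collector.names == ['x', 'y']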
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/__init__.py
#----------------------------------------------------------------- # pycparser: __init__.py # # This package file exports some convenience functions for # interacting with pycparser # # Eli Bendersky [https://eli.thegreenplace.net/] # License: BSD #----------------------------------------------------------------- __all__ = ['c_lexer', 'c_parser', 'c_ast'] __version__ = '2.21' import io from subprocess import check_output from .c_parser import CParser def preprocess_file(filename, cpp_path='cpp', cpp_args=''): """ Preprocess a file using cpp. filename: Name of the file you want to preprocess. cpp_path: cpp_args: Refer to the documentation of parse_file for the meaning of these arguments. When successful, returns the preprocessed file's contents. Errors from cpp will be printed out. """ path_list = [cpp_path] if isinstance(cpp_args, list): path_list += cpp_args elif cpp_args != '': path_list += [cpp_args] path_list += [filename] try: # Note the use of universal_newlines to treat all newlines # as \n for Python's purpose text = check_output(path_list, universal_newlines=True) except OSError as e: raise RuntimeError("Unable to invoke 'cpp'. " + 'Make sure its path was passed correctly\n' + ('Original error: %s' % e)) return text def parse_file(filename, use_cpp=False, cpp_path='cpp', cpp_args='', parser=None): """ Parse a C file using pycparser. filename: Name of the file you want to parse. use_cpp: Set to True if you want to execute the C pre-processor on the file prior to parsing it. cpp_path: If use_cpp is True, this is the path to 'cpp' on your system. If no path is provided, it attempts to just execute 'cpp', so it must be in your PATH. cpp_args: If use_cpp is True, set this to the command line arguments strings to cpp. Be careful with quotes - it's best to pass a raw string (r'') here. For example: r'-I../utils/fake_libc_include' If several arguments are required, pass a list of strings. parser: Optional parser object to be used instead of the default CParser When successful, an AST is returned. ParseError can be thrown if the file doesn't parse successfully. Errors from cpp will be printed out. """ if use_cpp: text = preprocess_file(filename, cpp_path, cpp_args) else: with io.open(filename) as f: text = f.read() if parser is None: parser = CParser() return parser.parse(text, filename)
2,815
Python
29.945055
78
0.572647
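A minimal use of the parse_file entry point defined above; example.c and the include path are placeholders, and with use_cpp=True a working 'cpp' must be on PATH (or passed via cpp_path):

from pycparser import parse_file

ast = parse_file('example.c', use_cpp=True,
                 cpp_args=r'-I../utils/fake_libc_include')
ast.show()  # pretty-prints the translation unit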
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/c_lexer.py
#------------------------------------------------------------------------------ # pycparser: c_lexer.py # # CLexer class: lexer for the C language # # Eli Bendersky [https://eli.thegreenplace.net/] # License: BSD #------------------------------------------------------------------------------ import re from .ply import lex from .ply.lex import TOKEN class CLexer(object): """ A lexer for the C language. After building it, set the input text with input(), and call token() to get new tokens. The public attribute filename can be set to an initial filename, but the lexer will update it upon #line directives. """ def __init__(self, error_func, on_lbrace_func, on_rbrace_func, type_lookup_func): """ Create a new Lexer. error_func: An error function. Will be called with an error message, line and column as arguments, in case of an error during lexing. on_lbrace_func, on_rbrace_func: Called when an LBRACE or RBRACE is encountered (likely to push/pop type_lookup_func's scope) type_lookup_func: A type lookup function. Given a string, it must return True IFF this string is a name of a type that was defined with a typedef earlier. """ self.error_func = error_func self.on_lbrace_func = on_lbrace_func self.on_rbrace_func = on_rbrace_func self.type_lookup_func = type_lookup_func self.filename = '' # Keeps track of the last token returned from self.token() self.last_token = None # Allow either "# line" or "# <num>" to support GCC's # cpp output # self.line_pattern = re.compile(r'([ \t]*line\W)|([ \t]*\d+)') self.pragma_pattern = re.compile(r'[ \t]*pragma\W') def build(self, **kwargs): """ Builds the lexer from the specification. Must be called after the lexer object is created. This method exists separately, because the PLY manual warns against calling lex.lex inside __init__ """ self.lexer = lex.lex(object=self, **kwargs) def reset_lineno(self): """ Resets the internal line number counter of the lexer. """ self.lexer.lineno = 1 def input(self, text): self.lexer.input(text) def token(self): self.last_token = self.lexer.token() return self.last_token def find_tok_column(self, token): """ Find the column of the token in its line. 
""" last_cr = self.lexer.lexdata.rfind('\n', 0, token.lexpos) return token.lexpos - last_cr ######################-- PRIVATE --###################### ## ## Internal auxiliary methods ## def _error(self, msg, token): location = self._make_tok_location(token) self.error_func(msg, location[0], location[1]) self.lexer.skip(1) def _make_tok_location(self, token): return (token.lineno, self.find_tok_column(token)) ## ## Reserved keywords ## keywords = ( 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', 'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE', 'ELSE', 'ENUM', 'EXTERN', 'FLOAT', 'FOR', 'GOTO', 'IF', 'INLINE', 'INT', 'LONG', 'REGISTER', 'OFFSETOF', 'RESTRICT', 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT', 'SWITCH', 'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID', 'VOLATILE', 'WHILE', '__INT128', ) keywords_new = ( '_BOOL', '_COMPLEX', '_NORETURN', '_THREAD_LOCAL', '_STATIC_ASSERT', '_ATOMIC', '_ALIGNOF', '_ALIGNAS', ) keyword_map = {} for keyword in keywords: keyword_map[keyword.lower()] = keyword for keyword in keywords_new: keyword_map[keyword[:2].upper() + keyword[2:].lower()] = keyword ## ## All the tokens recognized by the lexer ## tokens = keywords + keywords_new + ( # Identifiers 'ID', # Type identifiers (identifiers previously defined as # types with typedef) 'TYPEID', # constants 'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'INT_CONST_BIN', 'INT_CONST_CHAR', 'FLOAT_CONST', 'HEX_FLOAT_CONST', 'CHAR_CONST', 'WCHAR_CONST', 'U8CHAR_CONST', 'U16CHAR_CONST', 'U32CHAR_CONST', # String literals 'STRING_LITERAL', 'WSTRING_LITERAL', 'U8STRING_LITERAL', 'U16STRING_LITERAL', 'U32STRING_LITERAL', # Operators 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD', 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', 'LOR', 'LAND', 'LNOT', 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', # Assignment 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL', 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL', # Increment/decrement 'PLUSPLUS', 'MINUSMINUS', # Structure dereference (->) 'ARROW', # Conditional operator (?) 'CONDOP', # Delimiters 'LPAREN', 'RPAREN', # ( ) 'LBRACKET', 'RBRACKET', # [ ] 'LBRACE', 'RBRACE', # { } 'COMMA', 'PERIOD', # . , 'SEMI', 'COLON', # ; : # Ellipsis (...) 'ELLIPSIS', # pre-processor 'PPHASH', # '#' 'PPPRAGMA', # 'pragma' 'PPPRAGMASTR', ) ## ## Regexes for use in tokens ## ## # valid C identifiers (K&R2: A.2.3), plus '$' (supported by some compilers) identifier = r'[a-zA-Z_$][0-9a-zA-Z_$]*' hex_prefix = '0[xX]' hex_digits = '[0-9a-fA-F]+' bin_prefix = '0[bB]' bin_digits = '[01]+' # integer constants (K&R2: A.2.5.1) integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?' decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')' octal_constant = '0[0-7]*'+integer_suffix_opt hex_constant = hex_prefix+hex_digits+integer_suffix_opt bin_constant = bin_prefix+bin_digits+integer_suffix_opt bad_octal_constant = '0[0-7]*[89]' # character constants (K&R2: A.2.5.2) # Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line # directives with Windows paths as filenames (..\..\dir\file) # For the same reason, decimal_escape allows all digit sequences. We want to # parse all correct code, even if it means to sometimes parse incorrect # code. # # The original regexes were taken verbatim from the C syntax definition, # and were later modified to avoid worst-case exponential running time. 
# # simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])""" # decimal_escape = r"""(\d+)""" # hex_escape = r"""(x[0-9a-fA-F]+)""" # bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])""" # # The following modifications were made to avoid the ambiguity that allowed backtracking: # (https://github.com/eliben/pycparser/issues/61) # # - \x was removed from simple_escape, unless it was not followed by a hex digit, to avoid ambiguity with hex_escape. # - hex_escape allows one or more hex characters, but requires that the next character(if any) is not hex # - decimal_escape allows one or more decimal characters, but requires that the next character(if any) is not a decimal # - bad_escape does not allow any decimals (8-9), to avoid conflicting with the permissive decimal_escape. # # Without this change, python's `re` module would recursively try parsing each ambiguous escape sequence in multiple ways. # e.g. `\123` could be parsed as `\1`+`23`, `\12`+`3`, and `\123`. simple_escape = r"""([a-wyzA-Z._~!=&\^\-\\?'"]|x(?![0-9a-fA-F]))""" decimal_escape = r"""(\d+)(?!\d)""" hex_escape = r"""(x[0-9a-fA-F]+)(?![0-9a-fA-F])""" bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-9])""" escape_sequence = r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))' # This complicated regex with lookahead might be slow for strings, so because all of the valid escapes (including \x) allowed # 0 or more non-escaped characters after the first character, simple_escape+decimal_escape+hex_escape got simplified to escape_sequence_start_in_string = r"""(\\[0-9a-zA-Z._~!=&\^\-\\?'"])""" cconst_char = r"""([^'\\\n]|"""+escape_sequence+')' char_const = "'"+cconst_char+"'" wchar_const = 'L'+char_const u8char_const = 'u8'+char_const u16char_const = 'u'+char_const u32char_const = 'U'+char_const multicharacter_constant = "'"+cconst_char+"{2,4}'" unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)" bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')""" # string literals (K&R2: A.2.6) string_char = r"""([^"\\\n]|"""+escape_sequence_start_in_string+')' string_literal = '"'+string_char+'*"' wstring_literal = 'L'+string_literal u8string_literal = 'u8'+string_literal u16string_literal = 'u'+string_literal u32string_literal = 'U'+string_literal bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"' # floating constants (K&R2: A.2.5.3) exponent_part = r"""([eE][-+]?[0-9]+)""" fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)""" floating_constant = '(((('+fractional_constant+')'+exponent_part+'?)|([0-9]+'+exponent_part+'))[FfLl]?)' binary_exponent_part = r'''([pP][+-]?[0-9]+)''' hex_fractional_constant = '((('+hex_digits+r""")?\."""+hex_digits+')|('+hex_digits+r"""\.))""" hex_floating_constant = '('+hex_prefix+'('+hex_digits+'|'+hex_fractional_constant+')'+binary_exponent_part+'[FfLl]?)' ## ## Lexer states: used for preprocessor \n-terminated directives ## states = ( # ppline: preprocessor line directives # ('ppline', 'exclusive'), # pppragma: pragma # ('pppragma', 'exclusive'), ) def t_PPHASH(self, t): r'[ \t]*\#' if self.line_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos): t.lexer.begin('ppline') self.pp_line = self.pp_filename = None elif self.pragma_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos): t.lexer.begin('pppragma') else: t.type = 'PPHASH' return t ## ## Rules for the ppline state ## @TOKEN(string_literal) def t_ppline_FILENAME(self, t): if self.pp_line is None: self._error('filename before line number in #line', t) else: self.pp_filename = 
t.value.lstrip('"').rstrip('"') @TOKEN(decimal_constant) def t_ppline_LINE_NUMBER(self, t): if self.pp_line is None: self.pp_line = t.value else: # Ignore: GCC's cpp sometimes inserts a numeric flag # after the file name pass def t_ppline_NEWLINE(self, t): r'\n' if self.pp_line is None: self._error('line number missing in #line', t) else: self.lexer.lineno = int(self.pp_line) if self.pp_filename is not None: self.filename = self.pp_filename t.lexer.begin('INITIAL') def t_ppline_PPLINE(self, t): r'line' pass t_ppline_ignore = ' \t' def t_ppline_error(self, t): self._error('invalid #line directive', t) ## ## Rules for the pppragma state ## def t_pppragma_NEWLINE(self, t): r'\n' t.lexer.lineno += 1 t.lexer.begin('INITIAL') def t_pppragma_PPPRAGMA(self, t): r'pragma' return t t_pppragma_ignore = ' \t' def t_pppragma_STR(self, t): '.+' t.type = 'PPPRAGMASTR' return t def t_pppragma_error(self, t): self._error('invalid #pragma directive', t) ## ## Rules for the normal state ## t_ignore = ' \t' # Newlines def t_NEWLINE(self, t): r'\n+' t.lexer.lineno += t.value.count("\n") # Operators t_PLUS = r'\+' t_MINUS = r'-' t_TIMES = r'\*' t_DIVIDE = r'/' t_MOD = r'%' t_OR = r'\|' t_AND = r'&' t_NOT = r'~' t_XOR = r'\^' t_LSHIFT = r'<<' t_RSHIFT = r'>>' t_LOR = r'\|\|' t_LAND = r'&&' t_LNOT = r'!' t_LT = r'<' t_GT = r'>' t_LE = r'<=' t_GE = r'>=' t_EQ = r'==' t_NE = r'!=' # Assignment operators t_EQUALS = r'=' t_TIMESEQUAL = r'\*=' t_DIVEQUAL = r'/=' t_MODEQUAL = r'%=' t_PLUSEQUAL = r'\+=' t_MINUSEQUAL = r'-=' t_LSHIFTEQUAL = r'<<=' t_RSHIFTEQUAL = r'>>=' t_ANDEQUAL = r'&=' t_OREQUAL = r'\|=' t_XOREQUAL = r'\^=' # Increment/decrement t_PLUSPLUS = r'\+\+' t_MINUSMINUS = r'--' # -> t_ARROW = r'->' # ? t_CONDOP = r'\?' # Delimiters t_LPAREN = r'\(' t_RPAREN = r'\)' t_LBRACKET = r'\[' t_RBRACKET = r'\]' t_COMMA = r',' t_PERIOD = r'\.' t_SEMI = r';' t_COLON = r':' t_ELLIPSIS = r'\.\.\.' # Scope delimiters # To see why on_lbrace_func is needed, consider: # typedef char TT; # void foo(int TT) { TT = 10; } # TT x = 5; # Outside the function, TT is a typedef, but inside (starting and ending # with the braces) it's a parameter. The trouble begins with yacc's # lookahead token. If we open a new scope in brace_open, then TT has # already been read and incorrectly interpreted as TYPEID. So, we need # to open and close scopes from within the lexer. # Similar for the TT immediately outside the end of the function. 
# @TOKEN(r'\{') def t_LBRACE(self, t): self.on_lbrace_func() return t @TOKEN(r'\}') def t_RBRACE(self, t): self.on_rbrace_func() return t t_STRING_LITERAL = string_literal # The following floating and integer constants are defined as # functions to impose a strict order (otherwise, decimal # is placed before the others because its regex is longer, # and this is bad) # @TOKEN(floating_constant) def t_FLOAT_CONST(self, t): return t @TOKEN(hex_floating_constant) def t_HEX_FLOAT_CONST(self, t): return t @TOKEN(hex_constant) def t_INT_CONST_HEX(self, t): return t @TOKEN(bin_constant) def t_INT_CONST_BIN(self, t): return t @TOKEN(bad_octal_constant) def t_BAD_CONST_OCT(self, t): msg = "Invalid octal constant" self._error(msg, t) @TOKEN(octal_constant) def t_INT_CONST_OCT(self, t): return t @TOKEN(decimal_constant) def t_INT_CONST_DEC(self, t): return t # Must come before bad_char_const, to prevent it from # catching valid char constants as invalid # @TOKEN(multicharacter_constant) def t_INT_CONST_CHAR(self, t): return t @TOKEN(char_const) def t_CHAR_CONST(self, t): return t @TOKEN(wchar_const) def t_WCHAR_CONST(self, t): return t @TOKEN(u8char_const) def t_U8CHAR_CONST(self, t): return t @TOKEN(u16char_const) def t_U16CHAR_CONST(self, t): return t @TOKEN(u32char_const) def t_U32CHAR_CONST(self, t): return t @TOKEN(unmatched_quote) def t_UNMATCHED_QUOTE(self, t): msg = "Unmatched '" self._error(msg, t) @TOKEN(bad_char_const) def t_BAD_CHAR_CONST(self, t): msg = "Invalid char constant %s" % t.value self._error(msg, t) @TOKEN(wstring_literal) def t_WSTRING_LITERAL(self, t): return t @TOKEN(u8string_literal) def t_U8STRING_LITERAL(self, t): return t @TOKEN(u16string_literal) def t_U16STRING_LITERAL(self, t): return t @TOKEN(u32string_literal) def t_U32STRING_LITERAL(self, t): return t # unmatched string literals are caught by the preprocessor @TOKEN(bad_string_literal) def t_BAD_STRING_LITERAL(self, t): msg = "String contains invalid escape code" self._error(msg, t) @TOKEN(identifier) def t_ID(self, t): t.type = self.keyword_map.get(t.value, "ID") if t.type == 'ID' and self.type_lookup_func(t.value): t.type = "TYPEID" return t def t_error(self, t): msg = 'Illegal character %s' % repr(t.value[0]) self._error(msg, t)
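
# -----------------------------------------------------------------------------
# Illustrative usage sketch (added for documentation; not part of the upstream
# pycparser sources). It drives the CLexer defined above with minimal
# callbacks. The callback bodies and the sample C snippet are assumptions made
# for this demo; the constructor/build/input/token API is the one defined in
# this module.
def _demo_tokenize(text):
    def error_func(msg, line, column):
        raise RuntimeError('%s at %d:%d' % (msg, line, column))

    # The brace callbacks normally open/close typedef scopes (see CParser);
    # this standalone demo does not track scopes, so they are no-ops, and
    # type_lookup_func reports that no typedef names are known.
    lexer = CLexer(
        error_func=error_func,
        on_lbrace_func=lambda: None,
        on_rbrace_func=lambda: None,
        type_lookup_func=lambda name: False)
    lexer.build(optimize=False)
    lexer.input(text)
    tokens = []
    while True:
        tok = lexer.token()
        if tok is None:
            break
        tokens.append((tok.type, tok.value))
    return tokens

# For example, _demo_tokenize('int x = 042;') yields
# [('INT', 'int'), ('ID', 'x'), ('EQUALS', '='), ('INT_CONST_OCT', '042'),
#  ('SEMI', ';')], while '089' would trigger the "Invalid octal constant"
# error via t_BAD_CONST_OCT above.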
17,167
Python
29.933333
129
0.528747
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/c_parser.py
#------------------------------------------------------------------------------ # pycparser: c_parser.py # # CParser class: Parser and AST builder for the C language # # Eli Bendersky [https://eli.thegreenplace.net/] # License: BSD #------------------------------------------------------------------------------ from .ply import yacc from . import c_ast from .c_lexer import CLexer from .plyparser import PLYParser, ParseError, parameterized, template from .ast_transforms import fix_switch_cases, fix_atomic_specifiers @template class CParser(PLYParser): def __init__( self, lex_optimize=True, lexer=CLexer, lextab='pycparser.lextab', yacc_optimize=True, yacctab='pycparser.yacctab', yacc_debug=False, taboutputdir=''): """ Create a new CParser. Some arguments for controlling the debug/optimization level of the parser are provided. The defaults are tuned for release/performance mode. The simple rules for using them are: *) When tweaking CParser/CLexer, set these to False *) When releasing a stable parser, set to True lex_optimize: Set to False when you're modifying the lexer. Otherwise, changes in the lexer won't be used, if some lextab.py file exists. When releasing with a stable lexer, set to True to save the re-generation of the lexer table on each run. lexer: Set this parameter to define the lexer to use if you're not using the default CLexer. lextab: Points to the lex table that's used for optimized mode. Only if you're modifying the lexer and want some tests to avoid re-generating the table, make this point to a local lex table file (that's been earlier generated with lex_optimize=True) yacc_optimize: Set to False when you're modifying the parser. Otherwise, changes in the parser won't be used, if some parsetab.py file exists. When releasing with a stable parser, set to True to save the re-generation of the parser table on each run. yacctab: Points to the yacc table that's used for optimized mode. Only if you're modifying the parser, make this point to a local yacc table file yacc_debug: Generate a parser.out file that explains how yacc built the parsing table from the grammar. taboutputdir: Set this parameter to control the location of generated lextab and yacctab files. """ self.clex = lexer( error_func=self._lex_error_func, on_lbrace_func=self._lex_on_lbrace_func, on_rbrace_func=self._lex_on_rbrace_func, type_lookup_func=self._lex_type_lookup_func) self.clex.build( optimize=lex_optimize, lextab=lextab, outputdir=taboutputdir) self.tokens = self.clex.tokens rules_with_opt = [ 'abstract_declarator', 'assignment_expression', 'declaration_list', 'declaration_specifiers_no_type', 'designation', 'expression', 'identifier_list', 'init_declarator_list', 'id_init_declarator_list', 'initializer_list', 'parameter_type_list', 'block_item_list', 'type_qualifier_list', 'struct_declarator_list' ] for rule in rules_with_opt: self._create_opt_rule(rule) self.cparser = yacc.yacc( module=self, start='translation_unit_or_empty', debug=yacc_debug, optimize=yacc_optimize, tabmodule=yacctab, outputdir=taboutputdir) # Stack of scopes for keeping track of symbols. _scope_stack[-1] is # the current (topmost) scope. Each scope is a dictionary that # specifies whether a name is a type. If _scope_stack[n][name] is # True, 'name' is currently a type in the scope. If it's False, # 'name' is used in the scope but not as a type (for instance, if we # saw: int name; # If 'name' is not a key in _scope_stack[n] then 'name' was not defined # in this scope at all. 
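        #
        # For example (added illustration), right after parsing the
        # parameter list of
        #     typedef char TT;
        #     void foo(int TT) { ... }
        # the stack is [{'TT': True}, {'TT': False}]: 'TT' names a type in
        # the file scope but an ordinary identifier inside the function.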
self._scope_stack = [dict()] # Keeps track of the last token given to yacc (the lookahead token) self._last_yielded_token = None def parse(self, text, filename='', debug=False): """ Parses C code and returns an AST. text: A string containing the C source code filename: Name of the file being parsed (for meaningful error messages) debug: Debug flag to YACC """ self.clex.filename = filename self.clex.reset_lineno() self._scope_stack = [dict()] self._last_yielded_token = None return self.cparser.parse( input=text, lexer=self.clex, debug=debug) ######################-- PRIVATE --###################### def _push_scope(self): self._scope_stack.append(dict()) def _pop_scope(self): assert len(self._scope_stack) > 1 self._scope_stack.pop() def _add_typedef_name(self, name, coord): """ Add a new typedef name (ie a TYPEID) to the current scope """ if not self._scope_stack[-1].get(name, True): self._parse_error( "Typedef %r previously declared as non-typedef " "in this scope" % name, coord) self._scope_stack[-1][name] = True def _add_identifier(self, name, coord): """ Add a new object, function, or enum member name (ie an ID) to the current scope """ if self._scope_stack[-1].get(name, False): self._parse_error( "Non-typedef %r previously declared as typedef " "in this scope" % name, coord) self._scope_stack[-1][name] = False def _is_type_in_scope(self, name): """ Is *name* a typedef-name in the current scope? """ for scope in reversed(self._scope_stack): # If name is an identifier in this scope it shadows typedefs in # higher scopes. in_scope = scope.get(name) if in_scope is not None: return in_scope return False def _lex_error_func(self, msg, line, column): self._parse_error(msg, self._coord(line, column)) def _lex_on_lbrace_func(self): self._push_scope() def _lex_on_rbrace_func(self): self._pop_scope() def _lex_type_lookup_func(self, name): """ Looks up types that were previously defined with typedef. Passed to the lexer for recognizing identifiers that are types. """ is_type = self._is_type_in_scope(name) return is_type def _get_yacc_lookahead_token(self): """ We need access to yacc's lookahead token in certain cases. This is the last token yacc requested from the lexer, so we ask the lexer. """ return self.clex.last_token # To understand what's going on here, read sections A.8.5 and # A.8.6 of K&R2 very carefully. # # A C type consists of a basic type declaration, with a list # of modifiers. For example: # # int *c[5]; # # The basic declaration here is 'int c', and the pointer and # the array are the modifiers. # # Basic declarations are represented by TypeDecl (from module c_ast) and the # modifiers are FuncDecl, PtrDecl and ArrayDecl. # # The standard states that whenever a new modifier is parsed, it should be # added to the end of the list of modifiers. For example: # # K&R2 A.8.6.2: Array Declarators # # In a declaration T D where D has the form # D1 [constant-expression-opt] # and the type of the identifier in the declaration T D1 is # "type-modifier T", the type of the # identifier of D is "type-modifier array of T" # # This is what this method does. The declarator it receives # can be a list of declarators ending with TypeDecl. It # tacks the modifier to the end of this list, just before # the TypeDecl. # # Additionally, the modifier may be a list itself. This is # useful for pointers, that can come as a chain from the rule # p_pointer. In this case, the whole modifier list is spliced # into the new location. 
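    #
    # For example (added illustration), 'int *c[5]' declares c as an array
    # of 5 pointers to int, and the finished declarator chain is:
    #
    #     ArrayDecl(dim=5) -> PtrDecl -> TypeDecl('c') -> IdentifierType(['int'])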
def _type_modify_decl(self, decl, modifier): """ Tacks a type modifier on a declarator, and returns the modified declarator. Note: the declarator and modifier may be modified """ #~ print '****' #~ decl.show(offset=3) #~ modifier.show(offset=3) #~ print '****' modifier_head = modifier modifier_tail = modifier # The modifier may be a nested list. Reach its tail. while modifier_tail.type: modifier_tail = modifier_tail.type # If the decl is a basic type, just tack the modifier onto it. if isinstance(decl, c_ast.TypeDecl): modifier_tail.type = decl return modifier else: # Otherwise, the decl is a list of modifiers. Reach # its tail and splice the modifier onto the tail, # pointing to the underlying basic type. decl_tail = decl while not isinstance(decl_tail.type, c_ast.TypeDecl): decl_tail = decl_tail.type modifier_tail.type = decl_tail.type decl_tail.type = modifier_head return decl # Due to the order in which declarators are constructed, # they have to be fixed in order to look like a normal AST. # # When a declaration arrives from syntax construction, it has # these problems: # * The innermost TypeDecl has no type (because the basic # type is only known at the uppermost declaration level) # * The declaration has no variable name, since that is saved # in the innermost TypeDecl # * The typename of the declaration is a list of type # specifiers, and not a node. Here, basic identifier types # should be separated from more complex types like enums # and structs. # # This method fixes these problems. def _fix_decl_name_type(self, decl, typename): """ Fixes a declaration. Modifies decl. """ # Reach the underlying basic type # type = decl while not isinstance(type, c_ast.TypeDecl): type = type.type decl.name = type.declname type.quals = decl.quals[:] # The typename is a list of types. If any type in this # list isn't an IdentifierType, it must be the only # type in the list (it's illegal to declare "int enum ..") # If all the types are basic, they're collected in the # IdentifierType holder. for tn in typename: if not isinstance(tn, c_ast.IdentifierType): if len(typename) > 1: self._parse_error( "Invalid multiple types specified", tn.coord) else: type.type = tn return decl if not typename: # Functions default to returning int # if not isinstance(decl.type, c_ast.FuncDecl): self._parse_error( "Missing type in declaration", decl.coord) type.type = c_ast.IdentifierType( ['int'], coord=decl.coord) else: # At this point, we know that typename is a list of IdentifierType # nodes. Concatenate all the names into a single list. # type.type = c_ast.IdentifierType( [name for id in typename for name in id.names], coord=typename[0].coord) return decl def _add_declaration_specifier(self, declspec, newspec, kind, append=False): """ Declaration specifiers are represented by a dictionary with the entries: * qual: a list of type qualifiers * storage: a list of storage type qualifiers * type: a list of type specifiers * function: a list of function specifiers * alignment: a list of alignment specifiers This method is given a declaration specifier, and a new specifier of a given kind. If `append` is True, the new specifier is added to the end of the specifiers list, otherwise it's added at the beginning. Returns the declaration specifier, with the new specifier incorporated. 
""" spec = declspec or dict(qual=[], storage=[], type=[], function=[], alignment=[]) if append: spec[kind].append(newspec) else: spec[kind].insert(0, newspec) return spec def _build_declarations(self, spec, decls, typedef_namespace=False): """ Builds a list of declarations all sharing the given specifiers. If typedef_namespace is true, each declared name is added to the "typedef namespace", which also includes objects, functions, and enum constants. """ is_typedef = 'typedef' in spec['storage'] declarations = [] # Bit-fields are allowed to be unnamed. if decls[0].get('bitsize') is not None: pass # When redeclaring typedef names as identifiers in inner scopes, a # problem can occur where the identifier gets grouped into # spec['type'], leaving decl as None. This can only occur for the # first declarator. elif decls[0]['decl'] is None: if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \ not self._is_type_in_scope(spec['type'][-1].names[0]): coord = '?' for t in spec['type']: if hasattr(t, 'coord'): coord = t.coord break self._parse_error('Invalid declaration', coord) # Make this look as if it came from "direct_declarator:ID" decls[0]['decl'] = c_ast.TypeDecl( declname=spec['type'][-1].names[0], type=None, quals=None, align=spec['alignment'], coord=spec['type'][-1].coord) # Remove the "new" type's name from the end of spec['type'] del spec['type'][-1] # A similar problem can occur where the declaration ends up looking # like an abstract declarator. Give it a name if this is the case. elif not isinstance(decls[0]['decl'], ( c_ast.Enum, c_ast.Struct, c_ast.Union, c_ast.IdentifierType)): decls_0_tail = decls[0]['decl'] while not isinstance(decls_0_tail, c_ast.TypeDecl): decls_0_tail = decls_0_tail.type if decls_0_tail.declname is None: decls_0_tail.declname = spec['type'][-1].names[0] del spec['type'][-1] for decl in decls: assert decl['decl'] is not None if is_typedef: declaration = c_ast.Typedef( name=None, quals=spec['qual'], storage=spec['storage'], type=decl['decl'], coord=decl['decl'].coord) else: declaration = c_ast.Decl( name=None, quals=spec['qual'], align=spec['alignment'], storage=spec['storage'], funcspec=spec['function'], type=decl['decl'], init=decl.get('init'), bitsize=decl.get('bitsize'), coord=decl['decl'].coord) if isinstance(declaration.type, ( c_ast.Enum, c_ast.Struct, c_ast.Union, c_ast.IdentifierType)): fixed_decl = declaration else: fixed_decl = self._fix_decl_name_type(declaration, spec['type']) # Add the type name defined by typedef to a # symbol table (for usage in the lexer) if typedef_namespace: if is_typedef: self._add_typedef_name(fixed_decl.name, fixed_decl.coord) else: self._add_identifier(fixed_decl.name, fixed_decl.coord) fixed_decl = fix_atomic_specifiers(fixed_decl) declarations.append(fixed_decl) return declarations def _build_function_definition(self, spec, decl, param_decls, body): """ Builds a function definition. """ if 'typedef' in spec['storage']: self._parse_error("Invalid typedef", decl.coord) declaration = self._build_declarations( spec=spec, decls=[dict(decl=decl, init=None)], typedef_namespace=True)[0] return c_ast.FuncDef( decl=declaration, param_decls=param_decls, body=body, coord=decl.coord) def _select_struct_union_class(self, token): """ Given a token (either STRUCT or UNION), selects the appropriate AST class. 
""" if token == 'struct': return c_ast.Struct else: return c_ast.Union ## ## Precedence and associativity of operators ## # If this changes, c_generator.CGenerator.precedence_map needs to change as # well precedence = ( ('left', 'LOR'), ('left', 'LAND'), ('left', 'OR'), ('left', 'XOR'), ('left', 'AND'), ('left', 'EQ', 'NE'), ('left', 'GT', 'GE', 'LT', 'LE'), ('left', 'RSHIFT', 'LSHIFT'), ('left', 'PLUS', 'MINUS'), ('left', 'TIMES', 'DIVIDE', 'MOD') ) ## ## Grammar productions ## Implementation of the BNF defined in K&R2 A.13 ## # Wrapper around a translation unit, to allow for empty input. # Not strictly part of the C99 Grammar, but useful in practice. def p_translation_unit_or_empty(self, p): """ translation_unit_or_empty : translation_unit | empty """ if p[1] is None: p[0] = c_ast.FileAST([]) else: p[0] = c_ast.FileAST(p[1]) def p_translation_unit_1(self, p): """ translation_unit : external_declaration """ # Note: external_declaration is already a list p[0] = p[1] def p_translation_unit_2(self, p): """ translation_unit : translation_unit external_declaration """ p[1].extend(p[2]) p[0] = p[1] # Declarations always come as lists (because they can be # several in one line), so we wrap the function definition # into a list as well, to make the return value of # external_declaration homogeneous. def p_external_declaration_1(self, p): """ external_declaration : function_definition """ p[0] = [p[1]] def p_external_declaration_2(self, p): """ external_declaration : declaration """ p[0] = p[1] def p_external_declaration_3(self, p): """ external_declaration : pp_directive | pppragma_directive """ p[0] = [p[1]] def p_external_declaration_4(self, p): """ external_declaration : SEMI """ p[0] = [] def p_external_declaration_5(self, p): """ external_declaration : static_assert """ p[0] = p[1] def p_static_assert_declaration(self, p): """ static_assert : _STATIC_ASSERT LPAREN constant_expression COMMA unified_string_literal RPAREN | _STATIC_ASSERT LPAREN constant_expression RPAREN """ if len(p) == 5: p[0] = [c_ast.StaticAssert(p[3], None, self._token_coord(p, 1))] else: p[0] = [c_ast.StaticAssert(p[3], p[5], self._token_coord(p, 1))] def p_pp_directive(self, p): """ pp_directive : PPHASH """ self._parse_error('Directives not supported yet', self._token_coord(p, 1)) def p_pppragma_directive(self, p): """ pppragma_directive : PPPRAGMA | PPPRAGMA PPPRAGMASTR """ if len(p) == 3: p[0] = c_ast.Pragma(p[2], self._token_coord(p, 2)) else: p[0] = c_ast.Pragma("", self._token_coord(p, 1)) # In function definitions, the declarator can be followed by # a declaration list, for old "K&R style" function definitios. def p_function_definition_1(self, p): """ function_definition : id_declarator declaration_list_opt compound_statement """ # no declaration specifiers - 'int' becomes the default type spec = dict( qual=[], alignment=[], storage=[], type=[c_ast.IdentifierType(['int'], coord=self._token_coord(p, 1))], function=[]) p[0] = self._build_function_definition( spec=spec, decl=p[1], param_decls=p[2], body=p[3]) def p_function_definition_2(self, p): """ function_definition : declaration_specifiers id_declarator declaration_list_opt compound_statement """ spec = p[1] p[0] = self._build_function_definition( spec=spec, decl=p[2], param_decls=p[3], body=p[4]) # Note, according to C18 A.2.2 6.7.10 static_assert-declaration _Static_assert # is a declaration, not a statement. We additionally recognise it as a statement # to fix parsing of _Static_assert inside the functions. 
# def p_statement(self, p): """ statement : labeled_statement | expression_statement | compound_statement | selection_statement | iteration_statement | jump_statement | pppragma_directive | static_assert """ p[0] = p[1] # A pragma is generally considered a decorator rather than an actual # statement. Still, for the purposes of analyzing an abstract syntax tree of # C code, pragma's should not be ignored and were previously treated as a # statement. This presents a problem for constructs that take a statement # such as labeled_statements, selection_statements, and # iteration_statements, causing a misleading structure in the AST. For # example, consider the following C code. # # for (int i = 0; i < 3; i++) # #pragma omp critical # sum += 1; # # This code will compile and execute "sum += 1;" as the body of the for # loop. Previous implementations of PyCParser would render the AST for this # block of code as follows: # # For: # DeclList: # Decl: i, [], [], [] # TypeDecl: i, [] # IdentifierType: ['int'] # Constant: int, 0 # BinaryOp: < # ID: i # Constant: int, 3 # UnaryOp: p++ # ID: i # Pragma: omp critical # Assignment: += # ID: sum # Constant: int, 1 # # This AST misleadingly takes the Pragma as the body of the loop and the # assignment then becomes a sibling of the loop. # # To solve edge cases like these, the pragmacomp_or_statement rule groups # a pragma and its following statement (which would otherwise be orphaned) # using a compound block, effectively turning the above code into: # # for (int i = 0; i < 3; i++) { # #pragma omp critical # sum += 1; # } def p_pragmacomp_or_statement(self, p): """ pragmacomp_or_statement : pppragma_directive statement | statement """ if isinstance(p[1], c_ast.Pragma) and len(p) == 3: p[0] = c_ast.Compound( block_items=[p[1], p[2]], coord=self._token_coord(p, 1)) else: p[0] = p[1] # In C, declarations can come several in a line: # int x, *px, romulo = 5; # # However, for the AST, we will split them to separate Decl # nodes. # # This rule splits its declarations and always returns a list # of Decl nodes, even if it's one element long. # def p_decl_body(self, p): """ decl_body : declaration_specifiers init_declarator_list_opt | declaration_specifiers_no_type id_init_declarator_list_opt """ spec = p[1] # p[2] (init_declarator_list_opt) is either a list or None # if p[2] is None: # By the standard, you must have at least one declarator unless # declaring a structure tag, a union tag, or the members of an # enumeration. # ty = spec['type'] s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum) if len(ty) == 1 and isinstance(ty[0], s_u_or_e): decls = [c_ast.Decl( name=None, quals=spec['qual'], align=spec['alignment'], storage=spec['storage'], funcspec=spec['function'], type=ty[0], init=None, bitsize=None, coord=ty[0].coord)] # However, this case can also occur on redeclared identifiers in # an inner scope. The trouble is that the redeclared type's name # gets grouped into declaration_specifiers; _build_declarations # compensates for this. # else: decls = self._build_declarations( spec=spec, decls=[dict(decl=None, init=None)], typedef_namespace=True) else: decls = self._build_declarations( spec=spec, decls=p[2], typedef_namespace=True) p[0] = decls # The declaration has been split to a decl_body sub-rule and # SEMI, because having them in a single rule created a problem # for defining typedefs. # # If a typedef line was directly followed by a line using the # type defined with the typedef, the type would not be # recognized. 
This is because to reduce the declaration rule, # the parser's lookahead asked for the token after SEMI, which # was the type from the next line, and the lexer had no chance # to see the updated type symbol table. # # Splitting solves this problem, because after seeing SEMI, # the parser reduces decl_body, which actually adds the new # type into the table to be seen by the lexer before the next # line is reached. def p_declaration(self, p): """ declaration : decl_body SEMI """ p[0] = p[1] # Since each declaration is a list of declarations, this # rule will combine all the declarations and return a single # list # def p_declaration_list(self, p): """ declaration_list : declaration | declaration_list declaration """ p[0] = p[1] if len(p) == 2 else p[1] + p[2] # To know when declaration-specifiers end and declarators begin, # we require declaration-specifiers to have at least one # type-specifier, and disallow typedef-names after we've seen any # type-specifier. These are both required by the spec. # def p_declaration_specifiers_no_type_1(self, p): """ declaration_specifiers_no_type : type_qualifier declaration_specifiers_no_type_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'qual') def p_declaration_specifiers_no_type_2(self, p): """ declaration_specifiers_no_type : storage_class_specifier declaration_specifiers_no_type_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'storage') def p_declaration_specifiers_no_type_3(self, p): """ declaration_specifiers_no_type : function_specifier declaration_specifiers_no_type_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'function') # Without this, `typedef _Atomic(T) U` will parse incorrectly because the # _Atomic qualifier will match, instead of the specifier. def p_declaration_specifiers_no_type_4(self, p): """ declaration_specifiers_no_type : atomic_specifier declaration_specifiers_no_type_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'type') def p_declaration_specifiers_no_type_5(self, p): """ declaration_specifiers_no_type : alignment_specifier declaration_specifiers_no_type_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'alignment') def p_declaration_specifiers_1(self, p): """ declaration_specifiers : declaration_specifiers type_qualifier """ p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True) def p_declaration_specifiers_2(self, p): """ declaration_specifiers : declaration_specifiers storage_class_specifier """ p[0] = self._add_declaration_specifier(p[1], p[2], 'storage', append=True) def p_declaration_specifiers_3(self, p): """ declaration_specifiers : declaration_specifiers function_specifier """ p[0] = self._add_declaration_specifier(p[1], p[2], 'function', append=True) def p_declaration_specifiers_4(self, p): """ declaration_specifiers : declaration_specifiers type_specifier_no_typeid """ p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True) def p_declaration_specifiers_5(self, p): """ declaration_specifiers : type_specifier """ p[0] = self._add_declaration_specifier(None, p[1], 'type') def p_declaration_specifiers_6(self, p): """ declaration_specifiers : declaration_specifiers_no_type type_specifier """ p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True) def p_declaration_specifiers_7(self, p): """ declaration_specifiers : declaration_specifiers alignment_specifier """ p[0] = self._add_declaration_specifier(p[1], p[2], 'alignment', append=True) def p_storage_class_specifier(self, p): """ storage_class_specifier : AUTO | 
REGISTER | STATIC | EXTERN | TYPEDEF | _THREAD_LOCAL """ p[0] = p[1] def p_function_specifier(self, p): """ function_specifier : INLINE | _NORETURN """ p[0] = p[1] def p_type_specifier_no_typeid(self, p): """ type_specifier_no_typeid : VOID | _BOOL | CHAR | SHORT | INT | LONG | FLOAT | DOUBLE | _COMPLEX | SIGNED | UNSIGNED | __INT128 """ p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1)) def p_type_specifier(self, p): """ type_specifier : typedef_name | enum_specifier | struct_or_union_specifier | type_specifier_no_typeid | atomic_specifier """ p[0] = p[1] # See section 6.7.2.4 of the C11 standard. def p_atomic_specifier(self, p): """ atomic_specifier : _ATOMIC LPAREN type_name RPAREN """ typ = p[3] typ.quals.append('_Atomic') p[0] = typ def p_type_qualifier(self, p): """ type_qualifier : CONST | RESTRICT | VOLATILE | _ATOMIC """ p[0] = p[1] def p_init_declarator_list(self, p): """ init_declarator_list : init_declarator | init_declarator_list COMMA init_declarator """ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] # Returns a {decl=<declarator> : init=<initializer>} dictionary # If there's no initializer, uses None # def p_init_declarator(self, p): """ init_declarator : declarator | declarator EQUALS initializer """ p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None)) def p_id_init_declarator_list(self, p): """ id_init_declarator_list : id_init_declarator | id_init_declarator_list COMMA init_declarator """ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] def p_id_init_declarator(self, p): """ id_init_declarator : id_declarator | id_declarator EQUALS initializer """ p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None)) # Require at least one type specifier in a specifier-qualifier-list # def p_specifier_qualifier_list_1(self, p): """ specifier_qualifier_list : specifier_qualifier_list type_specifier_no_typeid """ p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True) def p_specifier_qualifier_list_2(self, p): """ specifier_qualifier_list : specifier_qualifier_list type_qualifier """ p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True) def p_specifier_qualifier_list_3(self, p): """ specifier_qualifier_list : type_specifier """ p[0] = self._add_declaration_specifier(None, p[1], 'type') def p_specifier_qualifier_list_4(self, p): """ specifier_qualifier_list : type_qualifier_list type_specifier """ p[0] = dict(qual=p[1], alignment=[], storage=[], type=[p[2]], function=[]) def p_specifier_qualifier_list_5(self, p): """ specifier_qualifier_list : alignment_specifier """ p[0] = dict(qual=[], alignment=[p[1]], storage=[], type=[], function=[]) def p_specifier_qualifier_list_6(self, p): """ specifier_qualifier_list : specifier_qualifier_list alignment_specifier """ p[0] = self._add_declaration_specifier(p[1], p[2], 'alignment') # TYPEID is allowed here (and in other struct/enum related tag names), because # struct/enum tags reside in their own namespace and can be named the same as types # def p_struct_or_union_specifier_1(self, p): """ struct_or_union_specifier : struct_or_union ID | struct_or_union TYPEID """ klass = self._select_struct_union_class(p[1]) # None means no list of members p[0] = klass( name=p[2], decls=None, coord=self._token_coord(p, 2)) def p_struct_or_union_specifier_2(self, p): """ struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close | struct_or_union brace_open brace_close """ klass = self._select_struct_union_class(p[1]) if len(p) == 4: # Empty sequence means an empty list of 
members p[0] = klass( name=None, decls=[], coord=self._token_coord(p, 2)) else: p[0] = klass( name=None, decls=p[3], coord=self._token_coord(p, 2)) def p_struct_or_union_specifier_3(self, p): """ struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close | struct_or_union ID brace_open brace_close | struct_or_union TYPEID brace_open struct_declaration_list brace_close | struct_or_union TYPEID brace_open brace_close """ klass = self._select_struct_union_class(p[1]) if len(p) == 5: # Empty sequence means an empty list of members p[0] = klass( name=p[2], decls=[], coord=self._token_coord(p, 2)) else: p[0] = klass( name=p[2], decls=p[4], coord=self._token_coord(p, 2)) def p_struct_or_union(self, p): """ struct_or_union : STRUCT | UNION """ p[0] = p[1] # Combine all declarations into a single list # def p_struct_declaration_list(self, p): """ struct_declaration_list : struct_declaration | struct_declaration_list struct_declaration """ if len(p) == 2: p[0] = p[1] or [] else: p[0] = p[1] + (p[2] or []) def p_struct_declaration_1(self, p): """ struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI """ spec = p[1] assert 'typedef' not in spec['storage'] if p[2] is not None: decls = self._build_declarations( spec=spec, decls=p[2]) elif len(spec['type']) == 1: # Anonymous struct/union, gcc extension, C1x feature. # Although the standard only allows structs/unions here, I see no # reason to disallow other types since some compilers have typedefs # here, and pycparser isn't about rejecting all invalid code. # node = spec['type'][0] if isinstance(node, c_ast.Node): decl_type = node else: decl_type = c_ast.IdentifierType(node) decls = self._build_declarations( spec=spec, decls=[dict(decl=decl_type)]) else: # Structure/union members can have the same names as typedefs. # The trouble is that the member's name gets grouped into # specifier_qualifier_list; _build_declarations compensates. 
# decls = self._build_declarations( spec=spec, decls=[dict(decl=None, init=None)]) p[0] = decls def p_struct_declaration_2(self, p): """ struct_declaration : SEMI """ p[0] = None def p_struct_declaration_3(self, p): """ struct_declaration : pppragma_directive """ p[0] = [p[1]] def p_struct_declarator_list(self, p): """ struct_declarator_list : struct_declarator | struct_declarator_list COMMA struct_declarator """ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] # struct_declarator passes up a dict with the keys: decl (for # the underlying declarator) and bitsize (for the bitsize) # def p_struct_declarator_1(self, p): """ struct_declarator : declarator """ p[0] = {'decl': p[1], 'bitsize': None} def p_struct_declarator_2(self, p): """ struct_declarator : declarator COLON constant_expression | COLON constant_expression """ if len(p) > 3: p[0] = {'decl': p[1], 'bitsize': p[3]} else: p[0] = {'decl': c_ast.TypeDecl(None, None, None, None), 'bitsize': p[2]} def p_enum_specifier_1(self, p): """ enum_specifier : ENUM ID | ENUM TYPEID """ p[0] = c_ast.Enum(p[2], None, self._token_coord(p, 1)) def p_enum_specifier_2(self, p): """ enum_specifier : ENUM brace_open enumerator_list brace_close """ p[0] = c_ast.Enum(None, p[3], self._token_coord(p, 1)) def p_enum_specifier_3(self, p): """ enum_specifier : ENUM ID brace_open enumerator_list brace_close | ENUM TYPEID brace_open enumerator_list brace_close """ p[0] = c_ast.Enum(p[2], p[4], self._token_coord(p, 1)) def p_enumerator_list(self, p): """ enumerator_list : enumerator | enumerator_list COMMA | enumerator_list COMMA enumerator """ if len(p) == 2: p[0] = c_ast.EnumeratorList([p[1]], p[1].coord) elif len(p) == 3: p[0] = p[1] else: p[1].enumerators.append(p[3]) p[0] = p[1] def p_alignment_specifier(self, p): """ alignment_specifier : _ALIGNAS LPAREN type_name RPAREN | _ALIGNAS LPAREN constant_expression RPAREN """ p[0] = c_ast.Alignas(p[3], self._token_coord(p, 1)) def p_enumerator(self, p): """ enumerator : ID | ID EQUALS constant_expression """ if len(p) == 2: enumerator = c_ast.Enumerator( p[1], None, self._token_coord(p, 1)) else: enumerator = c_ast.Enumerator( p[1], p[3], self._token_coord(p, 1)) self._add_identifier(enumerator.name, enumerator.coord) p[0] = enumerator def p_declarator(self, p): """ declarator : id_declarator | typeid_declarator """ p[0] = p[1] @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) def p_xxx_declarator_1(self, p): """ xxx_declarator : direct_xxx_declarator """ p[0] = p[1] @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) def p_xxx_declarator_2(self, p): """ xxx_declarator : pointer direct_xxx_declarator """ p[0] = self._type_modify_decl(p[2], p[1]) @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) def p_direct_xxx_declarator_1(self, p): """ direct_xxx_declarator : yyy """ p[0] = c_ast.TypeDecl( declname=p[1], type=None, quals=None, align=None, coord=self._token_coord(p, 1)) @parameterized(('id', 'ID'), ('typeid', 'TYPEID')) def p_direct_xxx_declarator_2(self, p): """ direct_xxx_declarator : LPAREN xxx_declarator RPAREN """ p[0] = p[2] @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) def p_direct_xxx_declarator_3(self, p): """ direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET """ quals = (p[3] if len(p) > 5 else []) or [] # Accept dimension qualifiers # Per C99 6.7.5.3 p7 arr = c_ast.ArrayDecl( type=None, dim=p[4] if len(p) > 5 else 
p[3], dim_quals=quals, coord=p[1].coord) p[0] = self._type_modify_decl(decl=p[1], modifier=arr) @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) def p_direct_xxx_declarator_4(self, p): """ direct_xxx_declarator : direct_xxx_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET | direct_xxx_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET """ # Using slice notation for PLY objects doesn't work in Python 3 for the # version of PLY embedded with pycparser; see PLY Google Code issue 30. # Work around that here by listing the two elements separately. listed_quals = [item if isinstance(item, list) else [item] for item in [p[3],p[4]]] dim_quals = [qual for sublist in listed_quals for qual in sublist if qual is not None] arr = c_ast.ArrayDecl( type=None, dim=p[5], dim_quals=dim_quals, coord=p[1].coord) p[0] = self._type_modify_decl(decl=p[1], modifier=arr) # Special for VLAs # @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) def p_direct_xxx_declarator_5(self, p): """ direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=c_ast.ID(p[4], self._token_coord(p, 4)), dim_quals=p[3] if p[3] is not None else [], coord=p[1].coord) p[0] = self._type_modify_decl(decl=p[1], modifier=arr) @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) def p_direct_xxx_declarator_6(self, p): """ direct_xxx_declarator : direct_xxx_declarator LPAREN parameter_type_list RPAREN | direct_xxx_declarator LPAREN identifier_list_opt RPAREN """ func = c_ast.FuncDecl( args=p[3], type=None, coord=p[1].coord) # To see why _get_yacc_lookahead_token is needed, consider: # typedef char TT; # void foo(int TT) { TT = 10; } # Outside the function, TT is a typedef, but inside (starting and # ending with the braces) it's a parameter. The trouble begins with # yacc's lookahead token. We don't know if we're declaring or # defining a function until we see LBRACE, but if we wait for yacc to # trigger a rule on that token, then TT will have already been read # and incorrectly interpreted as TYPEID. We need to add the # parameters to the scope the moment the lexer sees LBRACE. # if self._get_yacc_lookahead_token().type == "LBRACE": if func.args is not None: for param in func.args.params: if isinstance(param, c_ast.EllipsisParam): break self._add_identifier(param.name, param.coord) p[0] = self._type_modify_decl(decl=p[1], modifier=func) def p_pointer(self, p): """ pointer : TIMES type_qualifier_list_opt | TIMES type_qualifier_list_opt pointer """ coord = self._token_coord(p, 1) # Pointer decls nest from inside out. This is important when different # levels have different qualifiers. For example: # # char * const * p; # # Means "pointer to const pointer to char" # # While: # # char ** const p; # # Means "const pointer to pointer to char" # # So when we construct PtrDecl nestings, the leftmost pointer goes in # as the most nested type. 
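        #
        # For 'char * const * p;' (added illustration) this produces:
        #
        #     PtrDecl(quals=[])             # the rightmost '*': p itself
        #       PtrDecl(quals=['const'])    # the leftmost '* const'
        #         TypeDecl('p') -> IdentifierType(['char'])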
nested_type = c_ast.PtrDecl(quals=p[2] or [], type=None, coord=coord) if len(p) > 3: tail_type = p[3] while tail_type.type is not None: tail_type = tail_type.type tail_type.type = nested_type p[0] = p[3] else: p[0] = nested_type def p_type_qualifier_list(self, p): """ type_qualifier_list : type_qualifier | type_qualifier_list type_qualifier """ p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] def p_parameter_type_list(self, p): """ parameter_type_list : parameter_list | parameter_list COMMA ELLIPSIS """ if len(p) > 2: p[1].params.append(c_ast.EllipsisParam(self._token_coord(p, 3))) p[0] = p[1] def p_parameter_list(self, p): """ parameter_list : parameter_declaration | parameter_list COMMA parameter_declaration """ if len(p) == 2: # single parameter p[0] = c_ast.ParamList([p[1]], p[1].coord) else: p[1].params.append(p[3]) p[0] = p[1] # From ISO/IEC 9899:TC2, 6.7.5.3.11: # "If, in a parameter declaration, an identifier can be treated either # as a typedef name or as a parameter name, it shall be taken as a # typedef name." # # Inside a parameter declaration, once we've reduced declaration specifiers, # if we shift in an LPAREN and see a TYPEID, it could be either an abstract # declarator or a declarator nested inside parens. This rule tells us to # always treat it as an abstract declarator. Therefore, we only accept # `id_declarator`s and `typeid_noparen_declarator`s. def p_parameter_declaration_1(self, p): """ parameter_declaration : declaration_specifiers id_declarator | declaration_specifiers typeid_noparen_declarator """ spec = p[1] if not spec['type']: spec['type'] = [c_ast.IdentifierType(['int'], coord=self._token_coord(p, 1))] p[0] = self._build_declarations( spec=spec, decls=[dict(decl=p[2])])[0] def p_parameter_declaration_2(self, p): """ parameter_declaration : declaration_specifiers abstract_declarator_opt """ spec = p[1] if not spec['type']: spec['type'] = [c_ast.IdentifierType(['int'], coord=self._token_coord(p, 1))] # Parameters can have the same names as typedefs. The trouble is that # the parameter's name gets grouped into declaration_specifiers, making # it look like an old-style declaration; compensate. 
# if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \ self._is_type_in_scope(spec['type'][-1].names[0]): decl = self._build_declarations( spec=spec, decls=[dict(decl=p[2], init=None)])[0] # This truly is an old-style parameter declaration # else: decl = c_ast.Typename( name='', quals=spec['qual'], align=None, type=p[2] or c_ast.TypeDecl(None, None, None, None), coord=self._token_coord(p, 2)) typename = spec['type'] decl = self._fix_decl_name_type(decl, typename) p[0] = decl def p_identifier_list(self, p): """ identifier_list : identifier | identifier_list COMMA identifier """ if len(p) == 2: # single parameter p[0] = c_ast.ParamList([p[1]], p[1].coord) else: p[1].params.append(p[3]) p[0] = p[1] def p_initializer_1(self, p): """ initializer : assignment_expression """ p[0] = p[1] def p_initializer_2(self, p): """ initializer : brace_open initializer_list_opt brace_close | brace_open initializer_list COMMA brace_close """ if p[2] is None: p[0] = c_ast.InitList([], self._token_coord(p, 1)) else: p[0] = p[2] def p_initializer_list(self, p): """ initializer_list : designation_opt initializer | initializer_list COMMA designation_opt initializer """ if len(p) == 3: # single initializer init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2]) p[0] = c_ast.InitList([init], p[2].coord) else: init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4]) p[1].exprs.append(init) p[0] = p[1] def p_designation(self, p): """ designation : designator_list EQUALS """ p[0] = p[1] # Designators are represented as a list of nodes, in the order in which # they're written in the code. # def p_designator_list(self, p): """ designator_list : designator | designator_list designator """ p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] def p_designator(self, p): """ designator : LBRACKET constant_expression RBRACKET | PERIOD identifier """ p[0] = p[2] def p_type_name(self, p): """ type_name : specifier_qualifier_list abstract_declarator_opt """ typename = c_ast.Typename( name='', quals=p[1]['qual'][:], align=None, type=p[2] or c_ast.TypeDecl(None, None, None, None), coord=self._token_coord(p, 2)) p[0] = self._fix_decl_name_type(typename, p[1]['type']) def p_abstract_declarator_1(self, p): """ abstract_declarator : pointer """ dummytype = c_ast.TypeDecl(None, None, None, None) p[0] = self._type_modify_decl( decl=dummytype, modifier=p[1]) def p_abstract_declarator_2(self, p): """ abstract_declarator : pointer direct_abstract_declarator """ p[0] = self._type_modify_decl(p[2], p[1]) def p_abstract_declarator_3(self, p): """ abstract_declarator : direct_abstract_declarator """ p[0] = p[1] # Creating and using direct_abstract_declarator_opt here # instead of listing both direct_abstract_declarator and the # lack of it in the beginning of _1 and _2 caused two # shift/reduce errors. 
# def p_direct_abstract_declarator_1(self, p): """ direct_abstract_declarator : LPAREN abstract_declarator RPAREN """ p[0] = p[2] def p_direct_abstract_declarator_2(self, p): """ direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=p[3], dim_quals=[], coord=p[1].coord) p[0] = self._type_modify_decl(decl=p[1], modifier=arr) def p_direct_abstract_declarator_3(self, p): """ direct_abstract_declarator : LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET """ quals = (p[2] if len(p) > 4 else []) or [] p[0] = c_ast.ArrayDecl( type=c_ast.TypeDecl(None, None, None, None), dim=p[3] if len(p) > 4 else p[2], dim_quals=quals, coord=self._token_coord(p, 1)) def p_direct_abstract_declarator_4(self, p): """ direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=c_ast.ID(p[3], self._token_coord(p, 3)), dim_quals=[], coord=p[1].coord) p[0] = self._type_modify_decl(decl=p[1], modifier=arr) def p_direct_abstract_declarator_5(self, p): """ direct_abstract_declarator : LBRACKET TIMES RBRACKET """ p[0] = c_ast.ArrayDecl( type=c_ast.TypeDecl(None, None, None, None), dim=c_ast.ID(p[3], self._token_coord(p, 3)), dim_quals=[], coord=self._token_coord(p, 1)) def p_direct_abstract_declarator_6(self, p): """ direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN """ func = c_ast.FuncDecl( args=p[3], type=None, coord=p[1].coord) p[0] = self._type_modify_decl(decl=p[1], modifier=func) def p_direct_abstract_declarator_7(self, p): """ direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN """ p[0] = c_ast.FuncDecl( args=p[2], type=c_ast.TypeDecl(None, None, None, None), coord=self._token_coord(p, 1)) # declaration is a list, statement isn't. 
To make it consistent, block_item # will always be a list # def p_block_item(self, p): """ block_item : declaration | statement """ p[0] = p[1] if isinstance(p[1], list) else [p[1]] # Since we made block_item a list, this just combines lists # def p_block_item_list(self, p): """ block_item_list : block_item | block_item_list block_item """ # Empty block items (plain ';') produce [None], so ignore them p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2] def p_compound_statement_1(self, p): """ compound_statement : brace_open block_item_list_opt brace_close """ p[0] = c_ast.Compound( block_items=p[2], coord=self._token_coord(p, 1)) def p_labeled_statement_1(self, p): """ labeled_statement : ID COLON pragmacomp_or_statement """ p[0] = c_ast.Label(p[1], p[3], self._token_coord(p, 1)) def p_labeled_statement_2(self, p): """ labeled_statement : CASE constant_expression COLON pragmacomp_or_statement """ p[0] = c_ast.Case(p[2], [p[4]], self._token_coord(p, 1)) def p_labeled_statement_3(self, p): """ labeled_statement : DEFAULT COLON pragmacomp_or_statement """ p[0] = c_ast.Default([p[3]], self._token_coord(p, 1)) def p_selection_statement_1(self, p): """ selection_statement : IF LPAREN expression RPAREN pragmacomp_or_statement """ p[0] = c_ast.If(p[3], p[5], None, self._token_coord(p, 1)) def p_selection_statement_2(self, p): """ selection_statement : IF LPAREN expression RPAREN statement ELSE pragmacomp_or_statement """ p[0] = c_ast.If(p[3], p[5], p[7], self._token_coord(p, 1)) def p_selection_statement_3(self, p): """ selection_statement : SWITCH LPAREN expression RPAREN pragmacomp_or_statement """ p[0] = fix_switch_cases( c_ast.Switch(p[3], p[5], self._token_coord(p, 1))) def p_iteration_statement_1(self, p): """ iteration_statement : WHILE LPAREN expression RPAREN pragmacomp_or_statement """ p[0] = c_ast.While(p[3], p[5], self._token_coord(p, 1)) def p_iteration_statement_2(self, p): """ iteration_statement : DO pragmacomp_or_statement WHILE LPAREN expression RPAREN SEMI """ p[0] = c_ast.DoWhile(p[5], p[2], self._token_coord(p, 1)) def p_iteration_statement_3(self, p): """ iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """ p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._token_coord(p, 1)) def p_iteration_statement_4(self, p): """ iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """ p[0] = c_ast.For(c_ast.DeclList(p[3], self._token_coord(p, 1)), p[4], p[6], p[8], self._token_coord(p, 1)) def p_jump_statement_1(self, p): """ jump_statement : GOTO ID SEMI """ p[0] = c_ast.Goto(p[2], self._token_coord(p, 1)) def p_jump_statement_2(self, p): """ jump_statement : BREAK SEMI """ p[0] = c_ast.Break(self._token_coord(p, 1)) def p_jump_statement_3(self, p): """ jump_statement : CONTINUE SEMI """ p[0] = c_ast.Continue(self._token_coord(p, 1)) def p_jump_statement_4(self, p): """ jump_statement : RETURN expression SEMI | RETURN SEMI """ p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._token_coord(p, 1)) def p_expression_statement(self, p): """ expression_statement : expression_opt SEMI """ if p[1] is None: p[0] = c_ast.EmptyStatement(self._token_coord(p, 2)) else: p[0] = p[1] def p_expression(self, p): """ expression : assignment_expression | expression COMMA assignment_expression """ if len(p) == 2: p[0] = p[1] else: if not isinstance(p[1], c_ast.ExprList): p[1] = c_ast.ExprList([p[1]], p[1].coord) p[1].exprs.append(p[3]) p[0] = p[1] def 
p_parenthesized_compound_expression(self, p): """ assignment_expression : LPAREN compound_statement RPAREN """ p[0] = p[2] def p_typedef_name(self, p): """ typedef_name : TYPEID """ p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1)) def p_assignment_expression(self, p): """ assignment_expression : conditional_expression | unary_expression assignment_operator assignment_expression """ if len(p) == 2: p[0] = p[1] else: p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord) # K&R2 defines these as many separate rules, to encode # precedence and associativity. Why work hard ? I'll just use # the built in precedence/associativity specification feature # of PLY. (see precedence declaration above) # def p_assignment_operator(self, p): """ assignment_operator : EQUALS | XOREQUAL | TIMESEQUAL | DIVEQUAL | MODEQUAL | PLUSEQUAL | MINUSEQUAL | LSHIFTEQUAL | RSHIFTEQUAL | ANDEQUAL | OREQUAL """ p[0] = p[1] def p_constant_expression(self, p): """ constant_expression : conditional_expression """ p[0] = p[1] def p_conditional_expression(self, p): """ conditional_expression : binary_expression | binary_expression CONDOP expression COLON conditional_expression """ if len(p) == 2: p[0] = p[1] else: p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord) def p_binary_expression(self, p): """ binary_expression : cast_expression | binary_expression TIMES binary_expression | binary_expression DIVIDE binary_expression | binary_expression MOD binary_expression | binary_expression PLUS binary_expression | binary_expression MINUS binary_expression | binary_expression RSHIFT binary_expression | binary_expression LSHIFT binary_expression | binary_expression LT binary_expression | binary_expression LE binary_expression | binary_expression GE binary_expression | binary_expression GT binary_expression | binary_expression EQ binary_expression | binary_expression NE binary_expression | binary_expression AND binary_expression | binary_expression OR binary_expression | binary_expression XOR binary_expression | binary_expression LAND binary_expression | binary_expression LOR binary_expression """ if len(p) == 2: p[0] = p[1] else: p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord) def p_cast_expression_1(self, p): """ cast_expression : unary_expression """ p[0] = p[1] def p_cast_expression_2(self, p): """ cast_expression : LPAREN type_name RPAREN cast_expression """ p[0] = c_ast.Cast(p[2], p[4], self._token_coord(p, 1)) def p_unary_expression_1(self, p): """ unary_expression : postfix_expression """ p[0] = p[1] def p_unary_expression_2(self, p): """ unary_expression : PLUSPLUS unary_expression | MINUSMINUS unary_expression | unary_operator cast_expression """ p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord) def p_unary_expression_3(self, p): """ unary_expression : SIZEOF unary_expression | SIZEOF LPAREN type_name RPAREN | _ALIGNOF LPAREN type_name RPAREN """ p[0] = c_ast.UnaryOp( p[1], p[2] if len(p) == 3 else p[3], self._token_coord(p, 1)) def p_unary_operator(self, p): """ unary_operator : AND | TIMES | PLUS | MINUS | NOT | LNOT """ p[0] = p[1] def p_postfix_expression_1(self, p): """ postfix_expression : primary_expression """ p[0] = p[1] def p_postfix_expression_2(self, p): """ postfix_expression : postfix_expression LBRACKET expression RBRACKET """ p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord) def p_postfix_expression_3(self, p): """ postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN | postfix_expression LPAREN RPAREN """ p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, 
p[1].coord) def p_postfix_expression_4(self, p): """ postfix_expression : postfix_expression PERIOD ID | postfix_expression PERIOD TYPEID | postfix_expression ARROW ID | postfix_expression ARROW TYPEID """ field = c_ast.ID(p[3], self._token_coord(p, 3)) p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord) def p_postfix_expression_5(self, p): """ postfix_expression : postfix_expression PLUSPLUS | postfix_expression MINUSMINUS """ p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord) def p_postfix_expression_6(self, p): """ postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close | LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close """ p[0] = c_ast.CompoundLiteral(p[2], p[5]) def p_primary_expression_1(self, p): """ primary_expression : identifier """ p[0] = p[1] def p_primary_expression_2(self, p): """ primary_expression : constant """ p[0] = p[1] def p_primary_expression_3(self, p): """ primary_expression : unified_string_literal | unified_wstring_literal """ p[0] = p[1] def p_primary_expression_4(self, p): """ primary_expression : LPAREN expression RPAREN """ p[0] = p[2] def p_primary_expression_5(self, p): """ primary_expression : OFFSETOF LPAREN type_name COMMA offsetof_member_designator RPAREN """ coord = self._token_coord(p, 1) p[0] = c_ast.FuncCall(c_ast.ID(p[1], coord), c_ast.ExprList([p[3], p[5]], coord), coord) def p_offsetof_member_designator(self, p): """ offsetof_member_designator : identifier | offsetof_member_designator PERIOD identifier | offsetof_member_designator LBRACKET expression RBRACKET """ if len(p) == 2: p[0] = p[1] elif len(p) == 4: p[0] = c_ast.StructRef(p[1], p[2], p[3], p[1].coord) elif len(p) == 5: p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord) else: raise NotImplementedError("Unexpected parsing state. len(p): %u" % len(p)) def p_argument_expression_list(self, p): """ argument_expression_list : assignment_expression | argument_expression_list COMMA assignment_expression """ if len(p) == 2: # single expr p[0] = c_ast.ExprList([p[1]], p[1].coord) else: p[1].exprs.append(p[3]) p[0] = p[1] def p_identifier(self, p): """ identifier : ID """ p[0] = c_ast.ID(p[1], self._token_coord(p, 1)) def p_constant_1(self, p): """ constant : INT_CONST_DEC | INT_CONST_OCT | INT_CONST_HEX | INT_CONST_BIN | INT_CONST_CHAR """ uCount = 0 lCount = 0 for x in p[1][-3:]: if x in ('l', 'L'): lCount += 1 elif x in ('u', 'U'): uCount += 1 t = '' if uCount > 1: raise ValueError('Constant cannot have more than one u/U suffix.') elif lCount > 2: raise ValueError('Constant cannot have more than two l/L suffix.') prefix = 'unsigned ' * uCount + 'long ' * lCount p[0] = c_ast.Constant( prefix + 'int', p[1], self._token_coord(p, 1)) def p_constant_2(self, p): """ constant : FLOAT_CONST | HEX_FLOAT_CONST """ if 'x' in p[1].lower(): t = 'float' else: if p[1][-1] in ('f', 'F'): t = 'float' elif p[1][-1] in ('l', 'L'): t = 'long double' else: t = 'double' p[0] = c_ast.Constant( t, p[1], self._token_coord(p, 1)) def p_constant_3(self, p): """ constant : CHAR_CONST | WCHAR_CONST | U8CHAR_CONST | U16CHAR_CONST | U32CHAR_CONST """ p[0] = c_ast.Constant( 'char', p[1], self._token_coord(p, 1)) # The "unified" string and wstring literal rules are for supporting # concatenation of adjacent string literals. # I.e. 
"hello " "world" is seen by the C compiler as a single string literal # with the value "hello world" # def p_unified_string_literal(self, p): """ unified_string_literal : STRING_LITERAL | unified_string_literal STRING_LITERAL """ if len(p) == 2: # single literal p[0] = c_ast.Constant( 'string', p[1], self._token_coord(p, 1)) else: p[1].value = p[1].value[:-1] + p[2][1:] p[0] = p[1] def p_unified_wstring_literal(self, p): """ unified_wstring_literal : WSTRING_LITERAL | U8STRING_LITERAL | U16STRING_LITERAL | U32STRING_LITERAL | unified_wstring_literal WSTRING_LITERAL | unified_wstring_literal U8STRING_LITERAL | unified_wstring_literal U16STRING_LITERAL | unified_wstring_literal U32STRING_LITERAL """ if len(p) == 2: # single literal p[0] = c_ast.Constant( 'string', p[1], self._token_coord(p, 1)) else: p[1].value = p[1].value.rstrip()[:-1] + p[2][2:] p[0] = p[1] def p_brace_open(self, p): """ brace_open : LBRACE """ p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_brace_close(self, p): """ brace_close : RBRACE """ p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_empty(self, p): 'empty : ' p[0] = None def p_error(self, p): # If error recovery is added here in the future, make sure # _get_yacc_lookahead_token still works! # if p: self._parse_error( 'before: %s' % p.value, self._coord(lineno=p.lineno, column=self.clex.find_tok_column(p))) else: self._parse_error('At end of input', self.clex.filename)
73,680
Python
37.03872
134
0.525841
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/c_generator.py
#------------------------------------------------------------------------------ # pycparser: c_generator.py # # C code generator from pycparser AST nodes. # # Eli Bendersky [https://eli.thegreenplace.net/] # License: BSD #------------------------------------------------------------------------------ from . import c_ast class CGenerator(object): """ Uses the same visitor pattern as c_ast.NodeVisitor, but modified to return a value from each visit method, using string accumulation in generic_visit. """ def __init__(self, reduce_parentheses=False): """ Constructs C-code generator reduce_parentheses: if True, eliminates needless parentheses on binary operators """ # Statements start with indentation of self.indent_level spaces, using # the _make_indent method. self.indent_level = 0 self.reduce_parentheses = reduce_parentheses def _make_indent(self): return ' ' * self.indent_level def visit(self, node): method = 'visit_' + node.__class__.__name__ return getattr(self, method, self.generic_visit)(node) def generic_visit(self, node): if node is None: return '' else: return ''.join(self.visit(c) for c_name, c in node.children()) def visit_Constant(self, n): return n.value def visit_ID(self, n): return n.name def visit_Pragma(self, n): ret = '#pragma' if n.string: ret += ' ' + n.string return ret def visit_ArrayRef(self, n): arrref = self._parenthesize_unless_simple(n.name) return arrref + '[' + self.visit(n.subscript) + ']' def visit_StructRef(self, n): sref = self._parenthesize_unless_simple(n.name) return sref + n.type + self.visit(n.field) def visit_FuncCall(self, n): fref = self._parenthesize_unless_simple(n.name) return fref + '(' + self.visit(n.args) + ')' def visit_UnaryOp(self, n): if n.op == 'sizeof': # Always parenthesize the argument of sizeof since it can be # a name. return 'sizeof(%s)' % self.visit(n.expr) else: operand = self._parenthesize_unless_simple(n.expr) if n.op == 'p++': return '%s++' % operand elif n.op == 'p--': return '%s--' % operand else: return '%s%s' % (n.op, operand) # Precedence map of binary operators: precedence_map = { # Should be in sync with c_parser.CParser.precedence # Higher numbers are stronger binding '||': 0, # weakest binding '&&': 1, '|': 2, '^': 3, '&': 4, '==': 5, '!=': 5, '>': 6, '>=': 6, '<': 6, '<=': 6, '>>': 7, '<<': 7, '+': 8, '-': 8, '*': 9, '/': 9, '%': 9 # strongest binding } def visit_BinaryOp(self, n): # Note: all binary operators are left-to-right associative # # If `n.left.op` has a stronger or equally binding precedence in # comparison to `n.op`, no parenthesis are needed for the left: # e.g., `(a*b) + c` is equivalent to `a*b + c`, as well as # `(a+b) - c` is equivalent to `a+b - c` (same precedence). # If the left operator is weaker binding than the current, then # parentheses are necessary: # e.g., `(a+b) * c` is NOT equivalent to `a+b * c`. lval_str = self._parenthesize_if( n.left, lambda d: not (self._is_simple_node(d) or self.reduce_parentheses and isinstance(d, c_ast.BinaryOp) and self.precedence_map[d.op] >= self.precedence_map[n.op])) # If `n.right.op` has a stronger -but not equal- binding precedence, # parenthesis can be omitted on the right: # e.g., `a + (b*c)` is equivalent to `a + b*c`. # If the right operator is weaker or equally binding, then parentheses # are necessary: # e.g., `a * (b+c)` is NOT equivalent to `a * b+c` and # `a - (b+c)` is NOT equivalent to `a - b+c` (same precedence). 
rval_str = self._parenthesize_if( n.right, lambda d: not (self._is_simple_node(d) or self.reduce_parentheses and isinstance(d, c_ast.BinaryOp) and self.precedence_map[d.op] > self.precedence_map[n.op])) return '%s %s %s' % (lval_str, n.op, rval_str) def visit_Assignment(self, n): rval_str = self._parenthesize_if( n.rvalue, lambda n: isinstance(n, c_ast.Assignment)) return '%s %s %s' % (self.visit(n.lvalue), n.op, rval_str) def visit_IdentifierType(self, n): return ' '.join(n.names) def _visit_expr(self, n): if isinstance(n, c_ast.InitList): return '{' + self.visit(n) + '}' elif isinstance(n, c_ast.ExprList): return '(' + self.visit(n) + ')' else: return self.visit(n) def visit_Decl(self, n, no_type=False): # no_type is used when a Decl is part of a DeclList, where the type is # explicitly only for the first declaration in a list. # s = n.name if no_type else self._generate_decl(n) if n.bitsize: s += ' : ' + self.visit(n.bitsize) if n.init: s += ' = ' + self._visit_expr(n.init) return s def visit_DeclList(self, n): s = self.visit(n.decls[0]) if len(n.decls) > 1: s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True) for decl in n.decls[1:]) return s def visit_Typedef(self, n): s = '' if n.storage: s += ' '.join(n.storage) + ' ' s += self._generate_type(n.type) return s def visit_Cast(self, n): s = '(' + self._generate_type(n.to_type, emit_declname=False) + ')' return s + ' ' + self._parenthesize_unless_simple(n.expr) def visit_ExprList(self, n): visited_subexprs = [] for expr in n.exprs: visited_subexprs.append(self._visit_expr(expr)) return ', '.join(visited_subexprs) def visit_InitList(self, n): visited_subexprs = [] for expr in n.exprs: visited_subexprs.append(self._visit_expr(expr)) return ', '.join(visited_subexprs) def visit_Enum(self, n): return self._generate_struct_union_enum(n, name='enum') def visit_Alignas(self, n): return '_Alignas({})'.format(self.visit(n.alignment)) def visit_Enumerator(self, n): if not n.value: return '{indent}{name},\n'.format( indent=self._make_indent(), name=n.name, ) else: return '{indent}{name} = {value},\n'.format( indent=self._make_indent(), name=n.name, value=self.visit(n.value), ) def visit_FuncDef(self, n): decl = self.visit(n.decl) self.indent_level = 0 body = self.visit(n.body) if n.param_decls: knrdecls = ';\n'.join(self.visit(p) for p in n.param_decls) return decl + '\n' + knrdecls + ';\n' + body + '\n' else: return decl + '\n' + body + '\n' def visit_FileAST(self, n): s = '' for ext in n.ext: if isinstance(ext, c_ast.FuncDef): s += self.visit(ext) elif isinstance(ext, c_ast.Pragma): s += self.visit(ext) + '\n' else: s += self.visit(ext) + ';\n' return s def visit_Compound(self, n): s = self._make_indent() + '{\n' self.indent_level += 2 if n.block_items: s += ''.join(self._generate_stmt(stmt) for stmt in n.block_items) self.indent_level -= 2 s += self._make_indent() + '}\n' return s def visit_CompoundLiteral(self, n): return '(' + self.visit(n.type) + '){' + self.visit(n.init) + '}' def visit_EmptyStatement(self, n): return ';' def visit_ParamList(self, n): return ', '.join(self.visit(param) for param in n.params) def visit_Return(self, n): s = 'return' if n.expr: s += ' ' + self.visit(n.expr) return s + ';' def visit_Break(self, n): return 'break;' def visit_Continue(self, n): return 'continue;' def visit_TernaryOp(self, n): s = '(' + self._visit_expr(n.cond) + ') ? 
' s += '(' + self._visit_expr(n.iftrue) + ') : ' s += '(' + self._visit_expr(n.iffalse) + ')' return s def visit_If(self, n): s = 'if (' if n.cond: s += self.visit(n.cond) s += ')\n' s += self._generate_stmt(n.iftrue, add_indent=True) if n.iffalse: s += self._make_indent() + 'else\n' s += self._generate_stmt(n.iffalse, add_indent=True) return s def visit_For(self, n): s = 'for (' if n.init: s += self.visit(n.init) s += ';' if n.cond: s += ' ' + self.visit(n.cond) s += ';' if n.next: s += ' ' + self.visit(n.next) s += ')\n' s += self._generate_stmt(n.stmt, add_indent=True) return s def visit_While(self, n): s = 'while (' if n.cond: s += self.visit(n.cond) s += ')\n' s += self._generate_stmt(n.stmt, add_indent=True) return s def visit_DoWhile(self, n): s = 'do\n' s += self._generate_stmt(n.stmt, add_indent=True) s += self._make_indent() + 'while (' if n.cond: s += self.visit(n.cond) s += ');' return s def visit_StaticAssert(self, n): s = '_Static_assert(' s += self.visit(n.cond) if n.message: s += ',' s += self.visit(n.message) s += ')' return s def visit_Switch(self, n): s = 'switch (' + self.visit(n.cond) + ')\n' s += self._generate_stmt(n.stmt, add_indent=True) return s def visit_Case(self, n): s = 'case ' + self.visit(n.expr) + ':\n' for stmt in n.stmts: s += self._generate_stmt(stmt, add_indent=True) return s def visit_Default(self, n): s = 'default:\n' for stmt in n.stmts: s += self._generate_stmt(stmt, add_indent=True) return s def visit_Label(self, n): return n.name + ':\n' + self._generate_stmt(n.stmt) def visit_Goto(self, n): return 'goto ' + n.name + ';' def visit_EllipsisParam(self, n): return '...' def visit_Struct(self, n): return self._generate_struct_union_enum(n, 'struct') def visit_Typename(self, n): return self._generate_type(n.type) def visit_Union(self, n): return self._generate_struct_union_enum(n, 'union') def visit_NamedInitializer(self, n): s = '' for name in n.name: if isinstance(name, c_ast.ID): s += '.' + name.name else: s += '[' + self.visit(name) + ']' s += ' = ' + self._visit_expr(n.expr) return s def visit_FuncDecl(self, n): return self._generate_type(n) def visit_ArrayDecl(self, n): return self._generate_type(n, emit_declname=False) def visit_TypeDecl(self, n): return self._generate_type(n, emit_declname=False) def visit_PtrDecl(self, n): return self._generate_type(n, emit_declname=False) def _generate_struct_union_enum(self, n, name): """ Generates code for structs, unions, and enums. name should be 'struct', 'union', or 'enum'. """ if name in ('struct', 'union'): members = n.decls body_function = self._generate_struct_union_body else: assert name == 'enum' members = None if n.values is None else n.values.enumerators body_function = self._generate_enum_body s = name + ' ' + (n.name or '') if members is not None: # None means no members # Empty sequence means an empty list of members s += '\n' s += self._make_indent() self.indent_level += 2 s += '{\n' s += body_function(members) self.indent_level -= 2 s += self._make_indent() + '}' return s def _generate_struct_union_body(self, members): return ''.join(self._generate_stmt(decl) for decl in members) def _generate_enum_body(self, members): # `[:-2] + '\n'` removes the final `,` from the enumerator list return ''.join(self.visit(value) for value in members)[:-2] + '\n' def _generate_stmt(self, n, add_indent=False): """ Generation from a statement node. This method exists as a wrapper for individual visit_* methods to handle different treatment of some statements in this context. 
""" typ = type(n) if add_indent: self.indent_level += 2 indent = self._make_indent() if add_indent: self.indent_level -= 2 if typ in ( c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp, c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef, c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef, c_ast.ExprList): # These can also appear in an expression context so no semicolon # is added to them automatically # return indent + self.visit(n) + ';\n' elif typ in (c_ast.Compound,): # No extra indentation required before the opening brace of a # compound - because it consists of multiple lines it has to # compute its own indentation. # return self.visit(n) elif typ in (c_ast.If,): return indent + self.visit(n) else: return indent + self.visit(n) + '\n' def _generate_decl(self, n): """ Generation from a Decl node. """ s = '' if n.funcspec: s = ' '.join(n.funcspec) + ' ' if n.storage: s += ' '.join(n.storage) + ' ' if n.align: s += self.visit(n.align[0]) + ' ' s += self._generate_type(n.type) return s def _generate_type(self, n, modifiers=[], emit_declname = True): """ Recursive generation from a type node. n is the type node. modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers encountered on the way down to a TypeDecl, to allow proper generation from it. """ typ = type(n) #~ print(n, modifiers) if typ == c_ast.TypeDecl: s = '' if n.quals: s += ' '.join(n.quals) + ' ' s += self.visit(n.type) nstr = n.declname if n.declname and emit_declname else '' # Resolve modifiers. # Wrap in parens to distinguish pointer to array and pointer to # function syntax. # for i, modifier in enumerate(modifiers): if isinstance(modifier, c_ast.ArrayDecl): if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)): nstr = '(' + nstr + ')' nstr += '[' if modifier.dim_quals: nstr += ' '.join(modifier.dim_quals) + ' ' nstr += self.visit(modifier.dim) + ']' elif isinstance(modifier, c_ast.FuncDecl): if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)): nstr = '(' + nstr + ')' nstr += '(' + self.visit(modifier.args) + ')' elif isinstance(modifier, c_ast.PtrDecl): if modifier.quals: nstr = '* %s%s' % (' '.join(modifier.quals), ' ' + nstr if nstr else '') else: nstr = '*' + nstr if nstr: s += ' ' + nstr return s elif typ == c_ast.Decl: return self._generate_decl(n.type) elif typ == c_ast.Typename: return self._generate_type(n.type, emit_declname = emit_declname) elif typ == c_ast.IdentifierType: return ' '.join(n.names) + ' ' elif typ in (c_ast.ArrayDecl, c_ast.PtrDecl, c_ast.FuncDecl): return self._generate_type(n.type, modifiers + [n], emit_declname = emit_declname) else: return self.visit(n) def _parenthesize_if(self, n, condition): """ Visits 'n' and returns its string representation, parenthesized if the condition function applied to the node returns True. """ s = self._visit_expr(n) if condition(n): return '(' + s + ')' else: return s def _parenthesize_unless_simple(self, n): """ Common use case for _parenthesize_if """ return self._parenthesize_if(n, lambda d: not self._is_simple_node(d)) def _is_simple_node(self, n): """ Returns True for nodes that are "simple" - i.e. nodes that always have higher precedence than operators. """ return isinstance(n, (c_ast.Constant, c_ast.ID, c_ast.ArrayRef, c_ast.StructRef, c_ast.FuncCall))
17,772
Python
34.333996
83
0.507934
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/plyparser.py
#----------------------------------------------------------------- # plyparser.py # # PLYParser class and other utilities for simplifying programming # parsers with PLY # # Eli Bendersky [https://eli.thegreenplace.net/] # License: BSD #----------------------------------------------------------------- import warnings class Coord(object): """ Coordinates of a syntactic element. Consists of: - File name - Line number - (optional) column number, for the Lexer """ __slots__ = ('file', 'line', 'column', '__weakref__') def __init__(self, file, line, column=None): self.file = file self.line = line self.column = column def __str__(self): str = "%s:%s" % (self.file, self.line) if self.column: str += ":%s" % self.column return str class ParseError(Exception): pass class PLYParser(object): def _create_opt_rule(self, rulename): """ Given a rule name, creates an optional ply.yacc rule for it. The name of the optional rule is <rulename>_opt """ optname = rulename + '_opt' def optrule(self, p): p[0] = p[1] optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename) optrule.__name__ = 'p_%s' % optname setattr(self.__class__, optrule.__name__, optrule) def _coord(self, lineno, column=None): return Coord( file=self.clex.filename, line=lineno, column=column) def _token_coord(self, p, token_idx): """ Returns the coordinates for the YaccProduction object 'p' indexed with 'token_idx'. The coordinate includes the 'lineno' and 'column'. Both follow the lex semantic, starting from 1. """ last_cr = p.lexer.lexer.lexdata.rfind('\n', 0, p.lexpos(token_idx)) if last_cr < 0: last_cr = -1 column = (p.lexpos(token_idx) - (last_cr)) return self._coord(p.lineno(token_idx), column) def _parse_error(self, msg, coord): raise ParseError("%s: %s" % (coord, msg)) def parameterized(*params): """ Decorator to create parameterized rules. Parameterized rule methods must be named starting with 'p_' and contain 'xxx', and their docstrings may contain 'xxx' and 'yyy'. These will be replaced by the given parameter tuples. For example, ``p_xxx_rule()`` with docstring 'xxx_rule : yyy' when decorated with ``@parameterized(('id', 'ID'))`` produces ``p_id_rule()`` with the docstring 'id_rule : ID'. Using multiple tuples produces multiple rules. """ def decorate(rule_func): rule_func._params = params return rule_func return decorate def template(cls): """ Class decorator to generate rules from parameterized rule templates. See `parameterized` for more information on parameterized rules. """ issued_nodoc_warning = False for attr_name in dir(cls): if attr_name.startswith('p_'): method = getattr(cls, attr_name) if hasattr(method, '_params'): # Remove the template method delattr(cls, attr_name) # Create parameterized rules from this method; only run this if # the method has a docstring. This is to address an issue when # pycparser's users are installed in -OO mode which strips # docstrings away. # See: https://github.com/eliben/pycparser/pull/198/ and # https://github.com/eliben/pycparser/issues/197 # for discussion. if method.__doc__ is not None: _create_param_rules(cls, method) elif not issued_nodoc_warning: warnings.warn( 'parsing methods must have __doc__ for pycparser to work properly', RuntimeWarning, stacklevel=2) issued_nodoc_warning = True return cls def _create_param_rules(cls, func): """ Create ply.yacc rules based on a parameterized rule function Generates new methods (one per each pair of parameters) based on the template rule function `func`, and attaches them to `cls`. 
The rule function's parameters must be accessible via its `_params` attribute. """ for xxx, yyy in func._params: # Use the template method's body for each new method def param_rule(self, p): func(self, p) # Substitute in the params for the grammar rule and function name param_rule.__doc__ = func.__doc__.replace('xxx', xxx).replace('yyy', yyy) param_rule.__name__ = func.__name__.replace('xxx', xxx) # Attach the new method to the class setattr(cls, param_rule.__name__, param_rule)
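# ---- Usage sketch (illustrative; not part of the original module) ----
# How `template` and `parameterized` combine. _DemoParser and its rule
# fragment are illustrative only.
@template
class _DemoParser(PLYParser):
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'))
    def p_xxx_rule(self, p):
        """ xxx_rule : yyy """
        p[0] = p[1]
# After decoration the template method is removed and _DemoParser instead
# carries p_id_rule (docstring 'id_rule : ID') and p_typeid_rule (docstring
# 'typeid_rule : TYPEID'), ready for ply.yacc to collect.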
4,875
Python
35.388059
91
0.568615
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ast_transforms.py
#------------------------------------------------------------------------------ # pycparser: ast_transforms.py # # Some utilities used by the parser to create a friendlier AST. # # Eli Bendersky [https://eli.thegreenplace.net/] # License: BSD #------------------------------------------------------------------------------ from . import c_ast def fix_switch_cases(switch_node): """ The 'case' statements in a 'switch' come out of parsing with one child node, so subsequent statements are just tucked to the parent Compound. Additionally, consecutive (fall-through) case statements come out messy. This is a peculiarity of the C grammar. The following: switch (myvar) { case 10: k = 10; p = k + 1; return 10; case 20: case 30: return 20; default: break; } Creates this tree (pseudo-dump): Switch ID: myvar Compound: Case 10: k = 10 p = k + 1 return 10 Case 20: Case 30: return 20 Default: break The goal of this transform is to fix this mess, turning it into the following: Switch ID: myvar Compound: Case 10: k = 10 p = k + 1 return 10 Case 20: Case 30: return 20 Default: break A fixed AST node is returned. The argument may be modified. """ assert isinstance(switch_node, c_ast.Switch) if not isinstance(switch_node.stmt, c_ast.Compound): return switch_node # The new Compound child for the Switch, which will collect children in the # correct order new_compound = c_ast.Compound([], switch_node.stmt.coord) # The last Case/Default node last_case = None # Goes over the children of the Compound below the Switch, adding them # either directly below new_compound or below the last Case as appropriate # (for `switch(cond) {}`, block_items would have been None) for child in (switch_node.stmt.block_items or []): if isinstance(child, (c_ast.Case, c_ast.Default)): # If it's a Case/Default: # 1. Add it to the Compound and mark as "last case" # 2. If its immediate child is also a Case or Default, promote it # to a sibling. new_compound.block_items.append(child) _extract_nested_case(child, new_compound.block_items) last_case = new_compound.block_items[-1] else: # Other statements are added as children to the last case, if it # exists. if last_case is None: new_compound.block_items.append(child) else: last_case.stmts.append(child) switch_node.stmt = new_compound return switch_node def _extract_nested_case(case_node, stmts_list): """ Recursively extract consecutive Case statements that are made nested by the parser and add them to the stmts_list. """ if isinstance(case_node.stmts[0], (c_ast.Case, c_ast.Default)): stmts_list.append(case_node.stmts.pop()) _extract_nested_case(stmts_list[-1], stmts_list) def fix_atomic_specifiers(decl): """ Atomic specifiers like _Atomic(type) are unusually structured, conferring a qualifier upon the contained type. This function fixes a decl with atomic specifiers to have a sane AST structure, by removing spurious Typename->TypeDecl pairs and attaching the _Atomic qualifier in the right place. """ # There can be multiple levels of _Atomic in a decl; fix them until a # fixed point is reached. while True: decl, found = _fix_atomic_specifiers_once(decl) if not found: break # Make sure to add an _Atomic qual on the topmost decl if needed. Also # restore the declname on the innermost TypeDecl (it gets placed in the # wrong place during construction). 
typ = decl while not isinstance(typ, c_ast.TypeDecl): try: typ = typ.type except AttributeError: return decl if '_Atomic' in typ.quals and '_Atomic' not in decl.quals: decl.quals.append('_Atomic') if typ.declname is None: typ.declname = decl.name return decl def _fix_atomic_specifiers_once(decl): """ Performs one 'fix' round of atomic specifiers. Returns (modified_decl, found) where found is True iff a fix was made. """ parent = decl grandparent = None node = decl.type while node is not None: if isinstance(node, c_ast.Typename) and '_Atomic' in node.quals: break try: grandparent = parent parent = node node = node.type except AttributeError: # If we've reached a node without a `type` field, it means we won't # find what we're looking for at this point; give up the search # and return the original decl unmodified. return decl, False assert isinstance(parent, c_ast.TypeDecl) grandparent.type = node.type if '_Atomic' not in node.type.quals: node.type.quals.append('_Atomic') return decl, True
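# ---- Usage sketch (illustrative; not part of the original module) ----
# The effect of fix_switch_cases. CParser already applies this transform
# while reducing a switch statement; the C snippet and the _demo_* name are
# illustrative.
def _demo_switch_shape():
    from .c_parser import CParser
    src = 'int f(int v) { switch (v) { case 1: case 2: return 0; default: return 1; } }'
    ast = CParser().parse(src)
    # In the dumped tree, the fall-through Case 1 and Case 2 appear as
    # siblings under the switch Compound, with 'return 0' attached to Case 2.
    ast.show()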
5,691
Python
33.496969
79
0.54718
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/cpp.py
# ----------------------------------------------------------------------------- # cpp.py # # Author: David Beazley (http://www.dabeaz.com) # Copyright (C) 2017 # All rights reserved # # This module implements an ANSI-C style lexical preprocessor for PLY. # ----------------------------------------------------------------------------- import sys # Some Python 3 compatibility shims if sys.version_info.major < 3: STRING_TYPES = (str, unicode) else: STRING_TYPES = str xrange = range # ----------------------------------------------------------------------------- # Default preprocessor lexer definitions. These tokens are enough to get # a basic preprocessor working. Other modules may import these if they want # ----------------------------------------------------------------------------- tokens = ( 'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND' ) literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\"" # Whitespace def t_CPP_WS(t): r'\s+' t.lexer.lineno += t.value.count("\n") return t t_CPP_POUND = r'\#' t_CPP_DPOUND = r'\#\#' # Identifier t_CPP_ID = r'[A-Za-z_][\w_]*' # Integer literal def CPP_INTEGER(t): r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)' return t t_CPP_INTEGER = CPP_INTEGER # Floating literal t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?' # String literal def t_CPP_STRING(t): r'\"([^\\\n]|(\\(.|\n)))*?\"' t.lexer.lineno += t.value.count("\n") return t # Character constant 'c' or L'c' def t_CPP_CHAR(t): r'(L)?\'([^\\\n]|(\\(.|\n)))*?\'' t.lexer.lineno += t.value.count("\n") return t # Comment def t_CPP_COMMENT1(t): r'(/\*(.|\n)*?\*/)' ncr = t.value.count("\n") t.lexer.lineno += ncr # replace with one space or a number of '\n' t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' ' return t # Line comment def t_CPP_COMMENT2(t): r'(//.*?(\n|$))' # replace with '/n' t.type = 'CPP_WS'; t.value = '\n' return t def t_error(t): t.type = t.value[0] t.value = t.value[0] t.lexer.skip(1) return t import re import copy import time import os.path # ----------------------------------------------------------------------------- # trigraph() # # Given an input string, this function replaces all trigraph sequences. # The following mapping is used: # # ??= # # ??/ \ # ??' ^ # ??( [ # ??) ] # ??! 
| # ??< { # ??> } # ??- ~ # ----------------------------------------------------------------------------- _trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''') _trigraph_rep = { '=':'#', '/':'\\', "'":'^', '(':'[', ')':']', '!':'|', '<':'{', '>':'}', '-':'~' } def trigraph(input): return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input) # ------------------------------------------------------------------ # Macro object # # This object holds information about preprocessor macros # # .name - Macro name (string) # .value - Macro value (a list of tokens) # .arglist - List of argument names # .variadic - Boolean indicating whether or not variadic macro # .vararg - Name of the variadic parameter # # When a macro is created, the macro replacement token sequence is # pre-scanned and used to create patch lists that are later used # during macro expansion # ------------------------------------------------------------------ class Macro(object): def __init__(self,name,value,arglist=None,variadic=False): self.name = name self.value = value self.arglist = arglist self.variadic = variadic if variadic: self.vararg = arglist[-1] self.source = None # ------------------------------------------------------------------ # Preprocessor object # # Object representing a preprocessor. Contains macro definitions, # include directories, and other information # ------------------------------------------------------------------ class Preprocessor(object): def __init__(self,lexer=None): if lexer is None: lexer = lex.lexer self.lexer = lexer self.macros = { } self.path = [] self.temp_path = [] # Probe the lexer for selected tokens self.lexprobe() tm = time.localtime() self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm)) self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm)) self.parser = None # ----------------------------------------------------------------------------- # tokenize() # # Utility function. Given a string of text, tokenize into a list of tokens # ----------------------------------------------------------------------------- def tokenize(self,text): tokens = [] self.lexer.input(text) while True: tok = self.lexer.token() if not tok: break tokens.append(tok) return tokens # --------------------------------------------------------------------- # error() # # Report a preprocessor error/warning of some kind # ---------------------------------------------------------------------- def error(self,file,line,msg): print("%s:%d %s" % (file,line,msg)) # ---------------------------------------------------------------------- # lexprobe() # # This method probes the preprocessor lexer object to discover # the token types of symbols that are important to the preprocessor. # If this works right, the preprocessor will simply "work" # with any suitable lexer regardless of how tokens have been named. 
# ---------------------------------------------------------------------- def lexprobe(self): # Determine the token type for identifiers self.lexer.input("identifier") tok = self.lexer.token() if not tok or tok.value != "identifier": print("Couldn't determine identifier type") else: self.t_ID = tok.type # Determine the token type for integers self.lexer.input("12345") tok = self.lexer.token() if not tok or int(tok.value) != 12345: print("Couldn't determine integer type") else: self.t_INTEGER = tok.type self.t_INTEGER_TYPE = type(tok.value) # Determine the token type for strings enclosed in double quotes self.lexer.input("\"filename\"") tok = self.lexer.token() if not tok or tok.value != "\"filename\"": print("Couldn't determine string type") else: self.t_STRING = tok.type # Determine the token type for whitespace--if any self.lexer.input(" ") tok = self.lexer.token() if not tok or tok.value != " ": self.t_SPACE = None else: self.t_SPACE = tok.type # Determine the token type for newlines self.lexer.input("\n") tok = self.lexer.token() if not tok or tok.value != "\n": self.t_NEWLINE = None print("Couldn't determine token for newlines") else: self.t_NEWLINE = tok.type self.t_WS = (self.t_SPACE, self.t_NEWLINE) # Check for other characters used by the preprocessor chars = [ '<','>','#','##','\\','(',')',',','.'] for c in chars: self.lexer.input(c) tok = self.lexer.token() if not tok or tok.value != c: print("Unable to lex '%s' required for preprocessor" % c) # ---------------------------------------------------------------------- # add_path() # # Adds a search path to the preprocessor. # ---------------------------------------------------------------------- def add_path(self,path): self.path.append(path) # ---------------------------------------------------------------------- # group_lines() # # Given an input string, this function splits it into lines. Trailing whitespace # is removed. Any line ending with \ is grouped with the next line. This # function forms the lowest level of the preprocessor---grouping into text into # a line-by-line format. # ---------------------------------------------------------------------- def group_lines(self,input): lex = self.lexer.clone() lines = [x.rstrip() for x in input.splitlines()] for i in xrange(len(lines)): j = i+1 while lines[i].endswith('\\') and (j < len(lines)): lines[i] = lines[i][:-1]+lines[j] lines[j] = "" j += 1 input = "\n".join(lines) lex.input(input) lex.lineno = 1 current_line = [] while True: tok = lex.token() if not tok: break current_line.append(tok) if tok.type in self.t_WS and '\n' in tok.value: yield current_line current_line = [] if current_line: yield current_line # ---------------------------------------------------------------------- # tokenstrip() # # Remove leading/trailing whitespace tokens from a token list # ---------------------------------------------------------------------- def tokenstrip(self,tokens): i = 0 while i < len(tokens) and tokens[i].type in self.t_WS: i += 1 del tokens[:i] i = len(tokens)-1 while i >= 0 and tokens[i].type in self.t_WS: i -= 1 del tokens[i+1:] return tokens # ---------------------------------------------------------------------- # collect_args() # # Collects comma separated arguments from a list of tokens. The arguments # must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions) # where tokencount is the number of tokens consumed, args is a list of arguments, # and positions is a list of integers containing the starting index of each # argument. 
Each argument is represented by a list of tokens. # # When collecting arguments, leading and trailing whitespace is removed # from each argument. # # This function properly handles nested parenthesis and commas---these do not # define new arguments. # ---------------------------------------------------------------------- def collect_args(self,tokenlist): args = [] positions = [] current_arg = [] nesting = 1 tokenlen = len(tokenlist) # Search for the opening '('. i = 0 while (i < tokenlen) and (tokenlist[i].type in self.t_WS): i += 1 if (i < tokenlen) and (tokenlist[i].value == '('): positions.append(i+1) else: self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments") return 0, [], [] i += 1 while i < tokenlen: t = tokenlist[i] if t.value == '(': current_arg.append(t) nesting += 1 elif t.value == ')': nesting -= 1 if nesting == 0: if current_arg: args.append(self.tokenstrip(current_arg)) positions.append(i) return i+1,args,positions current_arg.append(t) elif t.value == ',' and nesting == 1: args.append(self.tokenstrip(current_arg)) positions.append(i+1) current_arg = [] else: current_arg.append(t) i += 1 # Missing end argument self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments") return 0, [],[] # ---------------------------------------------------------------------- # macro_prescan() # # Examine the macro value (token sequence) and identify patch points # This is used to speed up macro expansion later on---we'll know # right away where to apply patches to the value to form the expansion # ---------------------------------------------------------------------- def macro_prescan(self,macro): macro.patch = [] # Standard macro arguments macro.str_patch = [] # String conversion expansion macro.var_comma_patch = [] # Variadic macro comma patch i = 0 while i < len(macro.value): if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist: argnum = macro.arglist.index(macro.value[i].value) # Conversion of argument to a string if i > 0 and macro.value[i-1].value == '#': macro.value[i] = copy.copy(macro.value[i]) macro.value[i].type = self.t_STRING del macro.value[i-1] macro.str_patch.append((argnum,i-1)) continue # Concatenation elif (i > 0 and macro.value[i-1].value == '##'): macro.patch.append(('c',argnum,i-1)) del macro.value[i-1] continue elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'): macro.patch.append(('c',argnum,i)) i += 1 continue # Standard expansion else: macro.patch.append(('e',argnum,i)) elif macro.value[i].value == '##': if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \ ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \ (macro.value[i+1].value == macro.vararg): macro.var_comma_patch.append(i-1) i += 1 macro.patch.sort(key=lambda x: x[2],reverse=True) # ---------------------------------------------------------------------- # macro_expand_args() # # Given a Macro and list of arguments (each a token list), this method # returns an expanded version of a macro. The return value is a token sequence # representing the replacement macro tokens # ---------------------------------------------------------------------- def macro_expand_args(self,macro,args): # Make a copy of the macro token sequence rep = [copy.copy(_x) for _x in macro.value] # Make string expansion patches. 
These do not alter the length of the replacement sequence str_expansion = {} for argnum, i in macro.str_patch: if argnum not in str_expansion: str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\") rep[i] = copy.copy(rep[i]) rep[i].value = str_expansion[argnum] # Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid comma_patch = False if macro.variadic and not args[-1]: for i in macro.var_comma_patch: rep[i] = None comma_patch = True # Make all other patches. The order of these matters. It is assumed that the patch list # has been sorted in reverse order of patch location since replacements will cause the # size of the replacement sequence to expand from the patch point. expanded = { } for ptype, argnum, i in macro.patch: # Concatenation. Argument is left unexpanded if ptype == 'c': rep[i:i+1] = args[argnum] # Normal expansion. Argument is macro expanded first elif ptype == 'e': if argnum not in expanded: expanded[argnum] = self.expand_macros(args[argnum]) rep[i:i+1] = expanded[argnum] # Get rid of removed comma if necessary if comma_patch: rep = [_i for _i in rep if _i] return rep # ---------------------------------------------------------------------- # expand_macros() # # Given a list of tokens, this function performs macro expansion. # The expanded argument is a dictionary that contains macros already # expanded. This is used to prevent infinite recursion. # ---------------------------------------------------------------------- def expand_macros(self,tokens,expanded=None): if expanded is None: expanded = {} i = 0 while i < len(tokens): t = tokens[i] if t.type == self.t_ID: if t.value in self.macros and t.value not in expanded: # Yes, we found a macro match expanded[t.value] = True m = self.macros[t.value] if not m.arglist: # A simple macro ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded) for e in ex: e.lineno = t.lineno tokens[i:i+1] = ex i += len(ex) else: # A macro with arguments j = i + 1 while j < len(tokens) and tokens[j].type in self.t_WS: j += 1 if tokens[j].value == '(': tokcount,args,positions = self.collect_args(tokens[j:]) if not m.variadic and len(args) != len(m.arglist): self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist))) i = j + tokcount elif m.variadic and len(args) < len(m.arglist)-1: if len(m.arglist) > 2: self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1)) else: self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1)) i = j + tokcount else: if m.variadic: if len(args) == len(m.arglist)-1: args.append([]) else: args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1] del args[len(m.arglist):] # Get macro replacement text rep = self.macro_expand_args(m,args) rep = self.expand_macros(rep,expanded) for r in rep: r.lineno = t.lineno tokens[i:j+tokcount] = rep i += len(rep) del expanded[t.value] continue elif t.value == '__LINE__': t.type = self.t_INTEGER t.value = self.t_INTEGER_TYPE(t.lineno) i += 1 return tokens # ---------------------------------------------------------------------- # evalexpr() # # Evaluate an expression token sequence for the purposes of evaluating # integral expressions. 
    # ----------------------------------------------------------------------
    def evalexpr(self,tokens):
        # tokens = tokenize(line)
        # Search for defined macros
        i = 0
        while i < len(tokens):
            if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
                j = i + 1
                needparen = False
                result = "0L"
                while j < len(tokens):
                    if tokens[j].type in self.t_WS:
                        j += 1
                        continue
                    elif tokens[j].type == self.t_ID:
                        if tokens[j].value in self.macros:
                            result = "1L"
                        else:
                            result = "0L"
                        if not needparen: break
                    elif tokens[j].value == '(':
                        needparen = True
                    elif tokens[j].value == ')':
                        break
                    else:
                        self.error(self.source,tokens[i].lineno,"Malformed defined()")
                    j += 1
                tokens[i].type = self.t_INTEGER
                tokens[i].value = self.t_INTEGER_TYPE(result)
                del tokens[i+1:j+1]
            i += 1
        tokens = self.expand_macros(tokens)
        for i,t in enumerate(tokens):
            if t.type == self.t_ID:
                tokens[i] = copy.copy(t)
                tokens[i].type = self.t_INTEGER
                tokens[i].value = self.t_INTEGER_TYPE("0L")
            elif t.type == self.t_INTEGER:
                tokens[i] = copy.copy(t)
                # Strip off any trailing suffixes
                tokens[i].value = str(tokens[i].value)
                while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
                    tokens[i].value = tokens[i].value[:-1]

        expr = "".join([str(x.value) for x in tokens])
        expr = expr.replace("&&"," and ")
        expr = expr.replace("||"," or ")
        # Rewrite logical negation with a lookahead so that '!=' is not
        # mangled (a plain str.replace of '!' turns 'a != b' into 'a  not = b').
        expr = re.sub(r'!(?!=)', ' not ', expr)
        try:
            result = eval(expr)
        except Exception:
            self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
            result = 0
        return result

    # ----------------------------------------------------------------------
    # parsegen()
    #
    # Parse an input string.
    # ----------------------------------------------------------------------
    def parsegen(self,input,source=None):
        # Replace trigraph sequences
        t = trigraph(input)
        lines = self.group_lines(t)

        if not source:
            source = ""

        self.define("__FILE__ \"%s\"" % source)

        self.source = source
        chunk = []
        enable = True
        iftrigger = False
        ifstack = []

        for x in lines:
            for i,tok in enumerate(x):
                if tok.type not in self.t_WS:
                    break
            if tok.value == '#':
                # Preprocessor directive

                # insert necessary whitespace instead of eaten tokens
                for tok in x:
                    if tok.type in self.t_WS and '\n' in tok.value:
                        chunk.append(tok)

                dirtokens = self.tokenstrip(x[i+1:])
                if dirtokens:
                    name = dirtokens[0].value
                    args = self.tokenstrip(dirtokens[1:])
                else:
                    name = ""
                    args = []

                if name == 'define':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        self.define(args)
                elif name == 'include':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        oldfile = self.macros['__FILE__']
                        for tok in self.include(args):
                            yield tok
                        self.macros['__FILE__'] = oldfile
                        self.source = source
                elif name == 'undef':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        self.undef(args)
                elif name == 'ifdef':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        if not args[0].value in self.macros:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'ifndef':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        if args[0].value in self.macros:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'if':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        result = self.evalexpr(args)
                        if not result:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'elif':
                    if ifstack:
                        if ifstack[-1][0]:     # We only pay attention if outer "if" allows this
                            if enable:         # If already true, we flip enable False
                                enable = False
                            elif not iftrigger:   # If False, but not triggered yet, we'll check expression
                                result = self.evalexpr(args)
                                if result:
                                    enable = True
                                    iftrigger = True
                    else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif") elif name == 'else': if ifstack: if ifstack[-1][0]: if enable: enable = False elif not iftrigger: enable = True iftrigger = True else: self.error(self.source,dirtokens[0].lineno,"Misplaced #else") elif name == 'endif': if ifstack: enable,iftrigger = ifstack.pop() else: self.error(self.source,dirtokens[0].lineno,"Misplaced #endif") else: # Unknown preprocessor directive pass else: # Normal text if enable: chunk.extend(x) for tok in self.expand_macros(chunk): yield tok chunk = [] # ---------------------------------------------------------------------- # include() # # Implementation of file-inclusion # ---------------------------------------------------------------------- def include(self,tokens): # Try to extract the filename and then process an include file if not tokens: return if tokens: if tokens[0].value != '<' and tokens[0].type != self.t_STRING: tokens = self.expand_macros(tokens) if tokens[0].value == '<': # Include <...> i = 1 while i < len(tokens): if tokens[i].value == '>': break i += 1 else: print("Malformed #include <...>") return filename = "".join([x.value for x in tokens[1:i]]) path = self.path + [""] + self.temp_path elif tokens[0].type == self.t_STRING: filename = tokens[0].value[1:-1] path = self.temp_path + [""] + self.path else: print("Malformed #include statement") return for p in path: iname = os.path.join(p,filename) try: data = open(iname,"r").read() dname = os.path.dirname(iname) if dname: self.temp_path.insert(0,dname) for tok in self.parsegen(data,filename): yield tok if dname: del self.temp_path[0] break except IOError: pass else: print("Couldn't find '%s'" % filename) # ---------------------------------------------------------------------- # define() # # Define a new macro # ---------------------------------------------------------------------- def define(self,tokens): if isinstance(tokens,STRING_TYPES): tokens = self.tokenize(tokens) linetok = tokens try: name = linetok[0] if len(linetok) > 1: mtype = linetok[1] else: mtype = None if not mtype: m = Macro(name.value,[]) self.macros[name.value] = m elif mtype.type in self.t_WS: # A normal macro m = Macro(name.value,self.tokenstrip(linetok[2:])) self.macros[name.value] = m elif mtype.value == '(': # A macro with arguments tokcount, args, positions = self.collect_args(linetok[1:]) variadic = False for a in args: if variadic: print("No more arguments may follow a variadic argument") break astr = "".join([str(_i.value) for _i in a]) if astr == "...": variadic = True a[0].type = self.t_ID a[0].value = '__VA_ARGS__' variadic = True del a[1:] continue elif astr[-3:] == "..." and a[0].type == self.t_ID: variadic = True del a[1:] # If, for some reason, "." 
is part of the identifier, strip off the name for the purposes # of macro expansion if a[0].value[-3:] == '...': a[0].value = a[0].value[:-3] continue if len(a) > 1 or a[0].type != self.t_ID: print("Invalid macro argument") break else: mvalue = self.tokenstrip(linetok[1+tokcount:]) i = 0 while i < len(mvalue): if i+1 < len(mvalue): if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##': del mvalue[i] continue elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS: del mvalue[i+1] i += 1 m = Macro(name.value,mvalue,[x[0].value for x in args],variadic) self.macro_prescan(m) self.macros[name.value] = m else: print("Bad macro definition") except LookupError: print("Bad macro definition") # ---------------------------------------------------------------------- # undef() # # Undefine a macro # ---------------------------------------------------------------------- def undef(self,tokens): id = tokens[0].value try: del self.macros[id] except LookupError: pass # ---------------------------------------------------------------------- # parse() # # Parse input text. # ---------------------------------------------------------------------- def parse(self,input,source=None,ignore={}): self.ignore = ignore self.parser = self.parsegen(input,source) # ---------------------------------------------------------------------- # token() # # Method to return individual tokens # ---------------------------------------------------------------------- def token(self): try: while True: tok = next(self.parser) if tok.type not in self.ignore: return tok except StopIteration: self.parser = None return None if __name__ == '__main__': import ply.lex as lex lexer = lex.lex() # Run a preprocessor import sys f = open(sys.argv[1]) input = f.read() p = Preprocessor(lexer) p.parse(input,sys.argv[1]) while True: tok = p.token() if not tok: break print(p.source, tok)
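# ---- Usage sketch (illustrative; not part of the original module) ----
# Driving Preprocessor on an in-memory string rather than a file, mirroring
# the __main__ block above. The macro and source text are illustrative.
def _demo_preprocess():
    import ply.lex as lex
    lexer = lex.lex()                  # built from this module's token rules
    p = Preprocessor(lexer)
    p.define('SQUARE(x) ((x)*(x))')    # equivalent to '#define SQUARE(x) ((x)*(x))'
    p.parse('int nine = SQUARE(3);\n', '<demo>')
    out = []
    while True:
        tok = p.token()
        if not tok:
            break
        out.append(tok.value)
    print(''.join(out))                # -> int nine = ((3)*(3));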
33,282
Python
35.736203
141
0.420588
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/yacc.py
# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2017
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
#   endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross
# between Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what one might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ---------------------------------------------------------------------------- import re import types import sys import os.path import inspect import base64 import warnings __version__ = '3.10' __tabversion__ = '3.10' #----------------------------------------------------------------------------- # === User configurable parameters === # # Change these to modify the default behavior of yacc (if you wish) #----------------------------------------------------------------------------- yaccdebug = True # Debugging mode. If set, yacc generates a # a 'parser.out' file in the current directory debug_file = 'parser.out' # Default name of the debugging file tab_module = 'parsetab' # Default name of the table module default_lr = 'LALR' # Default LR table generation method error_count = 3 # Number of symbols that must be shifted to leave recovery mode yaccdevel = False # Set to True if developing yacc. This turns off optimized # implementations of certain functions. resultlimit = 40 # Size limit of results when running in debug mode. pickle_protocol = 0 # Protocol to use when writing pickle files # String type-checking compatibility if sys.version_info[0] < 3: string_types = basestring else: string_types = str MAXINT = sys.maxsize # This object is a stand-in for a logging object created by the # logging module. PLY will use this by default to create things # such as the parser.out file. If a user wants more detailed # information, they can create their own logging object and pass # it into PLY. class PlyLogger(object): def __init__(self, f): self.f = f def debug(self, msg, *args, **kwargs): self.f.write((msg % args) + '\n') info = debug def warning(self, msg, *args, **kwargs): self.f.write('WARNING: ' + (msg % args) + '\n') def error(self, msg, *args, **kwargs): self.f.write('ERROR: ' + (msg % args) + '\n') critical = debug # Null logger is used when no output is generated. Does nothing. class NullLogger(object): def __getattribute__(self, name): return self def __call__(self, *args, **kwargs): return self # Exception raised for yacc-related errors class YaccError(Exception): pass # Format the result message that the parser produces when running in debug mode. def format_result(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) > resultlimit: repr_str = repr_str[:resultlimit] + ' ...' result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str) return result # Format stack entries when the parser is running in debug mode def format_stack_entry(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) < 16: return repr_str else: return '<%s @ 0x%x>' % (type(r).__name__, id(r)) # Panic mode error recovery support. This feature is being reworked--much of the # code here is to offer a deprecation/backwards compatible transition _errok = None _token = None _restart = None _warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error(). Instead, invoke the methods on the associated parser instance: def p_error(p): ... # Use parser.errok(), parser.token(), parser.restart() ... 
parser = yacc.yacc() ''' def errok(): warnings.warn(_warnmsg) return _errok() def restart(): warnings.warn(_warnmsg) return _restart() def token(): warnings.warn(_warnmsg) return _token() # Utility function to call the p_error() function with some deprecation hacks def call_errorfunc(errorfunc, token, parser): global _errok, _token, _restart _errok = parser.errok _token = parser.token _restart = parser.restart r = errorfunc(token) try: del _errok, _token, _restart except NameError: pass return r #----------------------------------------------------------------------------- # === LR Parsing Engine === # # The following classes are used for the LR parser itself. These are not # used during table construction and are independent of the actual LR # table generation algorithm #----------------------------------------------------------------------------- # This class is used to hold non-terminal grammar symbols during parsing. # It normally has the following attributes set: # .type = Grammar symbol type # .value = Symbol value # .lineno = Starting line number # .endlineno = Ending line number (optional, set automatically) # .lexpos = Starting lex position # .endlexpos = Ending lex position (optional, set automatically) class YaccSymbol: def __str__(self): return self.type def __repr__(self): return str(self) # This class is a wrapper around the objects actually passed to each # grammar rule. Index lookup and assignment actually assign the # .value attribute of the underlying YaccSymbol object. # The lineno() method returns the line number of a given # item (or 0 if not defined). The linespan() method returns # a tuple of (startline,endline) representing the range of lines # for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) # representing the range of positional information for a symbol. class YaccProduction: def __init__(self, s, stack=None): self.slice = s self.stack = stack self.lexer = None self.parser = None def __getitem__(self, n): if isinstance(n, slice): return [s.value for s in self.slice[n]] elif n >= 0: return self.slice[n].value else: return self.stack[n].value def __setitem__(self, n, v): self.slice[n].value = v def __getslice__(self, i, j): return [s.value for s in self.slice[i:j]] def __len__(self): return len(self.slice) def lineno(self, n): return getattr(self.slice[n], 'lineno', 0) def set_lineno(self, n, lineno): self.slice[n].lineno = lineno def linespan(self, n): startline = getattr(self.slice[n], 'lineno', 0) endline = getattr(self.slice[n], 'endlineno', startline) return startline, endline def lexpos(self, n): return getattr(self.slice[n], 'lexpos', 0) def lexspan(self, n): startpos = getattr(self.slice[n], 'lexpos', 0) endpos = getattr(self.slice[n], 'endlexpos', startpos) return startpos, endpos def error(self): raise SyntaxError # ----------------------------------------------------------------------------- # == LRParser == # # The LR Parsing engine. # ----------------------------------------------------------------------------- class LRParser: def __init__(self, lrtab, errorf): self.productions = lrtab.lr_productions self.action = lrtab.lr_action self.goto = lrtab.lr_goto self.errorfunc = errorf self.set_defaulted_states() self.errorok = True def errok(self): self.errorok = True def restart(self): del self.statestack[:] del self.symstack[:] sym = YaccSymbol() sym.type = '$end' self.symstack.append(sym) self.statestack.append(0) # Defaulted state support. # This method identifies parser states where there is only one possible reduction action. 
    # Defaulted state support.
    # This method identifies parser states where there is only one possible reduction action.
    # For such states, the parser can choose to make a rule reduction without consuming
    # the next look-ahead token.  This delayed invocation of the tokenizer can be useful in
    # certain kinds of advanced parsing situations where the lexer and parser interact with
    # each other or change states (i.e., manipulation of scope, lexer states, etc.).
    #
    # See: https://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
    def set_defaulted_states(self):
        self.defaulted_states = {}
        for state, actions in self.action.items():
            rules = list(actions.values())
            if len(rules) == 1 and rules[0] < 0:
                self.defaulted_states[state] = rules[0]

    def disable_defaulted_states(self):
        self.defaulted_states = {}

    def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        if debug or yaccdevel:
            if isinstance(debug, int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
        elif tracking:
            return self.parseopt(input, lexer, debug, tracking, tokenfunc)
        else:
            return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parsedebug().
    #
    # This is the debugging enabled version of parse().  All changes made to the
    # parsing engine should be made here.  Optimized versions of this function
    # are automatically created by the ply/ygen.py script.  This script cuts out
    # sections enclosed in markers such as this:
    #
    #      #--! DEBUG
    #      statements
    #      #--! DEBUG
    #
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        #--! parsedebug-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                         # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions                  # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None)            # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery

        #--! DEBUG
        debug.info('PLY: PARSE DEBUG START')
        #--! DEBUG

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token

        # Set up the state and symbol stacks
        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack = []                  # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack        # Put in the production
        errtoken = None                # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            #--! DEBUG
            debug.debug('')
            debug.debug('State : %s', state)
            #--!
DEBUG if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] #--! DEBUG debug.debug('Defaulted state %s: Reduce using %d', state, -t) #--! DEBUG #--! DEBUG debug.debug('Stack : %s', ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) #--! DEBUG if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t #--! DEBUG debug.debug('Action : Shift and goto state %s', t) #--! DEBUG symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None #--! DEBUG if plen: debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']', goto[statestack[-1-plen]][pname]) else: debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [], goto[statestack[-1]][pname]) #--! DEBUG if plen: targ = symstack[-plen-1:] targ[0] = sym #--! TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1, 'endlineno', t1.lineno) sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) #--! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] #--! DEBUG debug.info('Result : %s', format_result(pslice[0])) #--! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token symstack.extend(targ[1:-1]) # Put the production slice back on the stack statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: #--! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos #--! TRACKING targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object self.state = state p.callable(pslice) #--! DEBUG debug.info('Result : %s', format_result(pslice[0])) #--! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. 
Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    #--! DEBUG
                    debug.info('Done : Returning %s', format_result(result))
                    debug.info('PLY: PARSE DEBUG END')
                    #--! DEBUG
                    return result

            if t is None:
                #--! DEBUG
                debug.error('Error : %s',
                            ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                #--! DEBUG

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call the
                # user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return

                else:
                    errorcount = error_count

                # case 1: the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.  The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue

                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'

                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]

                continue

        # Call an error function here
        raise RuntimeError('yacc: internal parser error!!!\n')

        #--!
parsedebug-end # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parseopt(). # # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY! # This code is automatically generated by the ply/ygen.py script. Make # changes to the parsedebug() method instead. # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parseopt-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # If no lexer was given, we will try to use the lex module if not lexer: from . import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None if plen: targ = symstack[-plen-1:] targ[0] = sym #--! TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1, 'endlineno', t1.lineno) sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) #--! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. 
Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token symstack.extend(targ[1:-1]) # Put the production slice back on the stack statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: #--! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos #--! TRACKING targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object self.state = state p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n, 'value', None) return result if t is None: # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer self.state = state tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue #--! TRACKING if tracking: sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) #--! 
TRACKING lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() #--! TRACKING if tracking: lookahead.lineno = sym.lineno lookahead.lexpos = sym.lexpos #--! TRACKING statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! parseopt-end # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parseopt_notrack(). # # Optimized version of parseopt() with line number tracking removed. # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated # by the ply/ygen.py script. Make changes to the parsedebug() method instead. # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parseopt-notrack-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # If no lexer was given, we will try to use the lex module if not lexer: from . import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. 
Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None if plen: targ = symstack[-plen-1:] targ[0] = sym # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token symstack.extend(targ[1:-1]) # Put the production slice back on the stack statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object self.state = state p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n, 'value', None) return result if t is None: # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer self.state = state tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. 
The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! parseopt-notrack-end # ----------------------------------------------------------------------------- # === Grammar Representation === # # The following functions, classes, and variables are used to represent and # manipulate the rules that make up a grammar. # ----------------------------------------------------------------------------- # regex matching identifiers _is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$') # ----------------------------------------------------------------------------- # class Production: # # This class stores the raw information about a single production or grammar rule. # A grammar rule refers to a specification such as this: # # expr : expr PLUS term # # Here are the basic attributes defined on all productions # # name - Name of the production. For example 'expr' # prod - A list of symbols on the right side ['expr','PLUS','term'] # prec - Production precedence level # number - Production number. # func - Function that executes on reduce # file - File where production function is defined # lineno - Line number where production function is defined # # The following attributes are defined or optional. 
#       len       - Length of the production (number of symbols on right hand side)
#       usyms     - Set of unique symbols found in the production
# -----------------------------------------------------------------------------

class Production(object):
    reduced = 0
    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence

        # Internal settings used during table construction
        self.len = len(self.prod)   # Length of the production

        # Create a list of unique production symbols used in the production
        self.usyms = []
        for s in self.prod:
            if s not in self.usyms:
                self.usyms.append(s)

        # List of all LR items for the production
        self.lr_items = []
        self.lr_next = None

        # Create a string representation
        if self.prod:
            self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
        else:
            self.str = '%s -> <empty>' % self.name

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'Production(' + str(self) + ')'

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    # Return the nth lr_item from the production (or None if at the end)
    def lr_item(self, n):
        if n > len(self.prod):
            return None
        p = LRItem(self, n)
        # Precompute the list of productions immediately following.
        try:
            p.lr_after = Prodnames[p.prod[n+1]]
        except (IndexError, KeyError):
            p.lr_after = []
        try:
            p.lr_before = p.prod[n-1]
        except IndexError:
            p.lr_before = None
        return p

    # Bind the production function name to a callable
    def bind(self, pdict):
        if self.func:
            self.callable = pdict[self.func]

# This class serves as a minimal standin for Production objects when
# reading table data from files.  It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    def __init__(self, str, name, len, func, file, line):
        self.name = name
        self.len = len
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.str = str

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'MiniProduction(%s)' % self.str

    # Bind the production function name to a callable
    def bind(self, pdict):
        if self.func:
            self.callable = pdict[self.func]
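# For example, a Production for the rule 'expr : expr PLUS term' behaves like
# this (a hedged sketch; the production number used here is illustrative):
#
#     p = Production(1, 'expr', ['expr', 'PLUS', 'term'])
#     str(p)       # -> 'expr -> expr PLUS term'
#     len(p)       # -> 3
#     p.usyms      # -> ['expr', 'PLUS', 'term']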
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule.  For
# example:
#
#       expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse.  Here are
# the basic attributes:
#
#       name       - Name of the production.  For example 'expr'
#       prod       - A list of symbols on the right side ['expr', '.', 'PLUS', 'term']
#       number     - Production number.
#
#       lr_next    - Next LR item.  Example, if we are 'expr -> expr . PLUS term'
#                    then lr_next refers to 'expr -> expr PLUS . term'
#       lr_index   - LR item index (location of the ".") in the prod list.
#       lookaheads - LALR lookahead symbols for this item
#       len        - Length of the production (number of symbols on right hand side)
#       lr_after   - List of all productions that immediately follow
#       lr_before  - Grammar symbol immediately before
# -----------------------------------------------------------------------------

class LRItem(object):
    def __init__(self, p, n):
        self.name = p.name
        self.prod = list(p.prod)
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}
        self.prod.insert(n, '.')
        self.prod = tuple(self.prod)
        self.len = len(self.prod)
        self.usyms = p.usyms

    def __str__(self):
        if self.prod:
            s = '%s -> %s' % (self.name, ' '.join(self.prod))
        else:
            s = '%s -> <empty>' % self.name
        return s

    def __repr__(self):
        return 'LRItem(' + str(self) + ')'

# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols.  Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    i = len(symbols) - 1
    while i >= 0:
        if symbols[i] in terminals:
            return symbols[i]
        i -= 1
    return None

# -----------------------------------------------------------------------------
#                           === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------

class GrammarError(YaccError):
    pass

class Grammar(object):
    def __init__(self, terminals):
        self.Productions = [None]  # A list of all of the productions.  The first
                                   # entry is always reserved for the purpose of
                                   # building an augmented grammar

        self.Prodnames = {}        # A dictionary mapping the names of nonterminals to a list of all
                                   # productions of that nonterminal.

        self.Prodmap = {}          # A dictionary that is only used to detect duplicate
                                   # productions.

        self.Terminals = {}        # A dictionary mapping the names of terminal symbols to a
                                   # list of the rules where they are used.

        for term in terminals:
            self.Terminals[term] = []

        self.Terminals['error'] = []

        self.Nonterminals = {}     # A dictionary mapping names of nonterminals to a list
                                   # of rule numbers where they are used.

        self.First = {}            # A dictionary of precomputed FIRST(x) symbols

        self.Follow = {}           # A dictionary of precomputed FOLLOW(x) symbols

        self.Precedence = {}       # Precedence rules for each terminal. Contains tuples of the
                                   # form ('right',level) or ('nonassoc', level) or ('left',level)

        self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
                                    # This is only used to provide error checking and to generate
                                    # a warning about unused precedence rules.

        self.Start = None          # Starting symbol for the grammar

    def __len__(self):
        return len(self.Productions)

    def __getitem__(self, index):
        return self.Productions[index]

    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'.  level is a numeric level.
# # ----------------------------------------------------------------------------- def set_precedence(self, term, assoc, level): assert self.Productions == [None], 'Must call set_precedence() before add_production()' if term in self.Precedence: raise GrammarError('Precedence already specified for terminal %r' % term) if assoc not in ['left', 'right', 'nonassoc']: raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'") self.Precedence[term] = (assoc, level) # ----------------------------------------------------------------------------- # add_production() # # Given an action function, this function assembles a production rule and # computes its precedence level. # # The production rule is supplied as a list of symbols. For example, # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and # symbols ['expr','PLUS','term']. # # Precedence is determined by the precedence of the right-most non-terminal # or the precedence of a terminal specified by %prec. # # A variety of error checks are performed to make sure production symbols # are valid and that %prec is used correctly. # ----------------------------------------------------------------------------- def add_production(self, prodname, syms, func=None, file='', line=0): if prodname in self.Terminals: raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname)) if prodname == 'error': raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname)) if not _is_identifier.match(prodname): raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname)) # Look for literal tokens for n, s in enumerate(syms): if s[0] in "'\"": try: c = eval(s) if (len(c) > 1): raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' % (file, line, s, prodname)) if c not in self.Terminals: self.Terminals[c] = [] syms[n] = c continue except SyntaxError: pass if not _is_identifier.match(s) and s != '%prec': raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname)) # Determine the precedence level if '%prec' in syms: if syms[-1] == '%prec': raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line)) if syms[-2] != '%prec': raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' % (file, line)) precname = syms[-1] prodprec = self.Precedence.get(precname) if not prodprec: raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname)) else: self.UsedPrecedence.add(precname) del syms[-2:] # Drop %prec from the rule else: # If no %prec, precedence is determined by the rightmost terminal symbol precname = rightmost_terminal(syms, self.Terminals) prodprec = self.Precedence.get(precname, ('right', 0)) # See if the rule is already in the rulemap map = '%s -> %s' % (prodname, syms) if map in self.Prodmap: m = self.Prodmap[map] raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) + 'Previous definition at %s:%d' % (m.file, m.line)) # From this point on, everything is valid. 
Create a new Production instance pnumber = len(self.Productions) if prodname not in self.Nonterminals: self.Nonterminals[prodname] = [] # Add the production number to Terminals and Nonterminals for t in syms: if t in self.Terminals: self.Terminals[t].append(pnumber) else: if t not in self.Nonterminals: self.Nonterminals[t] = [] self.Nonterminals[t].append(pnumber) # Create a production and add it to the list of productions p = Production(pnumber, prodname, syms, prodprec, func, file, line) self.Productions.append(p) self.Prodmap[map] = p # Add to the global productions list try: self.Prodnames[prodname].append(p) except KeyError: self.Prodnames[prodname] = [p] # ----------------------------------------------------------------------------- # set_start() # # Sets the starting symbol and creates the augmented grammar. Production # rule 0 is S' -> start where start is the start symbol. # ----------------------------------------------------------------------------- def set_start(self, start=None): if not start: start = self.Productions[1].name if start not in self.Nonterminals: raise GrammarError('start symbol %s undefined' % start) self.Productions[0] = Production(0, "S'", [start]) self.Nonterminals[start].append(0) self.Start = start # ----------------------------------------------------------------------------- # find_unreachable() # # Find all of the nonterminal symbols that can't be reached from the starting # symbol. Returns a list of nonterminals that can't be reached. # ----------------------------------------------------------------------------- def find_unreachable(self): # Mark all symbols that are reachable from a symbol s def mark_reachable_from(s): if s in reachable: return reachable.add(s) for p in self.Prodnames.get(s, []): for r in p.prod: mark_reachable_from(r) reachable = set() mark_reachable_from(self.Productions[0].prod[0]) return [s for s in self.Nonterminals if s not in reachable] # ----------------------------------------------------------------------------- # infinite_cycles() # # This function looks at the various parsing rules and tries to detect # infinite recursion cycles (grammar rules where there is no possible way # to derive a string of only terminals). # ----------------------------------------------------------------------------- def infinite_cycles(self): terminates = {} # Terminals: for t in self.Terminals: terminates[t] = True terminates['$end'] = True # Nonterminals: # Initialize to false: for n in self.Nonterminals: terminates[n] = False # Then propagate termination until no change: while True: some_change = False for (n, pl) in self.Prodnames.items(): # Nonterminal n terminates iff any of its productions terminates. for p in pl: # Production p terminates iff all of its rhs symbols terminate. for s in p.prod: if not terminates[s]: # The symbol s does not terminate, # so production p does not terminate. p_terminates = False break else: # didn't break from the loop, # so every symbol s terminates # so production p terminates. p_terminates = True if p_terminates: # symbol n terminates! if not terminates[n]: terminates[n] = True some_change = True # Don't need to consider any more productions for this n. break if not some_change: break infinite = [] for (s, term) in terminates.items(): if not term: if s not in self.Prodnames and s not in self.Terminals and s != 'error': # s is used-but-not-defined, and we've already warned of that, # so it would be overkill to say that it's also non-terminating. 
pass
                else:
                    infinite.append(s)
        return infinite

    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used in the grammar, but not defined as tokens or
    # grammar rules.  Returns a list of tuples (sym, prod) where sym is the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------
    def undefined_symbols(self):
        result = []
        for p in self.Productions:
            if not p:
                continue

            for s in p.prod:
                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
                    result.append((s, p))
        return result

    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar.  Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------
    def unused_terminals(self):
        unused_tok = []
        for s, v in self.Terminals.items():
            if s != 'error' and not v:
                unused_tok.append(s)

        return unused_tok

    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined, but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------
    def unused_rules(self):
        unused_prod = []
        for s, v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod

    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term, precedence) corresponding to precedence
    # rules that were never used by the grammar.  term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------
    def unused_precedence(self):
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname, self.Precedence[termname][0]))

        return unused

    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first1, the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------
    def _first(self, beta):

        # We are computing First(x1,x2,x3,...,xn)
        result = []
        for x in beta:
            x_produces_empty = False

            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = True
                else:
                    if f not in result:
                        result.append(f)

            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
result.append('<empty>') return result # ------------------------------------------------------------------------- # compute_first() # # Compute the value of FIRST1(X) for all symbols # ------------------------------------------------------------------------- def compute_first(self): if self.First: return self.First # Terminals: for t in self.Terminals: self.First[t] = [t] self.First['$end'] = ['$end'] # Nonterminals: # Initialize to the empty set: for n in self.Nonterminals: self.First[n] = [] # Then propagate symbols until no change: while True: some_change = False for n in self.Nonterminals: for p in self.Prodnames[n]: for f in self._first(p.prod): if f not in self.First[n]: self.First[n].append(f) some_change = True if not some_change: break return self.First # --------------------------------------------------------------------- # compute_follow() # # Computes all of the follow sets for every non-terminal symbol. The # follow set is the set of all symbols that might follow a given # non-terminal. See the Dragon book, 2nd Ed. p. 189. # --------------------------------------------------------------------- def compute_follow(self, start=None): # If already computed, return the result if self.Follow: return self.Follow # If first sets not computed yet, do that first. if not self.First: self.compute_first() # Add '$end' to the follow list of the start symbol for k in self.Nonterminals: self.Follow[k] = [] if not start: start = self.Productions[1].name self.Follow[start] = ['$end'] while True: didadd = False for p in self.Productions[1:]: # Here is the production set for i, B in enumerate(p.prod): if B in self.Nonterminals: # Okay. We got a non-terminal in a production fst = self._first(p.prod[i+1:]) hasempty = False for f in fst: if f != '<empty>' and f not in self.Follow[B]: self.Follow[B].append(f) didadd = True if f == '<empty>': hasempty = True if hasempty or i == (len(p.prod)-1): # Add elements of follow(a) to follow(b) for f in self.Follow[p.name]: if f not in self.Follow[B]: self.Follow[B].append(f) didadd = True if not didadd: break return self.Follow # ----------------------------------------------------------------------------- # build_lritems() # # This function walks the list of productions and builds a complete set of the # LR items. The LR items are stored in two ways: First, they are uniquely # numbered and placed in the list _lritems. Second, a linked list of LR items # is built for each production. For example: # # E -> E PLUS E # # Creates the list # # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ] # ----------------------------------------------------------------------------- def build_lritems(self): for p in self.Productions: lastlri = p i = 0 lr_items = [] while True: if i > len(p): lri = None else: lri = LRItem(p, i) # Precompute the list of productions immediately following try: lri.lr_after = self.Prodnames[lri.prod[i+1]] except (IndexError, KeyError): lri.lr_after = [] try: lri.lr_before = lri.prod[i-1] except IndexError: lri.lr_before = None lastlri.lr_next = lri if not lri: break lr_items.append(lri) lastlri = lri i += 1 p.lr_items = lr_items # ----------------------------------------------------------------------------- # == Class LRTable == # # This basic class represents a basic table of LR parsing information. # Methods for generating the tables are not defined here. They are defined # in the derived class LRGeneratedTable. 
# -----------------------------------------------------------------------------

class VersionError(YaccError):
    pass

class LRTable(object):
    def __init__(self):
        self.lr_action = None
        self.lr_goto = None
        self.lr_productions = None
        self.lr_method = None

    def read_table(self, module):
        if isinstance(module, types.ModuleType):
            parsetab = module
        else:
            exec('import %s' % module)
            parsetab = sys.modules[module]

        if parsetab._tabversion != __tabversion__:
            raise VersionError('yacc table file version is out of date')

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto

        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))

        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self, filename):
        try:
            import cPickle as pickle
        except ImportError:
            import pickle

        if not os.path.exists(filename):
            raise ImportError

        in_f = open(filename, 'rb')

        tabversion = pickle.load(in_f)
        if tabversion != __tabversion__:
            raise VersionError('yacc table file version is out of date')
        self.lr_method = pickle.load(in_f)
        signature = pickle.load(in_f)
        self.lr_action = pickle.load(in_f)
        self.lr_goto = pickle.load(in_f)
        productions = pickle.load(in_f)

        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))

        in_f.close()
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self, pdict):
        for p in self.lr_productions:
            p.bind(pdict)

# -----------------------------------------------------------------------------
#                           === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
#     F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs:  X    - An input set
#          R    - A relation
#          FP   - Set-valued function
# ------------------------------------------------------------------------------

def digraph(X, R, FP):
    N = {}
    for x in X:
        N[x] = 0
    stack = []
    F = {}
    for x in X:
        if N[x] == 0:
            traverse(x, N, stack, F, X, R, FP)
    return F

def traverse(x, N, stack, F, X, R, FP):
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)             # F(x) <- F'(x)

    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y, N, stack, F, X, R, FP)
        N[x] = min(N[x], N[y])
        for a in F.get(y, []):
            if a not in F[x]:
                F[x].append(a)
    if N[x] == d:
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()

class LALRError(YaccError):
    pass
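# For example, digraph() propagates base sets along a relation until each F(x)
# also contains F(y) for every y reachable from x (a hedged sketch with toy
# inputs; the chain relation and base sets below are illustrative):
#
#     X  = ['a', 'b', 'c']
#     R  = lambda x: {'a': ['b'], 'b': ['c'], 'c': []}[x]
#     FP = lambda x: {'a': [1], 'b': [2], 'c': [3]}[x]
#     digraph(X, R, FP)    # -> {'a': [1, 2, 3], 'b': [2, 3], 'c': [3]}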
# -----------------------------------------------------------------------------
#                             == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm.  There are no
# public methods except for write()
# -----------------------------------------------------------------------------

class LRGeneratedTable(LRTable):
    def __init__(self, grammar, method='LALR', log=None):
        if method not in ['SLR', 'LALR']:
            raise LALRError('Unsupported method %s' % method)

        self.grammar = grammar
        self.lr_method = method

        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log

        # Internal attributes
        self.lr_action = {}        # Action table
        self.lr_goto = {}          # Goto table
        self.lr_productions = grammar.Productions    # Copy of grammar Production array
        self.lr_goto_cache = {}    # Cache of computed gotos
        self.lr0_cidhash = {}      # Cache of closures
        self._add_count = 0        # Internal counter used to detect cycles

        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = []        # List of conflicts

        self.sr_conflicts = []
        self.rr_conflicts = []

        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()

    # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
    def lr0_closure(self, I):
        self._add_count += 1

        # Add everything in I to J
        J = I[:]
        didadd = True
        while didadd:
            didadd = False
            for j in J:
                for x in j.lr_after:
                    if getattr(x, 'lr0_added', 0) == self._add_count:
                        continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = True
        return J

    # Compute the LR(0) goto function goto(I,X) where I is a set
    # of LR(0) items and X is a grammar symbol.  This function is written
    # in a way that guarantees uniqueness of the generated goto sets
    # (i.e. the same goto set will never be returned as two different Python
    # objects).  With uniqueness, we can later do fast set comparisons using
    # id(obj) instead of element-wise comparison.
    def lr0_goto(self, I, x):
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I), x))
        if g:
            return g

        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result
        s = self.lr_goto_cache.get(x)
        if not s:
            s = {}
            self.lr_goto_cache[x] = s

        gs = []
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                s1 = s.get(id(n))
                if not s1:
                    s1 = {}
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        g = s.get('$end')
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I), x)] = g
        return g

    # Compute the LR(0) sets of item function
    def lr0_items(self):
        C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
        i = 0
        for I in C:
            self.lr0_cidhash[id(I)] = i
            i += 1

        # Loop over the items in C and each grammar symbols
        i = 0
        while i < len(C):
            I = C[i]
            i += 1

            # Collect all of the symbols that could possibly be in the goto(I,X) sets
            asyms = {}
            for ii in I:
                for s in ii.usyms:
                    asyms[s] = None

            for x in asyms:
                g = self.lr0_goto(I, x)
                if not g or id(g) in self.lr0_cidhash:
                    continue
                self.lr0_cidhash[id(g)] = len(C)
                C.append(g)

        return C
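    # For example, the table-construction pipeline above can be driven by hand
    # (a hedged sketch; the grammar and token names are illustrative):
    #
    #     g = Grammar(['NUMBER', 'PLUS'])
    #     g.set_precedence('PLUS', 'left', 1)
    #     g.add_production('expr', ['expr', 'PLUS', 'expr'])
    #     g.add_production('expr', ['NUMBER'])
    #     g.set_start('expr')
    #     lr = LRGeneratedTable(g, 'LALR')   # builds lr.lr_action / lr.lr_goto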
    # -----------------------------------------------------------------------------
    #                       ==== LALR(1) Parsing ====
    #
    # LALR(1) parsing is almost exactly the same as SLR except that instead of
    # relying upon Follow() sets when performing reductions, a more selective
    # lookahead set that incorporates the state of the LR(0) machine is utilized.
    # Thus, we mainly just have to focus on calculating the lookahead sets.
    #
    # The method used here is due to DeRemer and Pennello (1982).
    #
    #     DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
    #     Lookahead Sets", ACM Transactions on Programming Languages and Systems,
    #     Vol. 4, No. 4, Oct. 1982, pp. 615-649
    #
    # Further details can also be found in:
    #
    #     J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
    #     McGraw-Hill Book Company, (1985).
    #
    # -----------------------------------------------------------------------------

    # -----------------------------------------------------------------------------
    # compute_nullable_nonterminals()
    #
    # Creates a dictionary containing all of the non-terminals that might produce
    # an empty production.
    # -----------------------------------------------------------------------------
    def compute_nullable_nonterminals(self):
        nullable = set()
        num_nullable = 0
        while True:
            for p in self.grammar.Productions[1:]:
                if p.len == 0:
                    nullable.add(p.name)
                    continue
                for t in p.prod:
                    if t not in nullable:
                        break
                else:
                    nullable.add(p.name)
            if len(nullable) == num_nullable:
                break
            num_nullable = len(nullable)
        return nullable

    # -----------------------------------------------------------------------------
    # find_nonterminal_transitions(C)
    #
    # Given a set of LR(0) items, this function finds all of the non-terminal
    # transitions.  These are transitions in which a dot appears immediately before
    # a non-terminal.  Returns a list of tuples of the form (state,N) where state
    # is the state number and N is the nonterminal symbol.
    #
    # The input C is the set of LR(0) items.
    # -----------------------------------------------------------------------------
    def find_nonterminal_transitions(self, C):
        trans = []
        for stateno, state in enumerate(C):
            for p in state:
                if p.lr_index < p.len - 1:
                    t = (stateno, p.prod[p.lr_index+1])
                    if t[1] in self.grammar.Nonterminals:
                        if t not in trans:
                            trans.append(t)
        return trans

    # -----------------------------------------------------------------------------
    # dr_relation()
    #
    # Computes the DR(p,A) relationships for non-terminal transitions.  The input
    # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
    #
    # Returns a list of terminals.
    # -----------------------------------------------------------------------------
    def dr_relation(self, C, trans, nullable):
        state, N = trans
        terms = []

        g = self.lr0_goto(C[state], N)
        for p in g:
            if p.lr_index < p.len - 1:
                a = p.prod[p.lr_index+1]
                if a in self.grammar.Terminals:
                    if a not in terms:
                        terms.append(a)

        # This extra bit is to handle the start state
        if state == 0 and N == self.grammar.Productions[0].prod[0]:
            terms.append('$end')

        return terms

    # -----------------------------------------------------------------------------
    # reads_relation()
    #
    # Computes the READS() relation (p,A) READS (t,C).
    # -----------------------------------------------------------------------------
    def reads_relation(self, C, trans, empty):
        # Look for empty transitions
        rel = []
        state, N = trans

        g = self.lr0_goto(C[state], N)
        j = self.lr0_cidhash.get(id(g), -1)
        for p in g:
            if p.lr_index < p.len - 1:
                a = p.prod[p.lr_index + 1]
                if a in empty:
                    rel.append((j, a))

        return rel
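    # For example, nullability is computed as a fixpoint (a hedged sketch that
    # bypasses __init__ purely for illustration; the rules are illustrative):
    #
    #     g = Grammar([])
    #     g.add_production('b', ['a', 'a'])
    #     g.add_production('a', [])                          # a : <empty>
    #     tbl = LRGeneratedTable.__new__(LRGeneratedTable)   # skip table construction
    #     tbl.grammar = g
    #     tbl.compute_nullable_nonterminals()                # -> {'a', 'b'}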
    # -----------------------------------------------------------------------------
    # compute_lookback_includes()
    #
    # Determines the lookback and includes relations
    #
    # LOOKBACK:
    #
    # This relation is determined by running the LR(0) state machine forward.
    # For example, starting with a production "N : . A B C", we run it forward
    # to obtain "N : A B C ."  We then build a relationship between this final
    # state and the starting state.  These relationships are stored in a dictionary
    # lookdict.
    #
    # INCLUDES:
    #
    # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
    #
    # This relation is used to determine non-terminal transitions that occur
    # inside of other non-terminal transition states.  (p,A) INCLUDES (p', B)
    # if the following holds:
    #
    #       B -> LAT, where T -> epsilon and p' -L-> p
    #
    # L is essentially a prefix (which may be empty), T is a suffix that must be
    # able to derive an empty string.  State p' must lead to state p with the string L.
    #
    # -----------------------------------------------------------------------------
    def compute_lookback_includes(self, C, trans, nullable):
        lookdict = {}          # Dictionary of lookback relations
        includedict = {}       # Dictionary of include relations

        # Make a dictionary of non-terminal transitions
        dtrans = {}
        for t in trans:
            dtrans[t] = 1

        # Loop over all transitions and compute lookbacks and includes
        for state, N in trans:
            lookb = []
            includes = []
            for p in C[state]:
                if p.name != N:
                    continue

                # Okay, we have a name match.  We now follow the production all the way
                # through the state machine until we get the . on the right hand side
                lr_index = p.lr_index
                j = state
                while lr_index < p.len - 1:
                    lr_index = lr_index + 1
                    t = p.prod[lr_index]

                    # Check to see if this symbol and state are a non-terminal transition
                    if (j, t) in dtrans:
                        # Yes.  Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the
                        # production derives empty
                        li = lr_index + 1
                        while li < p.len:
                            if p.prod[li] in self.grammar.Terminals:
                                break      # No, forget it
                            if p.prod[li] not in nullable:
                                break
                            li = li + 1
                        else:
                            # Appears to be a relation between (j,t) and (state,N)
                            includes.append((j, t))

                    g = self.lr0_goto(C[j], t)               # Go to next set
                    j = self.lr0_cidhash.get(id(g), -1)      # Go to next state

                # When we get here, j is the final state, now we have to locate the production
                for r in C[j]:
                    if r.name != p.name:
                        continue
                    if r.len != p.len:
                        continue
                    i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
                    while i < r.lr_index:
                        if r.prod[i] != p.prod[i+1]:
                            break
                        i = i + 1
                    else:
                        lookb.append((j, r))
            for i in includes:
                if i not in includedict:
                    includedict[i] = []
                includedict[i].append((state, N))
            lookdict[(state, N)] = lookb
        return lookdict, includedict

    # -----------------------------------------------------------------------------
    # compute_read_sets()
    #
    # Given a set of LR(0) items, this function computes the read sets.
    #
    # Inputs:  C        = Set of LR(0) items
    #          ntrans   = Set of nonterminal transitions
    #          nullable = Set of nullable nonterminals
    #
    # Returns a set containing the read sets
    # -----------------------------------------------------------------------------

    def compute_read_sets(self, C, ntrans, nullable):
        FP = lambda x: self.dr_relation(C, x, nullable)
        R = lambda x: self.reads_relation(C, x, nullable)
        F = digraph(ntrans, R, FP)
        return F

    # -----------------------------------------------------------------------------
    # compute_follow_sets()
    #
    # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
    # and an include set, this function computes the follow sets
    #
    # Follow(p,A) = Read(p,A) U union{ Follow(p',B) | (p,A) INCLUDES (p',B) }
    #
    # Inputs:
    #          ntrans   = Set of nonterminal transitions
    #          readsets = Readset (previously computed)
    #          inclsets = Include sets (previously computed)
    #
    # Returns a set containing the follow sets
    # -----------------------------------------------------------------------------

    def compute_follow_sets(self, ntrans, readsets, inclsets):
        FP = lambda x: readsets[x]
        R = lambda x: inclsets.get(x, [])
        F = digraph(ntrans, R, FP)
        return F

    # -----------------------------------------------------------------------------
    # add_lookaheads()
    #
    # Attaches the lookahead symbols to grammar rules.
    #
    # Inputs:  lookbacks - Set of lookback relations
    #          followset - Computed follow set
    #
    # This function directly attaches the lookaheads to productions contained
    # in the lookbacks set
    # -----------------------------------------------------------------------------

    def add_lookaheads(self, lookbacks, followset):
        for trans, lb in lookbacks.items():
            # Loop over productions in lookback
            for state, p in lb:
                if state not in p.lookaheads:
                    p.lookaheads[state] = []
                f = followset.get(trans, [])
                for a in f:
                    if a not in p.lookaheads[state]:
                        p.lookaheads[state].append(a)

    # -----------------------------------------------------------------------------
    # add_lalr_lookaheads()
    #
    # This function does all of the work of adding lookahead information for use
    # with LALR parsing
    # -----------------------------------------------------------------------------

    def add_lalr_lookaheads(self, C):
        # Determine all of the nullable nonterminals
        nullable = self.compute_nullable_nonterminals()

        # Find all non-terminal transitions
        trans = self.find_nonterminal_transitions(C)

        # Compute read sets
        readsets = self.compute_read_sets(C, trans, nullable)

        # Compute lookback/includes relations
        lookd, included = self.compute_lookback_includes(C, trans, nullable)

        # Compute LALR FOLLOW sets
        followsets = self.compute_follow_sets(trans, readsets, included)

        # Add all of the lookaheads
        self.add_lookaheads(lookd, followsets)

    # -----------------------------------------------------------------------------
    # lr_parse_table()
    #
    # This function constructs the parse tables for SLR or LALR
    # -----------------------------------------------------------------------------
    def lr_parse_table(self):
        Productions = self.grammar.Productions
        Precedence = self.grammar.Precedence
        goto = self.lr_goto         # Goto array
        action = self.lr_action     # Action array
        log = self.log              # Logger for output

        actionp = {}                # Action production array (temporary)

        log.info('Parsing method: %s', self.lr_method)

        # Step 1: Construct C = { I0, I1, ...
IN}, collection of LR(0) items # This determines the number of states C = self.lr0_items() if self.lr_method == 'LALR': self.add_lalr_lookaheads(C) # Build the parser table, state by state st = 0 for I in C: # Loop over each production in I actlist = [] # List of actions st_action = {} st_actionp = {} st_goto = {} log.info('') log.info('state %d', st) log.info('') for p in I: log.info(' (%d) %s', p.number, p) log.info('') for p in I: if p.len == p.lr_index + 1: if p.name == "S'": # Start symbol. Accept! st_action['$end'] = 0 st_actionp['$end'] = p else: # We are at the end of a production. Reduce! if self.lr_method == 'LALR': laheads = p.lookaheads[st] else: laheads = self.grammar.Follow[p.name] for a in laheads: actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p))) r = st_action.get(a) if r is not None: # Whoa. Have a shift/reduce or reduce/reduce conflict if r > 0: # Need to decide on shift or reduce here # By default we favor shifting. Need to add # some precedence rules here. # Shift precedence comes from the token sprec, slevel = Precedence.get(a, ('right', 0)) # Reduce precedence comes from rule being reduced (p) rprec, rlevel = Productions[p.number].prec if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): # We really need to reduce here. st_action[a] = -p.number st_actionp[a] = p if not slevel and not rlevel: log.info(' ! shift/reduce conflict for %s resolved as reduce', a) self.sr_conflicts.append((st, a, 'reduce')) Productions[p.number].reduced += 1 elif (slevel == rlevel) and (rprec == 'nonassoc'): st_action[a] = None else: # Hmmm. Guess we'll keep the shift if not rlevel: log.info(' ! shift/reduce conflict for %s resolved as shift', a) self.sr_conflicts.append((st, a, 'shift')) elif r < 0: # Reduce/reduce conflict. In this case, we favor the rule # that was defined first in the grammar file oldp = Productions[-r] pp = Productions[p.number] if oldp.line > pp.line: st_action[a] = -p.number st_actionp[a] = p chosenp, rejectp = pp, oldp Productions[p.number].reduced += 1 Productions[oldp.number].reduced -= 1 else: chosenp, rejectp = oldp, pp self.rr_conflicts.append((st, chosenp, rejectp)) log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)', a, st_actionp[a].number, st_actionp[a]) else: raise LALRError('Unknown conflict in state %d' % st) else: st_action[a] = -p.number st_actionp[a] = p Productions[p.number].reduced += 1 else: i = p.lr_index a = p.prod[i+1] # Get symbol right after the "." if a in self.grammar.Terminals: g = self.lr0_goto(I, a) j = self.lr0_cidhash.get(id(g), -1) if j >= 0: # We are in a shift state actlist.append((a, p, 'shift and go to state %d' % j)) r = st_action.get(a) if r is not None: # Whoa have a shift/reduce or shift/shift conflict if r > 0: if r != j: raise LALRError('Shift/shift conflict in state %d' % st) elif r < 0: # Do a precedence check. # - if precedence of reduce rule is higher, we reduce. # - if precedence of reduce is same and left assoc, we reduce. # - otherwise we shift # Shift precedence comes from the token sprec, slevel = Precedence.get(a, ('right', 0)) # Reduce precedence comes from the rule that could have been reduced rprec, rlevel = Productions[st_actionp[a].number].prec if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')): # We decide to shift here... highest precedence to shift Productions[st_actionp[a].number].reduced -= 1 st_action[a] = j st_actionp[a] = p if not rlevel: log.info(' ! 
shift/reduce conflict for %s resolved as shift', a)
                                        self.sr_conflicts.append((st, a, 'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the reduce
                                    if not slevel and not rlevel:
                                        log.info('  ! shift/reduce conflict for %s resolved as reduce', a)
                                        self.sr_conflicts.append((st, a, 'reduce'))

                            else:
                                raise LALRError('Unknown conflict in state %d' % st)
                        else:
                            st_action[a] = j
                            st_actionp[a] = p

            # Print the actions associated with each terminal
            _actprint = {}
            for a, p, m in actlist:
                if a in st_action:
                    if p is st_actionp[a]:
                        log.info('    %-15s %s', a, m)
                        _actprint[(a, m)] = 1
            log.info('')

            # Print the actions that were not used.  (debugging)
            not_used = 0
            for a, p, m in actlist:
                if a in st_action:
                    if p is not st_actionp[a]:
                        if (a, m) not in _actprint:
                            log.debug('  ! %-15s [ %s ]', a, m)
                            not_used = 1
                            _actprint[(a, m)] = 1
            if not_used:
                log.debug('')

            # Construct the goto table for this state
            nkeys = {}
            for ii in I:
                for s in ii.usyms:
                    if s in self.grammar.Nonterminals:
                        nkeys[s] = None
            for n in nkeys:
                g = self.lr0_goto(I, n)
                j = self.lr0_cidhash.get(id(g), -1)
                if j >= 0:
                    st_goto[n] = j
                    log.info('    %-30s shift and go to state %d', n, j)

            action[st] = st_action
            actionp[st] = st_actionp
            goto[st] = st_goto
            st += 1

    # -----------------------------------------------------------------------------
    # write_table()
    #
    # This function writes the LR parsing tables to a file
    # -----------------------------------------------------------------------------

    def write_table(self, tabmodule, outputdir='', signature=''):
        if isinstance(tabmodule, types.ModuleType):
            raise IOError("Won't overwrite existing tabmodule")

        basemodulename = tabmodule.split('.')[-1]
        filename = os.path.join(outputdir, basemodulename) + '.py'
        try:
            f = open(filename, 'w')
            f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r _lr_method = %r _lr_signature = %r ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature)) # Change smaller to 0 to go back to original tables smaller = 1 # Factor out names to try and make smaller if smaller: items = {} for s, nd in self.lr_action.items(): for name, v in nd.items(): i = items.get(name) if not i: i = ([], []) items[name] = i i[0].append(s) i[1].append(v) f.write('\n_lr_action_items = {') for k, v in items.items(): f.write('%r:([' % k) for i in v[0]: f.write('%r,' % i) f.write('],[') for i in v[1]: f.write('%r,' % i) f.write(']),') f.write('}\n') f.write(''' _lr_action = {} for _k, _v in _lr_action_items.items(): for _x,_y in zip(_v[0],_v[1]): if not _x in _lr_action: _lr_action[_x] = {} _lr_action[_x][_k] = _y del _lr_action_items ''') else: f.write('\n_lr_action = { ') for k, v in self.lr_action.items(): f.write('(%r,%r):%r,' % (k[0], k[1], v)) f.write('}\n') if smaller: # Factor out names to try and make smaller items = {} for s, nd in self.lr_goto.items(): for name, v in nd.items(): i = items.get(name) if not i: i = ([], []) items[name] = i i[0].append(s) i[1].append(v) f.write('\n_lr_goto_items = {') for k, v in items.items(): f.write('%r:([' % k) for i in v[0]: f.write('%r,' % i) f.write('],[') for i in v[1]: f.write('%r,' % i) f.write(']),') f.write('}\n') f.write(''' _lr_goto = {} for _k, _v in _lr_goto_items.items(): for _x, _y in zip(_v[0], _v[1]): if not _x in _lr_goto: _lr_goto[_x] = {} _lr_goto[_x][_k] = _y del _lr_goto_items ''') else: f.write('\n_lr_goto = { ') for k, v in self.lr_goto.items(): f.write('(%r,%r):%r,' % (k[0], k[1], v)) f.write('}\n') # Write production table f.write('_lr_productions = [\n') for p in self.lr_productions: if p.func: f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)) else: f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len)) f.write(']\n') f.close() except IOError as e: raise # ----------------------------------------------------------------------------- # pickle_table() # # This function pickles the LR parsing tables to a supplied file object # ----------------------------------------------------------------------------- def pickle_table(self, filename, signature=''): try: import cPickle as pickle except ImportError: import pickle with open(filename, 'wb') as outf: pickle.dump(__tabversion__, outf, pickle_protocol) pickle.dump(self.lr_method, outf, pickle_protocol) pickle.dump(signature, outf, pickle_protocol) pickle.dump(self.lr_action, outf, pickle_protocol) pickle.dump(self.lr_goto, outf, pickle_protocol) outp = [] for p in self.lr_productions: if p.func: outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)) else: outp.append((str(p), p.name, p.len, None, None, None)) pickle.dump(outp, outf, pickle_protocol) # ----------------------------------------------------------------------------- # === INTROSPECTION === # # The following functions and classes are used to implement the PLY # introspection features followed by the yacc() function itself. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # get_caller_module_dict() # # This function returns a dictionary containing all of the symbols defined within # a caller further down the call stack. This is used to get the environment # associated with the yacc() call if none was provided. 
# ----------------------------------------------------------------------------- def get_caller_module_dict(levels): f = sys._getframe(levels) ldict = f.f_globals.copy() if f.f_globals != f.f_locals: ldict.update(f.f_locals) return ldict # ----------------------------------------------------------------------------- # parse_grammar() # # This takes a raw grammar rule string and parses it into production data # ----------------------------------------------------------------------------- def parse_grammar(doc, file, line): grammar = [] # Split the doc string into lines pstrings = doc.splitlines() lastp = None dline = line for ps in pstrings: dline += 1 p = ps.split() if not p: continue try: if p[0] == '|': # This is a continuation of a previous rule if not lastp: raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline)) prodname = lastp syms = p[1:] else: prodname = p[0] lastp = prodname syms = p[2:] assign = p[1] if assign != ':' and assign != '::=': raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline)) grammar.append((file, dline, prodname, syms)) except SyntaxError: raise except Exception: raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip())) return grammar # ----------------------------------------------------------------------------- # ParserReflect() # # This class represents information extracted for building a parser including # start symbol, error function, tokens, precedence list, action functions, # etc. # ----------------------------------------------------------------------------- class ParserReflect(object): def __init__(self, pdict, log=None): self.pdict = pdict self.start = None self.error_func = None self.tokens = None self.modules = set() self.grammar = [] self.error = False if log is None: self.log = PlyLogger(sys.stderr) else: self.log = log # Get all of the basic information def get_all(self): self.get_start() self.get_error_func() self.get_tokens() self.get_precedence() self.get_pfunctions() # Validate all of the information def validate_all(self): self.validate_start() self.validate_error_func() self.validate_tokens() self.validate_precedence() self.validate_pfunctions() self.validate_modules() return self.error # Compute a signature over the grammar def signature(self): parts = [] try: if self.start: parts.append(self.start) if self.prec: parts.append(''.join([''.join(p) for p in self.prec])) if self.tokens: parts.append(' '.join(self.tokens)) for f in self.pfuncs: if f[3]: parts.append(f[3]) except (TypeError, ValueError): pass return ''.join(parts) # ----------------------------------------------------------------------------- # validate_modules() # # This method checks to see if there are duplicated p_rulename() functions # in the parser module file. Without this function, it is really easy for # users to make mistakes by cutting and pasting code fragments (and it's a real # bugger to try and figure out why the resulting parser doesn't work). Therefore, # we just do a little regular expression pattern matching of def statements # to try and detect duplicates. 
# ----------------------------------------------------------------------------- def validate_modules(self): # Match def p_funcname( fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(') for module in self.modules: try: lines, linen = inspect.getsourcelines(module) except IOError: continue counthash = {} for linen, line in enumerate(lines): linen += 1 m = fre.match(line) if m: name = m.group(1) prev = counthash.get(name) if not prev: counthash[name] = linen else: filename = inspect.getsourcefile(module) self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d', filename, linen, name, prev) # Get the start symbol def get_start(self): self.start = self.pdict.get('start') # Validate the start symbol def validate_start(self): if self.start is not None: if not isinstance(self.start, string_types): self.log.error("'start' must be a string") # Look for error handler def get_error_func(self): self.error_func = self.pdict.get('p_error') # Validate the error function def validate_error_func(self): if self.error_func: if isinstance(self.error_func, types.FunctionType): ismethod = 0 elif isinstance(self.error_func, types.MethodType): ismethod = 1 else: self.log.error("'p_error' defined, but is not a function or method") self.error = True return eline = self.error_func.__code__.co_firstlineno efile = self.error_func.__code__.co_filename module = inspect.getmodule(self.error_func) self.modules.add(module) argcount = self.error_func.__code__.co_argcount - ismethod if argcount != 1: self.log.error('%s:%d: p_error() requires 1 argument', efile, eline) self.error = True # Get the tokens map def get_tokens(self): tokens = self.pdict.get('tokens') if not tokens: self.log.error('No token list is defined') self.error = True return if not isinstance(tokens, (list, tuple)): self.log.error('tokens must be a list or tuple') self.error = True return if not tokens: self.log.error('tokens is empty') self.error = True return self.tokens = tokens # Validate the tokens def validate_tokens(self): # Validate the tokens. if 'error' in self.tokens: self.log.error("Illegal token name 'error'. Is a reserved word") self.error = True return terminals = set() for n in self.tokens: if n in terminals: self.log.warning('Token %r multiply defined', n) terminals.add(n) # Get the precedence map (if any) def get_precedence(self): self.prec = self.pdict.get('precedence') # Validate and parse the precedence map def validate_precedence(self): preclist = [] if self.prec: if not isinstance(self.prec, (list, tuple)): self.log.error('precedence must be a list or tuple') self.error = True return for level, p in enumerate(self.prec): if not isinstance(p, (list, tuple)): self.log.error('Bad precedence table') self.error = True return if len(p) < 2: self.log.error('Malformed precedence entry %s. 
Must be (assoc, term, ..., term)', p) self.error = True return assoc = p[0] if not isinstance(assoc, string_types): self.log.error('precedence associativity must be a string') self.error = True return for term in p[1:]: if not isinstance(term, string_types): self.log.error('precedence items must be strings') self.error = True return preclist.append((term, assoc, level+1)) self.preclist = preclist # Get all p_functions from the grammar def get_pfunctions(self): p_functions = [] for name, item in self.pdict.items(): if not name.startswith('p_') or name == 'p_error': continue if isinstance(item, (types.FunctionType, types.MethodType)): line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno) module = inspect.getmodule(item) p_functions.append((line, module, name, item.__doc__)) # Sort all of the actions by line number; make sure to stringify # modules to make them sortable, since `line` may not uniquely sort all # p functions p_functions.sort(key=lambda p_function: ( p_function[0], str(p_function[1]), p_function[2], p_function[3])) self.pfuncs = p_functions # Validate all of the p_functions def validate_pfunctions(self): grammar = [] # Check for non-empty symbols if len(self.pfuncs) == 0: self.log.error('no rules of the form p_rulename are defined') self.error = True return for line, module, name, doc in self.pfuncs: file = inspect.getsourcefile(module) func = self.pdict[name] if isinstance(func, types.MethodType): reqargs = 2 else: reqargs = 1 if func.__code__.co_argcount > reqargs: self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__) self.error = True elif func.__code__.co_argcount < reqargs: self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__) self.error = True elif not func.__doc__: self.log.warning('%s:%d: No documentation string specified in function %r (ignored)', file, line, func.__name__) else: try: parsed_g = parse_grammar(doc, file, line) for g in parsed_g: grammar.append((name, g)) except SyntaxError as e: self.log.error(str(e)) self.error = True # Looks like a valid grammar rule # Mark the file in which defined. self.modules.add(module) # Secondary validation step that looks for p_ definitions that are not functions # or functions that look like they might be grammar rules. 
for n, v in self.pdict.items(): if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)): continue if n.startswith('t_'): continue if n.startswith('p_') and n != 'p_error': self.log.warning('%r not defined as a function', n) if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)): if v.__doc__: try: doc = v.__doc__.split(' ') if doc[1] == ':': self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix', v.__code__.co_filename, v.__code__.co_firstlineno, n) except IndexError: pass self.grammar = grammar # ----------------------------------------------------------------------------- # yacc(module) # # Build a parser # ----------------------------------------------------------------------------- def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file, outputdir=None, debuglog=None, errorlog=None, picklefile=None): if tabmodule is None: tabmodule = tab_module # Reference to the parsing method of the last built parser global parse # If pickling is enabled, table files are not created if picklefile: write_tables = 0 if errorlog is None: errorlog = PlyLogger(sys.stderr) # Get the module dictionary used for the parser if module: _items = [(k, getattr(module, k)) for k in dir(module)] pdict = dict(_items) # If no __file__ attribute is available, try to obtain it from the __module__ instead if '__file__' not in pdict: pdict['__file__'] = sys.modules[pdict['__module__']].__file__ else: pdict = get_caller_module_dict(2) if outputdir is None: # If no output directory is set, the location of the output files # is determined according to the following rules: # - If tabmodule specifies a package, files go into that package directory # - Otherwise, files go in the same directory as the specifying module if isinstance(tabmodule, types.ModuleType): srcfile = tabmodule.__file__ else: if '.' not in tabmodule: srcfile = pdict['__file__'] else: parts = tabmodule.split('.') pkgname = '.'.join(parts[:-1]) exec('import %s' % pkgname) srcfile = getattr(sys.modules[pkgname], '__file__', '') outputdir = os.path.dirname(srcfile) # Determine if the module is package of a package or not. # If so, fix the tabmodule setting so that tables load correctly pkg = pdict.get('__package__') if pkg and isinstance(tabmodule, str): if '.' not in tabmodule: tabmodule = pkg + '.' + tabmodule # Set start symbol if it's specified directly using an argument if start is not None: pdict['start'] = start # Collect parser information from the dictionary pinfo = ParserReflect(pdict, log=errorlog) pinfo.get_all() if pinfo.error: raise YaccError('Unable to build parser') # Check signature against table files (if any) signature = pinfo.signature() # Read the tables try: lr = LRTable() if picklefile: read_signature = lr.read_pickle(picklefile) else: read_signature = lr.read_table(tabmodule) if optimize or (read_signature == signature): try: lr.bind_callables(pinfo.pdict) parser = LRParser(lr, pinfo.error_func) parse = parser.parse return parser except Exception as e: errorlog.warning('There was a problem loading the table file: %r', e) except VersionError as e: errorlog.warning(str(e)) except ImportError: pass if debuglog is None: if debug: try: debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w')) except IOError as e: errorlog.warning("Couldn't open %r. 
%s" % (debugfile, e)) debuglog = NullLogger() else: debuglog = NullLogger() debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__) errors = False # Validate the parser information if pinfo.validate_all(): raise YaccError('Unable to build parser') if not pinfo.error_func: errorlog.warning('no p_error() function is defined') # Create a grammar object grammar = Grammar(pinfo.tokens) # Set precedence level for terminals for term, assoc, level in pinfo.preclist: try: grammar.set_precedence(term, assoc, level) except GrammarError as e: errorlog.warning('%s', e) # Add productions to the grammar for funcname, gram in pinfo.grammar: file, line, prodname, syms = gram try: grammar.add_production(prodname, syms, funcname, file, line) except GrammarError as e: errorlog.error('%s', e) errors = True # Set the grammar start symbols try: if start is None: grammar.set_start(pinfo.start) else: grammar.set_start(start) except GrammarError as e: errorlog.error(str(e)) errors = True if errors: raise YaccError('Unable to build parser') # Verify the grammar structure undefined_symbols = grammar.undefined_symbols() for sym, prod in undefined_symbols: errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym) errors = True unused_terminals = grammar.unused_terminals() if unused_terminals: debuglog.info('') debuglog.info('Unused terminals:') debuglog.info('') for term in unused_terminals: errorlog.warning('Token %r defined, but not used', term) debuglog.info(' %s', term) # Print out all productions to the debug log if debug: debuglog.info('') debuglog.info('Grammar') debuglog.info('') for n, p in enumerate(grammar.Productions): debuglog.info('Rule %-5d %s', n, p) # Find unused non-terminals unused_rules = grammar.unused_rules() for prod in unused_rules: errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name) if len(unused_terminals) == 1: errorlog.warning('There is 1 unused token') if len(unused_terminals) > 1: errorlog.warning('There are %d unused tokens', len(unused_terminals)) if len(unused_rules) == 1: errorlog.warning('There is 1 unused rule') if len(unused_rules) > 1: errorlog.warning('There are %d unused rules', len(unused_rules)) if debug: debuglog.info('') debuglog.info('Terminals, with rules where they appear') debuglog.info('') terms = list(grammar.Terminals) terms.sort() for term in terms: debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]])) debuglog.info('') debuglog.info('Nonterminals, with rules where they appear') debuglog.info('') nonterms = list(grammar.Nonterminals) nonterms.sort() for nonterm in nonterms: debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]])) debuglog.info('') if check_recursion: unreachable = grammar.find_unreachable() for u in unreachable: errorlog.warning('Symbol %r is unreachable', u) infinite = grammar.infinite_cycles() for inf in infinite: errorlog.error('Infinite recursion detected for symbol %r', inf) errors = True unused_prec = grammar.unused_precedence() for term, assoc in unused_prec: errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term) errors = True if errors: raise YaccError('Unable to build parser') # Run the LRGeneratedTable on the grammar if debug: errorlog.debug('Generating %s tables', method) lr = LRGeneratedTable(grammar, method, debuglog) if debug: num_sr = len(lr.sr_conflicts) # Report shift/reduce and reduce/reduce conflicts if num_sr == 1: 
errorlog.warning('1 shift/reduce conflict') elif num_sr > 1: errorlog.warning('%d shift/reduce conflicts', num_sr) num_rr = len(lr.rr_conflicts) if num_rr == 1: errorlog.warning('1 reduce/reduce conflict') elif num_rr > 1: errorlog.warning('%d reduce/reduce conflicts', num_rr) # Write out conflicts to the output file if debug and (lr.sr_conflicts or lr.rr_conflicts): debuglog.warning('') debuglog.warning('Conflicts:') debuglog.warning('') for state, tok, resolution in lr.sr_conflicts: debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution) already_reported = set() for state, rule, rejected in lr.rr_conflicts: if (state, id(rule), id(rejected)) in already_reported: continue debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) debuglog.warning('rejected rule (%s) in state %d', rejected, state) errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) errorlog.warning('rejected rule (%s) in state %d', rejected, state) already_reported.add((state, id(rule), id(rejected))) warned_never = [] for state, rule, rejected in lr.rr_conflicts: if not rejected.reduced and (rejected not in warned_never): debuglog.warning('Rule (%s) is never reduced', rejected) errorlog.warning('Rule (%s) is never reduced', rejected) warned_never.append(rejected) # Write the table file if requested if write_tables: try: lr.write_table(tabmodule, outputdir, signature) except IOError as e: errorlog.warning("Couldn't create %r. %s" % (tabmodule, e)) # Write a pickled version of the tables if picklefile: try: lr.pickle_table(picklefile, signature) except IOError as e: errorlog.warning("Couldn't create %r. %s" % (picklefile, e)) # Build the parser lr.bind_callables(pinfo.pdict) parser = LRParser(lr, pinfo.error_func) parse = parser.parse return parser
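# -----------------------------------------------------------------------------
# Editor's usage sketch (illustration only, not part of PLY itself): yacc()
# above is normally driven by a module that defines `tokens`, one p_rulename()
# function per grammar rule (with the rule in its docstring), and p_error().
# The tiny grammar and token names below are hypothetical, and the standalone
# `ply` package layout is assumed for the imports.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    import ply.lex as lex
    import ply.yacc as yacc

    tokens = ('NUMBER', 'PLUS')

    t_PLUS = r'\+'
    t_ignore = ' '

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        t.lexer.skip(1)

    # Left-recursive rules keep this toy grammar unambiguous for LALR
    def p_expr_plus(p):
        'expr : expr PLUS NUMBER'
        p[0] = p[1] + p[3]

    def p_expr_number(p):
        'expr : NUMBER'
        p[0] = p[1]

    def p_error(p):
        print('Syntax error at %r' % (p,))

    lex.lex()
    parser = yacc.yacc(write_tables=False, debug=False)
    print(parser.parse('1 + 2 + 3'))    # prints 6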
137,323
Python
38.291559
119
0.467358
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/__init__.py
# PLY package # Author: David Beazley ([email protected]) __version__ = '3.9' __all__ = ['lex','yacc']
102
Python
16.166664
41
0.588235
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/ctokens.py
# ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++.  This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------

# Reserved words
tokens = [
    # Literals (identifier, integer constant, float constant, string constant, char const)
    'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',

    # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
    'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
    'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
    'LOR', 'LAND', 'LNOT',
    'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',

    # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
    'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
    'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',

    # Increment/decrement (++,--)
    'INCREMENT', 'DECREMENT',

    # Structure dereference (->)
    'ARROW',

    # Ternary operator (?)
    'TERNARY',

    # Delimiters ( ) [ ] { } , . ; :
    'LPAREN', 'RPAREN',
    'LBRACKET', 'RBRACKET',
    'LBRACE', 'RBRACE',
    'COMMA', 'PERIOD', 'SEMI', 'COLON',

    # Ellipsis (...)
    'ELLIPSIS',
]

# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='

# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='

# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'

# ->
t_ARROW = r'->'

# ?
t_TERNARY = r'\?'

# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'

# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'

# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'

# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'

# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'

# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''

# Comment (C-Style)
def t_COMMENT(t):
    r'/\*(.|\n)*?\*/'
    t.lexer.lineno += t.value.count('\n')
    return t

# Comment (C++-Style)
def t_CPPCOMMENT(t):
    r'//.*\n'
    t.lexer.lineno += 1
    return t
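# ----------------------------------------------------------------------
# Editor's reuse sketch (illustration only, not part of this file): since
# the t_* rules above are plain module-level names, another tokenizer can
# star-import them and hand the result to ply.lex.  The module name and
# input string are hypothetical; whitespace and error handling remain the
# user's responsibility.
#
#     import ply.lex as lex
#     from ctokens import *        # tokens, t_PLUS, t_ARROW, t_COMMENT, ...
#
#     t_ignore = ' \t'
#
#     def t_error(t):
#         t.lexer.skip(1)
#
#     lexer = lex.lex()
#     lexer.input('x = a->b + 1;')
#     for tok in lexer:
#         print(tok.type, tok.value)
#         # e.g. ID, EQUALS, ID, ARROW, ID, PLUS, INTEGER, SEMI
# ----------------------------------------------------------------------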
3,177
Python
22.716418
90
0.393768
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/ygen.py
# ply: ygen.py
#
# This is a support program that auto-generates different versions of the YACC parsing
# function with different features removed for the purposes of performance.
#
# Users should edit the method LRParser.parsedebug() in yacc.py.  The source code
# for that method is then used to create the other methods.  See the comments in
# yacc.py for further details.

import os.path
import shutil

def get_source_range(lines, tag):
    srclines = enumerate(lines)
    start_tag = '#--! %s-start' % tag
    end_tag = '#--! %s-end' % tag

    for start_index, line in srclines:
        if line.strip().startswith(start_tag):
            break

    for end_index, line in srclines:
        if line.strip().endswith(end_tag):
            break

    return (start_index + 1, end_index)

def filter_section(lines, tag):
    filtered_lines = []
    include = True
    tag_text = '#--! %s' % tag
    for line in lines:
        if line.strip().startswith(tag_text):
            include = not include
        elif include:
            filtered_lines.append(line)
    return filtered_lines

def main():
    dirname = os.path.dirname(__file__)
    shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak'))
    with open(os.path.join(dirname, 'yacc.py'), 'r') as f:
        lines = f.readlines()

    parse_start, parse_end = get_source_range(lines, 'parsedebug')
    parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
    parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')

    # Get the original source
    orig_lines = lines[parse_start:parse_end]

    # Filter the DEBUG sections out
    parseopt_lines = filter_section(orig_lines, 'DEBUG')

    # Filter the TRACKING sections out
    parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')

    # Replace the parser source sections with updated versions
    lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
    lines[parseopt_start:parseopt_end] = parseopt_lines

    lines = [line.rstrip()+'\n' for line in lines]

    with open(os.path.join(dirname, 'yacc.py'), 'w') as f:
        f.writelines(lines)

    print('Updated yacc.py')

if __name__ == '__main__':
    main()
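# -----------------------------------------------------------------------------
# Editor's note (illustration only): the tags scanned above are sentinel
# comments that yacc.py places around the master parse function, e.g.
#
#     #--! parsedebug-start
#     ...
#     #--! DEBUG
#     debug.info('...')    # kept only in the debugging variant
#     #--! DEBUG
#     ...
#     #--! parsedebug-end
#
# get_source_range() finds the '-start'/'-end' bracket for a tag, while
# filter_section() toggles inclusion at each paired marker and drops the
# marker lines themselves:
#
#     >>> filter_section(['a\n', '#--! DEBUG\n', 'b\n', '#--! DEBUG\n', 'c\n'],
#     ...                'DEBUG')
#     ['a\n', 'c\n']
# -----------------------------------------------------------------------------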
2,251
Python
29.026666
94
0.657486
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/lex.py
# ----------------------------------------------------------------------------- # ply: lex.py # # Copyright (C) 2001-2017 # David M. Beazley (Dabeaz LLC) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the David Beazley or Dabeaz LLC may be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------------------------- __version__ = '3.10' __tabversion__ = '3.10' import re import sys import types import copy import os import inspect # This tuple contains known string types try: # Python 2.6 StringTypes = (types.StringType, types.UnicodeType) except AttributeError: # Python 3.0 StringTypes = (str, bytes) # This regular expression is used to match valid token names _is_identifier = re.compile(r'^[a-zA-Z0-9_]+$') # Exception thrown when invalid token encountered and no default error # handler is defined. class LexError(Exception): def __init__(self, message, s): self.args = (message,) self.text = s # Token class. This class is used to represent the tokens produced. class LexToken(object): def __str__(self): return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos) def __repr__(self): return str(self) # This object is a stand-in for a logging object created by the # logging module. class PlyLogger(object): def __init__(self, f): self.f = f def critical(self, msg, *args, **kwargs): self.f.write((msg % args) + '\n') def warning(self, msg, *args, **kwargs): self.f.write('WARNING: ' + (msg % args) + '\n') def error(self, msg, *args, **kwargs): self.f.write('ERROR: ' + (msg % args) + '\n') info = critical debug = critical # Null logger is used when no output is generated. Does nothing. class NullLogger(object): def __getattribute__(self, name): return self def __call__(self, *args, **kwargs): return self # ----------------------------------------------------------------------------- # === Lexing Engine === # # The following Lexer class implements the lexer runtime. 
There are only # a few public methods and attributes: # # input() - Store a new string in the lexer # token() - Get the next token # clone() - Clone the lexer # # lineno - Current line number # lexpos - Current position in the input string # ----------------------------------------------------------------------------- class Lexer: def __init__(self): self.lexre = None # Master regular expression. This is a list of # tuples (re, findex) where re is a compiled # regular expression and findex is a list # mapping regex group numbers to rules self.lexretext = None # Current regular expression strings self.lexstatere = {} # Dictionary mapping lexer states to master regexs self.lexstateretext = {} # Dictionary mapping lexer states to regex strings self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names self.lexstate = 'INITIAL' # Current lexer state self.lexstatestack = [] # Stack of lexer states self.lexstateinfo = None # State information self.lexstateignore = {} # Dictionary of ignored characters for each state self.lexstateerrorf = {} # Dictionary of error functions for each state self.lexstateeoff = {} # Dictionary of eof functions for each state self.lexreflags = 0 # Optional re compile flags self.lexdata = None # Actual input data (as a string) self.lexpos = 0 # Current position in input text self.lexlen = 0 # Length of the input text self.lexerrorf = None # Error rule (if any) self.lexeoff = None # EOF rule (if any) self.lextokens = None # List of valid tokens self.lexignore = '' # Ignored characters self.lexliterals = '' # Literal characters that can be passed through self.lexmodule = None # Module self.lineno = 1 # Current line number self.lexoptimize = False # Optimized mode def clone(self, object=None): c = copy.copy(self) # If the object parameter has been supplied, it means we are attaching the # lexer to a new object. In this case, we have to rebind all methods in # the lexstatere and lexstateerrorf tables. if object: newtab = {} for key, ritem in self.lexstatere.items(): newre = [] for cre, findex in ritem: newfindex = [] for f in findex: if not f or not f[0]: newfindex.append(f) continue newfindex.append((getattr(object, f[0].__name__), f[1])) newre.append((cre, newfindex)) newtab[key] = newre c.lexstatere = newtab c.lexstateerrorf = {} for key, ef in self.lexstateerrorf.items(): c.lexstateerrorf[key] = getattr(object, ef.__name__) c.lexmodule = object return c # ------------------------------------------------------------ # writetab() - Write lexer information to a table file # ------------------------------------------------------------ def writetab(self, lextab, outputdir=''): if isinstance(lextab, types.ModuleType): raise IOError("Won't overwrite existing lextab module") basetabmodule = lextab.split('.')[-1] filename = os.path.join(outputdir, basetabmodule) + '.py' with open(filename, 'w') as tf: tf.write('# %s.py. This file automatically created by PLY (version %s). 
Don\'t edit!\n' % (basetabmodule, __version__)) tf.write('_tabversion = %s\n' % repr(__tabversion__)) tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens))) tf.write('_lexreflags = %s\n' % repr(self.lexreflags)) tf.write('_lexliterals = %s\n' % repr(self.lexliterals)) tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo)) # Rewrite the lexstatere table, replacing function objects with function names tabre = {} for statename, lre in self.lexstatere.items(): titem = [] for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]): titem.append((retext, _funcs_to_names(func, renames))) tabre[statename] = titem tf.write('_lexstatere = %s\n' % repr(tabre)) tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore)) taberr = {} for statename, ef in self.lexstateerrorf.items(): taberr[statename] = ef.__name__ if ef else None tf.write('_lexstateerrorf = %s\n' % repr(taberr)) tabeof = {} for statename, ef in self.lexstateeoff.items(): tabeof[statename] = ef.__name__ if ef else None tf.write('_lexstateeoff = %s\n' % repr(tabeof)) # ------------------------------------------------------------ # readtab() - Read lexer information from a tab file # ------------------------------------------------------------ def readtab(self, tabfile, fdict): if isinstance(tabfile, types.ModuleType): lextab = tabfile else: exec('import %s' % tabfile) lextab = sys.modules[tabfile] if getattr(lextab, '_tabversion', '0.0') != __tabversion__: raise ImportError('Inconsistent PLY version') self.lextokens = lextab._lextokens self.lexreflags = lextab._lexreflags self.lexliterals = lextab._lexliterals self.lextokens_all = self.lextokens | set(self.lexliterals) self.lexstateinfo = lextab._lexstateinfo self.lexstateignore = lextab._lexstateignore self.lexstatere = {} self.lexstateretext = {} for statename, lre in lextab._lexstatere.items(): titem = [] txtitem = [] for pat, func_name in lre: titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict))) self.lexstatere[statename] = titem self.lexstateretext[statename] = txtitem self.lexstateerrorf = {} for statename, ef in lextab._lexstateerrorf.items(): self.lexstateerrorf[statename] = fdict[ef] self.lexstateeoff = {} for statename, ef in lextab._lexstateeoff.items(): self.lexstateeoff[statename] = fdict[ef] self.begin('INITIAL') # ------------------------------------------------------------ # input() - Push a new string into the lexer # ------------------------------------------------------------ def input(self, s): # Pull off the first character to see if s looks like a string c = s[:1] if not isinstance(c, StringTypes): raise ValueError('Expected a string') self.lexdata = s self.lexpos = 0 self.lexlen = len(s) # ------------------------------------------------------------ # begin() - Changes the lexing state # ------------------------------------------------------------ def begin(self, state): if state not in self.lexstatere: raise ValueError('Undefined state') self.lexre = self.lexstatere[state] self.lexretext = self.lexstateretext[state] self.lexignore = self.lexstateignore.get(state, '') self.lexerrorf = self.lexstateerrorf.get(state, None) self.lexeoff = self.lexstateeoff.get(state, None) self.lexstate = state # ------------------------------------------------------------ # push_state() - Changes the lexing state and saves old on stack # ------------------------------------------------------------ def push_state(self, state): self.lexstatestack.append(self.lexstate) self.begin(state) # 
------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        self.lexpos += n

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos    = self.lexpos
        lexlen    = self.lexlen
        lexignore = self.lexignore
        lexdata   = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it

                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token; if it returns nothing, we
                # just move on to the next token
                if not newtok:
                    lexpos    = self.lexpos        # This is here in case user has updated lexpos.
                    lexignore = self.lexignore     # This is here in case there was a state change
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename,
                            func.__code__.co_firstlineno,
                            func.__name__,
                            newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error.
Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) lexpos = self.lexpos if not newtok: continue return newtok self.lexpos = lexpos raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:]) if self.lexeoff: tok = LexToken() tok.type = 'eof' tok.value = '' tok.lineno = self.lineno tok.lexpos = lexpos tok.lexer = self self.lexpos = lexpos newtok = self.lexeoff(tok) return newtok self.lexpos = lexpos + 1 if self.lexdata is None: raise RuntimeError('No input string given with input()') return None # Iterator interface def __iter__(self): return self def next(self): t = self.token() if t is None: raise StopIteration return t __next__ = next # ----------------------------------------------------------------------------- # ==== Lex Builder === # # The functions and classes below are used to collect lexing information # and build a Lexer object from it. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # _get_regex(func) # # Returns the regular expression assigned to a function either as a doc string # or as a .regex attribute attached by the @TOKEN decorator. # ----------------------------------------------------------------------------- def _get_regex(func): return getattr(func, 'regex', func.__doc__) # ----------------------------------------------------------------------------- # get_caller_module_dict() # # This function returns a dictionary containing all of the symbols defined within # a caller further down the call stack. This is used to get the environment # associated with the yacc() call if none was provided. # ----------------------------------------------------------------------------- def get_caller_module_dict(levels): f = sys._getframe(levels) ldict = f.f_globals.copy() if f.f_globals != f.f_locals: ldict.update(f.f_locals) return ldict # ----------------------------------------------------------------------------- # _funcs_to_names() # # Given a list of regular expression functions, this converts it to a list # suitable for output to a table file # ----------------------------------------------------------------------------- def _funcs_to_names(funclist, namelist): result = [] for f, name in zip(funclist, namelist): if f and f[0]: result.append((name, f[1])) else: result.append(f) return result # ----------------------------------------------------------------------------- # _names_to_funcs() # # Given a list of regular expression function names, this converts it back to # functions. # ----------------------------------------------------------------------------- def _names_to_funcs(namelist, fdict): result = [] for n in namelist: if n and n[0]: result.append((fdict[n[0]], n[1])) else: result.append(n) return result # ----------------------------------------------------------------------------- # _form_master_re() # # This function takes a list of all of the regex components and attempts to # form the master regular expression. Given limitations in the Python re # module, it may be necessary to break the master regex into separate expressions. 
# ----------------------------------------------------------------------------- def _form_master_re(relist, reflags, ldict, toknames): if not relist: return [] regex = '|'.join(relist) try: lexre = re.compile(regex, reflags) # Build the index to function map for the matching engine lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1) lexindexnames = lexindexfunc[:] for f, i in lexre.groupindex.items(): handle = ldict.get(f, None) if type(handle) in (types.FunctionType, types.MethodType): lexindexfunc[i] = (handle, toknames[f]) lexindexnames[i] = f elif handle is not None: lexindexnames[i] = f if f.find('ignore_') > 0: lexindexfunc[i] = (None, None) else: lexindexfunc[i] = (None, toknames[f]) return [(lexre, lexindexfunc)], [regex], [lexindexnames] except Exception: m = int(len(relist)/2) if m == 0: m = 1 llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames) rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames) return (llist+rlist), (lre+rre), (lnames+rnames) # ----------------------------------------------------------------------------- # def _statetoken(s,names) # # Given a declaration name s of the form "t_" and a dictionary whose keys are # state names, this function returns a tuple (states,tokenname) where states # is a tuple of state names and tokenname is the name of the token. For example, # calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM') # ----------------------------------------------------------------------------- def _statetoken(s, names): nonstate = 1 parts = s.split('_') for i, part in enumerate(parts[1:], 1): if part not in names and part != 'ANY': break if i > 1: states = tuple(parts[1:i]) else: states = ('INITIAL',) if 'ANY' in states: states = tuple(names) tokenname = '_'.join(parts[i:]) return (states, tokenname) # ----------------------------------------------------------------------------- # LexerReflect() # # This class represents information needed to build a lexer as extracted from a # user's input file. 
# ----------------------------------------------------------------------------- class LexerReflect(object): def __init__(self, ldict, log=None, reflags=0): self.ldict = ldict self.error_func = None self.tokens = [] self.reflags = reflags self.stateinfo = {'INITIAL': 'inclusive'} self.modules = set() self.error = False self.log = PlyLogger(sys.stderr) if log is None else log # Get all of the basic information def get_all(self): self.get_tokens() self.get_literals() self.get_states() self.get_rules() # Validate all of the information def validate_all(self): self.validate_tokens() self.validate_literals() self.validate_rules() return self.error # Get the tokens map def get_tokens(self): tokens = self.ldict.get('tokens', None) if not tokens: self.log.error('No token list is defined') self.error = True return if not isinstance(tokens, (list, tuple)): self.log.error('tokens must be a list or tuple') self.error = True return if not tokens: self.log.error('tokens is empty') self.error = True return self.tokens = tokens # Validate the tokens def validate_tokens(self): terminals = {} for n in self.tokens: if not _is_identifier.match(n): self.log.error("Bad token name '%s'", n) self.error = True if n in terminals: self.log.warning("Token '%s' multiply defined", n) terminals[n] = 1 # Get the literals specifier def get_literals(self): self.literals = self.ldict.get('literals', '') if not self.literals: self.literals = '' # Validate literals def validate_literals(self): try: for c in self.literals: if not isinstance(c, StringTypes) or len(c) > 1: self.log.error('Invalid literal %s. Must be a single character', repr(c)) self.error = True except TypeError: self.log.error('Invalid literals specification. literals must be a sequence of characters') self.error = True def get_states(self): self.states = self.ldict.get('states', None) # Build statemap if self.states: if not isinstance(self.states, (tuple, list)): self.log.error('states must be defined as a tuple or list') self.error = True else: for s in self.states: if not isinstance(s, tuple) or len(s) != 2: self.log.error("Invalid state specifier %s. 
Must be a tuple (statename,'exclusive|inclusive')", repr(s)) self.error = True continue name, statetype = s if not isinstance(name, StringTypes): self.log.error('State name %s must be a string', repr(name)) self.error = True continue if not (statetype == 'inclusive' or statetype == 'exclusive'): self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name) self.error = True continue if name in self.stateinfo: self.log.error("State '%s' already defined", name) self.error = True continue self.stateinfo[name] = statetype # Get all of the symbols with a t_ prefix and sort them into various # categories (functions, strings, error functions, and ignore characters) def get_rules(self): tsymbols = [f for f in self.ldict if f[:2] == 't_'] # Now build up a list of functions and a list of strings self.toknames = {} # Mapping of symbols to token names self.funcsym = {} # Symbols defined as functions self.strsym = {} # Symbols defined as strings self.ignore = {} # Ignore strings by state self.errorf = {} # Error functions by state self.eoff = {} # EOF functions by state for s in self.stateinfo: self.funcsym[s] = [] self.strsym[s] = [] if len(tsymbols) == 0: self.log.error('No rules of the form t_rulename are defined') self.error = True return for f in tsymbols: t = self.ldict[f] states, tokname = _statetoken(f, self.stateinfo) self.toknames[f] = tokname if hasattr(t, '__call__'): if tokname == 'error': for s in states: self.errorf[s] = t elif tokname == 'eof': for s in states: self.eoff[s] = t elif tokname == 'ignore': line = t.__code__.co_firstlineno file = t.__code__.co_filename self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__) self.error = True else: for s in states: self.funcsym[s].append((f, t)) elif isinstance(t, StringTypes): if tokname == 'ignore': for s in states: self.ignore[s] = t if '\\' in t: self.log.warning("%s contains a literal backslash '\\'", f) elif tokname == 'error': self.log.error("Rule '%s' must be defined as a function", f) self.error = True else: for s in states: self.strsym[s].append((f, t)) else: self.log.error('%s not defined as a function or string', f) self.error = True # Sort the functions by line number for f in self.funcsym.values(): f.sort(key=lambda x: x[1].__code__.co_firstlineno) # Sort the strings by regular expression length for s in self.strsym.values(): s.sort(key=lambda x: len(x[1]), reverse=True) # Validate all of the t_rules collected def validate_rules(self): for state in self.stateinfo: # Validate all rules defined by functions for fname, f in self.funcsym[state]: line = f.__code__.co_firstlineno file = f.__code__.co_filename module = inspect.getmodule(f) self.modules.add(module) tokname = self.toknames[fname] if isinstance(f, types.MethodType): reqargs = 2 else: reqargs = 1 nargs = f.__code__.co_argcount if nargs > reqargs: self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) self.error = True continue if nargs < reqargs: self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) self.error = True continue if not _get_regex(f): self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__) self.error = True continue try: c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags) if c.match(''): self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__) self.error = True except re.error as e: self.log.error("%s:%d: Invalid regular expression for rule '%s'. 
%s", file, line, f.__name__, e) if '#' in _get_regex(f): self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__) self.error = True # Validate all rules defined by strings for name, r in self.strsym[state]: tokname = self.toknames[name] if tokname == 'error': self.log.error("Rule '%s' must be defined as a function", name) self.error = True continue if tokname not in self.tokens and tokname.find('ignore_') < 0: self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname) self.error = True continue try: c = re.compile('(?P<%s>%s)' % (name, r), self.reflags) if (c.match('')): self.log.error("Regular expression for rule '%s' matches empty string", name) self.error = True except re.error as e: self.log.error("Invalid regular expression for rule '%s'. %s", name, e) if '#' in r: self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name) self.error = True if not self.funcsym[state] and not self.strsym[state]: self.log.error("No rules defined for state '%s'", state) self.error = True # Validate the error function efunc = self.errorf.get(state, None) if efunc: f = efunc line = f.__code__.co_firstlineno file = f.__code__.co_filename module = inspect.getmodule(f) self.modules.add(module) if isinstance(f, types.MethodType): reqargs = 2 else: reqargs = 1 nargs = f.__code__.co_argcount if nargs > reqargs: self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) self.error = True if nargs < reqargs: self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) self.error = True for module in self.modules: self.validate_module(module) # ----------------------------------------------------------------------------- # validate_module() # # This checks to see if there are duplicated t_rulename() functions or strings # in the parser input file. This is done using a simple regular expression # match on each line in the source code of the given module. # ----------------------------------------------------------------------------- def validate_module(self, module): try: lines, linen = inspect.getsourcelines(module) except IOError: return fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') counthash = {} linen += 1 for line in lines: m = fre.match(line) if not m: m = sre.match(line) if m: name = m.group(1) prev = counthash.get(name) if not prev: counthash[name] = linen else: filename = inspect.getsourcefile(module) self.log.error('%s:%d: Rule %s redefined. 
Previously defined on line %d', filename, linen, name, prev) self.error = True linen += 1 # ----------------------------------------------------------------------------- # lex(module) # # Build all of the regular expression rules from definitions in the supplied module # ----------------------------------------------------------------------------- def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab', reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None): if lextab is None: lextab = 'lextab' global lexer ldict = None stateinfo = {'INITIAL': 'inclusive'} lexobj = Lexer() lexobj.lexoptimize = optimize global token, input if errorlog is None: errorlog = PlyLogger(sys.stderr) if debug: if debuglog is None: debuglog = PlyLogger(sys.stderr) # Get the module dictionary used for the lexer if object: module = object # Get the module dictionary used for the parser if module: _items = [(k, getattr(module, k)) for k in dir(module)] ldict = dict(_items) # If no __file__ attribute is available, try to obtain it from the __module__ instead if '__file__' not in ldict: ldict['__file__'] = sys.modules[ldict['__module__']].__file__ else: ldict = get_caller_module_dict(2) # Determine if the module is package of a package or not. # If so, fix the tabmodule setting so that tables load correctly pkg = ldict.get('__package__') if pkg and isinstance(lextab, str): if '.' not in lextab: lextab = pkg + '.' + lextab # Collect parser information from the dictionary linfo = LexerReflect(ldict, log=errorlog, reflags=reflags) linfo.get_all() if not optimize: if linfo.validate_all(): raise SyntaxError("Can't build lexer") if optimize and lextab: try: lexobj.readtab(lextab, ldict) token = lexobj.token input = lexobj.input lexer = lexobj return lexobj except ImportError: pass # Dump some basic debugging information if debug: debuglog.info('lex: tokens = %r', linfo.tokens) debuglog.info('lex: literals = %r', linfo.literals) debuglog.info('lex: states = %r', linfo.stateinfo) # Build a dictionary of valid token names lexobj.lextokens = set() for n in linfo.tokens: lexobj.lextokens.add(n) # Get literals specification if isinstance(linfo.literals, (list, tuple)): lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) else: lexobj.lexliterals = linfo.literals lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals) # Get the stateinfo dictionary stateinfo = linfo.stateinfo regexs = {} # Build the master regular expressions for state in stateinfo: regex_list = [] # Add rules defined by functions first for fname, f in linfo.funcsym[state]: line = f.__code__.co_firstlineno file = f.__code__.co_filename regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f))) if debug: debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state) # Now add all of the simple rules for name, r in linfo.strsym[state]: regex_list.append('(?P<%s>%s)' % (name, r)) if debug: debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state) regexs[state] = regex_list # Build the master regular expressions if debug: debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====') for state in regexs: lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames) lexobj.lexstatere[state] = lexre lexobj.lexstateretext[state] = re_text lexobj.lexstaterenames[state] = re_names if debug: for i, text in enumerate(re_text): debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text) # For inclusive states, we need to add the regular 
expressions from the INITIAL state for state, stype in stateinfo.items(): if state != 'INITIAL' and stype == 'inclusive': lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL']) lexobj.lexstateinfo = stateinfo lexobj.lexre = lexobj.lexstatere['INITIAL'] lexobj.lexretext = lexobj.lexstateretext['INITIAL'] lexobj.lexreflags = reflags # Set up ignore variables lexobj.lexstateignore = linfo.ignore lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '') # Set up error functions lexobj.lexstateerrorf = linfo.errorf lexobj.lexerrorf = linfo.errorf.get('INITIAL', None) if not lexobj.lexerrorf: errorlog.warning('No t_error rule is defined') # Set up eof functions lexobj.lexstateeoff = linfo.eoff lexobj.lexeoff = linfo.eoff.get('INITIAL', None) # Check state information for ignore and error rules for s, stype in stateinfo.items(): if stype == 'exclusive': if s not in linfo.errorf: errorlog.warning("No error rule is defined for exclusive state '%s'", s) if s not in linfo.ignore and lexobj.lexignore: errorlog.warning("No ignore rule is defined for exclusive state '%s'", s) elif stype == 'inclusive': if s not in linfo.errorf: linfo.errorf[s] = linfo.errorf.get('INITIAL', None) if s not in linfo.ignore: linfo.ignore[s] = linfo.ignore.get('INITIAL', '') # Create global versions of the token() and input() functions token = lexobj.token input = lexobj.input lexer = lexobj # If in optimize mode, we write the lextab if lextab and optimize: if outputdir is None: # If no output directory is set, the location of the output files # is determined according to the following rules: # - If lextab specifies a package, files go into that package directory # - Otherwise, files go in the same directory as the specifying module if isinstance(lextab, types.ModuleType): srcfile = lextab.__file__ else: if '.' not in lextab: srcfile = ldict['__file__'] else: parts = lextab.split('.') pkgname = '.'.join(parts[:-1]) exec('import %s' % pkgname) srcfile = getattr(sys.modules[pkgname], '__file__', '') outputdir = os.path.dirname(srcfile) try: lexobj.writetab(lextab, outputdir) except IOError as e: errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e)) return lexobj # ----------------------------------------------------------------------------- # runmain() # # This runs the lexer as a main program # ----------------------------------------------------------------------------- def runmain(lexer=None, data=None): if not data: try: filename = sys.argv[1] f = open(filename) data = f.read() f.close() except IndexError: sys.stdout.write('Reading from standard input (type EOF to end):\n') data = sys.stdin.read() if lexer: _input = lexer.input else: _input = input _input(data) if lexer: _token = lexer.token else: _token = token while True: tok = _token() if not tok: break sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos)) # ----------------------------------------------------------------------------- # @TOKEN(regex) # # This decorator function can be used to set the regex expression on a function # when its docstring might need to be set in an alternative way # ----------------------------------------------------------------------------- def TOKEN(r): def set_regex(f): if hasattr(r, '__call__'): f.regex = _get_regex(r) else: f.regex = r return f return set_regex # Alternative spelling of the TOKEN decorator Token = TOKEN
42,918
Python
38.017273
131
0.507386
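A minimal sketch of driving the lex() entry point defined above. The token names, regexes, and the ply.lex import path are illustrative assumptions, not taken from this bundle:

# Hypothetical mini-lexer exercising lex(), t_ rules, and the TOKEN decorator
# defined above; token names and regexes here are illustrative only.
import ply.lex as lex        # assumed import path; may differ in this bundle
from ply.lex import TOKEN

tokens = ('NUMBER', 'PLUS')  # becomes LexerReflect.tokens during validation

t_PLUS = r'\+'               # simple string rules, sorted by decreasing regex length
t_ignore = ' \t'             # per-state ignore characters

@TOKEN(r'\d+')               # sets f.regex instead of relying on the docstring
def t_NUMBER(t):
    t.value = int(t.value)
    return t

def t_error(t):              # stored in LexerReflect.errorf for state INITIAL
    print("Illegal character %r" % t.value[0])
    t.lexer.skip(1)

lexer = lex.lex()            # reflects on this module's dictionary
lexer.input('1 + 22')
for tok in iter(lexer.token, None):
    print(tok.type, tok.value)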
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sniffio/_version.py
# This file is imported from __init__.py and exec'd from setup.py

__version__ = "1.3.0"
89
Python
21.499995
65
0.640449
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sniffio/_impl.py
from contextvars import ContextVar
from typing import Optional
import sys
import threading

current_async_library_cvar = ContextVar(
    "current_async_library_cvar", default=None
)  # type: ContextVar[Optional[str]]


class _ThreadLocal(threading.local):
    # Since threading.local provides no explicit mechanism for setting
    # a default for a value, a custom class with a class attribute is used
    # instead.
    name = None  # type: Optional[str]


thread_local = _ThreadLocal()


class AsyncLibraryNotFoundError(RuntimeError):
    pass


def current_async_library() -> str:
    """Detect which async library is currently running.

    The following libraries are currently supported:

    ================   ===========  ============================
    Library             Requires     Magic string
    ================   ===========  ============================
    **Trio**            Trio v0.6+   ``"trio"``
    **Curio**           -            ``"curio"``
    **asyncio**                      ``"asyncio"``
    **Trio-asyncio**    v0.8.2+      ``"trio"`` or ``"asyncio"``,
                                     depending on current mode
    ================   ===========  ============================

    Returns:
      A string like ``"trio"``.

    Raises:
      AsyncLibraryNotFoundError: if called from synchronous context,
        or if the current async library was not recognized.

    Examples:

        .. code-block:: python3

           from sniffio import current_async_library

           async def generic_sleep(seconds):
               library = current_async_library()
               if library == "trio":
                   import trio
                   await trio.sleep(seconds)
               elif library == "asyncio":
                   import asyncio
                   await asyncio.sleep(seconds)
               # ... and so on ...
               else:
                   raise RuntimeError(f"Unsupported library {library!r}")

    """
    value = thread_local.name
    if value is not None:
        return value

    value = current_async_library_cvar.get()
    if value is not None:
        return value

    # Need to sniff for asyncio
    if "asyncio" in sys.modules:
        import asyncio
        try:
            current_task = asyncio.current_task  # type: ignore[attr-defined]
        except AttributeError:
            current_task = asyncio.Task.current_task  # type: ignore[attr-defined]
        try:
            if current_task() is not None:
                return "asyncio"
        except RuntimeError:
            pass

    # Sniff for curio (for now)
    if 'curio' in sys.modules:
        from curio.meta import curio_running
        if curio_running():
            return 'curio'

    raise AsyncLibraryNotFoundError(
        "unknown async library, or not in async context"
    )
2,843
Python
28.625
82
0.539923
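A short usage sketch for current_async_library() as defined above, using only the stdlib asyncio branch; the package is assumed importable as sniffio:

# Sketch: detection succeeds inside a running asyncio task and fails outside.
import asyncio
import sniffio  # assumed import name

async def main():
    # A current asyncio Task exists here, so the asyncio sniff branch returns.
    print(sniffio.current_async_library())  # -> "asyncio"

asyncio.run(main())

try:
    sniffio.current_async_library()  # no thread-local, no cvar, no running task
except sniffio.AsyncLibraryNotFoundError:
    print("not in an async context")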
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sniffio/__init__.py
"""Top-level package for sniffio.""" __all__ = [ "current_async_library", "AsyncLibraryNotFoundError", "current_async_library_cvar" ] from ._version import __version__ from ._impl import ( current_async_library, AsyncLibraryNotFoundError, current_async_library_cvar, thread_local, )
310
Python
18.437499
57
0.680645
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sniffio/_tests/test_sniffio.py
import os
import sys

import pytest

from .. import (
    current_async_library, AsyncLibraryNotFoundError,
    current_async_library_cvar, thread_local
)


def test_basics_cvar():
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    token = current_async_library_cvar.set("generic-lib")
    try:
        assert current_async_library() == "generic-lib"
    finally:
        current_async_library_cvar.reset(token)

    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()


def test_basics_tlocal():
    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    old_name, thread_local.name = thread_local.name, "generic-lib"
    try:
        assert current_async_library() == "generic-lib"
    finally:
        thread_local.name = old_name

    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()


def test_asyncio():
    import asyncio

    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    ran = []

    async def this_is_asyncio():
        assert current_async_library() == "asyncio"
        # Call it a second time to exercise the caching logic
        assert current_async_library() == "asyncio"
        ran.append(True)

    asyncio.run(this_is_asyncio())
    assert ran == [True]

    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()


# https://github.com/dabeaz/curio/pull/354
@pytest.mark.skipif(
    os.name == "nt" and sys.version_info >= (3, 9),
    reason="Curio breaks on Python 3.9+ on Windows. Fix was not released yet",
)
def test_curio():
    import curio

    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()

    ran = []

    async def this_is_curio():
        assert current_async_library() == "curio"
        # Call it a second time to exercise the caching logic
        assert current_async_library() == "curio"
        ran.append(True)

    curio.run(this_is_curio)
    assert ran == [True]

    with pytest.raises(AsyncLibraryNotFoundError):
        current_async_library()
2,110
Python
23.835294
78
0.661137
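Mirroring what test_basics_cvar exercises above, a hypothetical event loop could advertise itself through the context variable; the library name below is made up:

# Hypothetical: an event loop advertising itself, as the cvar test does above.
import sniffio

token = sniffio.current_async_library_cvar.set("my-event-loop")  # made-up name
try:
    assert sniffio.current_async_library() == "my-event-loop"
finally:
    sniffio.current_async_library_cvar.reset(token)  # restore prior state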
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_pclass.py
from pyrsistent._checked_types import (InvariantException, CheckedType, _restore_pickle, store_invariants)
from pyrsistent._field_common import (
    set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
)
from pyrsistent._transformations import transform


def _is_pclass(bases):
    return len(bases) == 1 and bases[0] == CheckedType


class PClassMeta(type):
    def __new__(mcs, name, bases, dct):
        set_fields(dct, bases, name='_pclass_fields')
        store_invariants(dct, bases, '_pclass_invariants', '__invariant__')
        dct['__slots__'] = ('_pclass_frozen',) + tuple(key for key in dct['_pclass_fields'])

        # There must only be one __weakref__ entry in the inheritance hierarchy,
        # lets put it on the top level class.
        if _is_pclass(bases):
            dct['__slots__'] += ('__weakref__',)

        return super(PClassMeta, mcs).__new__(mcs, name, bases, dct)


_MISSING_VALUE = object()


def _check_and_set_attr(cls, field, name, value, result, invariant_errors):
    check_type(cls, field, name, value)
    is_ok, error_code = field.invariant(value)
    if not is_ok:
        invariant_errors.append(error_code)
    else:
        setattr(result, name, value)


class PClass(CheckedType, metaclass=PClassMeta):
    """
    A PClass is a python class with a fixed set of specified fields. PClasses are declared as python classes
    inheriting from PClass. It is defined the same way that PRecords are and behaves like a PRecord in all aspects
    except that it is not a PMap and hence not a collection but rather a plain Python object.

    More documentation and examples of PClass usage are available at https://github.com/tobgu/pyrsistent
    """
    def __new__(cls, **kwargs):    # Support *args?
        result = super(PClass, cls).__new__(cls)
        factory_fields = kwargs.pop('_factory_fields', None)
        ignore_extra = kwargs.pop('ignore_extra', None)
        missing_fields = []
        invariant_errors = []
        for name, field in cls._pclass_fields.items():
            if name in kwargs:
                if factory_fields is None or name in factory_fields:
                    if is_field_ignore_extra_complaint(PClass, field, ignore_extra):
                        value = field.factory(kwargs[name], ignore_extra=ignore_extra)
                    else:
                        value = field.factory(kwargs[name])
                else:
                    value = kwargs[name]
                _check_and_set_attr(cls, field, name, value, result, invariant_errors)
                del kwargs[name]
            elif field.initial is not PFIELD_NO_INITIAL:
                initial = field.initial() if callable(field.initial) else field.initial
                _check_and_set_attr(
                    cls, field, name, initial, result, invariant_errors)
            elif field.mandatory:
                missing_fields.append('{0}.{1}'.format(cls.__name__, name))

        if invariant_errors or missing_fields:
            raise InvariantException(tuple(invariant_errors), tuple(missing_fields), 'Field invariant failed')

        if kwargs:
            raise AttributeError("'{0}' are not among the specified fields for {1}".format(
                ', '.join(kwargs), cls.__name__))

        check_global_invariants(result, cls._pclass_invariants)

        result._pclass_frozen = True
        return result

    def set(self, *args, **kwargs):
        """
        Set a field in the instance. Returns a new instance with the updated value. The original instance remains
        unmodified. Accepts key-value pairs or single string representing the field name and a value.

        >>> from pyrsistent import PClass, field
        >>> class AClass(PClass):
        ...     x = field()
        ...
        >>> a = AClass(x=1)
        >>> a2 = a.set(x=2)
        >>> a3 = a.set('x', 3)
        >>> a
        AClass(x=1)
        >>> a2
        AClass(x=2)
        >>> a3
        AClass(x=3)
        """
        if args:
            kwargs[args[0]] = args[1]

        factory_fields = set(kwargs)

        for key in self._pclass_fields:
            if key not in kwargs:
                value = getattr(self, key, _MISSING_VALUE)
                if value is not _MISSING_VALUE:
                    kwargs[key] = value

        return self.__class__(_factory_fields=factory_fields, **kwargs)

    @classmethod
    def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
        """
        Factory method. Will create a new PClass of the current type and assign the values
        specified in kwargs.

        :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not
                             in the set of fields on the PClass.
        """
        if isinstance(kwargs, cls):
            return kwargs

        if ignore_extra:
            kwargs = {k: kwargs[k] for k in cls._pclass_fields if k in kwargs}

        return cls(_factory_fields=_factory_fields, ignore_extra=ignore_extra, **kwargs)

    def serialize(self, format=None):
        """
        Serialize the current PClass using custom serializer functions for fields where
        such have been supplied.
        """
        result = {}
        for name in self._pclass_fields:
            value = getattr(self, name, _MISSING_VALUE)
            if value is not _MISSING_VALUE:
                result[name] = serialize(self._pclass_fields[name].serializer, format, value)

        return result

    def transform(self, *transformations):
        """
        Apply transformations to the current PClass. For more details on transformations see
        the documentation for PMap. Transformations on PClasses do not support key matching
        since the PClass is not a collection. Apart from that the transformations available
        for other persistent types work as expected.
        """
        return transform(self, transformations)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            for name in self._pclass_fields:
                if getattr(self, name, _MISSING_VALUE) != getattr(other, name, _MISSING_VALUE):
                    return False

            return True

        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # May want to optimize this by caching the hash somehow
        return hash(tuple((key, getattr(self, key, _MISSING_VALUE)) for key in self._pclass_fields))

    def __setattr__(self, key, value):
        if getattr(self, '_pclass_frozen', False):
            raise AttributeError("Can't set attribute, key={0}, value={1}".format(key, value))

        super(PClass, self).__setattr__(key, value)

    def __delattr__(self, key):
        raise AttributeError("Can't delete attribute, key={0}, use remove()".format(key))

    def _to_dict(self):
        result = {}
        for key in self._pclass_fields:
            value = getattr(self, key, _MISSING_VALUE)
            if value is not _MISSING_VALUE:
                result[key] = value

        return result

    def __repr__(self):
        return "{0}({1})".format(self.__class__.__name__,
                                 ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self._to_dict().items()))

    def __reduce__(self):
        # Pickling support
        data = dict((key, getattr(self, key)) for key in self._pclass_fields if hasattr(self, key))
        return _restore_pickle, (self.__class__, data,)

    def evolver(self):
        """
        Returns an evolver for this object.
        """
        return _PClassEvolver(self, self._to_dict())

    def remove(self, name):
        """
        Remove attribute given by name from the current instance. Raises AttributeError if the
        attribute doesn't exist.
        """
        evolver = self.evolver()
        del evolver[name]
        return evolver.persistent()


class _PClassEvolver(object):
    __slots__ = ('_pclass_evolver_original', '_pclass_evolver_data', '_pclass_evolver_data_is_dirty', '_factory_fields')

    def __init__(self, original, initial_dict):
        self._pclass_evolver_original = original
        self._pclass_evolver_data = initial_dict
        self._pclass_evolver_data_is_dirty = False
        self._factory_fields = set()

    def __getitem__(self, item):
        return self._pclass_evolver_data[item]

    def set(self, key, value):
        if self._pclass_evolver_data.get(key, _MISSING_VALUE) is not value:
            self._pclass_evolver_data[key] = value
            self._factory_fields.add(key)
            self._pclass_evolver_data_is_dirty = True

        return self

    def __setitem__(self, key, value):
        self.set(key, value)

    def remove(self, item):
        if item in self._pclass_evolver_data:
            del self._pclass_evolver_data[item]
            self._factory_fields.discard(item)
            self._pclass_evolver_data_is_dirty = True
            return self

        raise AttributeError(item)

    def __delitem__(self, item):
        self.remove(item)

    def persistent(self):
        if self._pclass_evolver_data_is_dirty:
            return self._pclass_evolver_original.__class__(_factory_fields=self._factory_fields,
                                                           **self._pclass_evolver_data)

        return self._pclass_evolver_original

    def __setattr__(self, key, value):
        if key not in self.__slots__:
            self.set(key, value)
        else:
            super(_PClassEvolver, self).__setattr__(key, value)

    def __getattr__(self, item):
        return self[item]
9,702
Python
35.893536
120
0.591012
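An illustrative round trip through the PClass machinery above; field() lives elsewhere in the pyrsistent package, so this assumes the full package is importable:

# Sketch: declaring a PClass, then exercising set(), evolver(), and serialize().
from pyrsistent import PClass, field  # field() is defined outside the file above

class Point(PClass):
    x = field(type=int, mandatory=True)
    y = field(type=int, initial=0)

p = Point(x=1)            # a missing mandatory field would raise InvariantException
p2 = p.set(y=5)           # returns a new frozen instance; p is unchanged
e = p2.evolver()          # _PClassEvolver batches updates
e.x = 10
p3 = e.persistent()       # only rebuilt because the evolver is dirty
print(p3)                 # e.g. Point(x=10, y=5)
print(p3.serialize())     # e.g. {'x': 10, 'y': 5}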
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_immutable.py
import sys


def immutable(members='', name='Immutable', verbose=False):
    """
    Produces a class that either can be used standalone or as a base class for persistent classes.

    This is a thin wrapper around a named tuple.

    Constructing a type and using it to instantiate objects:

    >>> Point = immutable('x, y', name='Point')
    >>> p = Point(1, 2)
    >>> p2 = p.set(x=3)
    >>> p
    Point(x=1, y=2)
    >>> p2
    Point(x=3, y=2)

    Inheriting from a constructed type. In this case no type name needs to be supplied:

    >>> class PositivePoint(immutable('x, y')):
    ...     __slots__ = tuple()
    ...     def __new__(cls, x, y):
    ...         if x > 0 and y > 0:
    ...             return super(PositivePoint, cls).__new__(cls, x, y)
    ...         raise Exception('Coordinates must be positive!')
    ...
    >>> p = PositivePoint(1, 2)
    >>> p.set(x=3)
    PositivePoint(x=3, y=2)
    >>> p.set(y=-3)
    Traceback (most recent call last):
    Exception: Coordinates must be positive!

    The persistent class also supports the notion of frozen members. The value of a frozen member
    cannot be updated. For example it could be used to implement an ID that should remain the same
    over time. A frozen member is denoted by a trailing underscore.

    >>> Point = immutable('x, y, id_', name='Point')
    >>> p = Point(1, 2, id_=17)
    >>> p.set(x=3)
    Point(x=3, y=2, id_=17)
    >>> p.set(id_=18)
    Traceback (most recent call last):
    AttributeError: Cannot set frozen members id_
    """

    if isinstance(members, str):
        members = members.replace(',', ' ').split()

    def frozen_member_test():
        frozen_members = ["'%s'" % f for f in members if f.endswith('_')]
        if frozen_members:
            return """
        frozen_fields = fields_to_modify & set([{frozen_members}])
        if frozen_fields:
            raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields))
            """.format(frozen_members=', '.join(frozen_members))

        return ''

    quoted_members = ', '.join("'%s'" % m for m in members)
    template = """
class {class_name}(namedtuple('ImmutableBase', [{quoted_members}])):
    __slots__ = tuple()

    def __repr__(self):
        return super({class_name}, self).__repr__().replace('ImmutableBase', self.__class__.__name__)

    def set(self, **kwargs):
        if not kwargs:
            return self

        fields_to_modify = set(kwargs.keys())
        if not fields_to_modify <= {member_set}:
            raise AttributeError("'%s' is not a member" % ', '.join(fields_to_modify - {member_set}))

        {frozen_member_test}

        return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self))
""".format(quoted_members=quoted_members,
           member_set="set([%s])" % quoted_members if quoted_members else 'set()',
           frozen_member_test=frozen_member_test(),
           class_name=name)

    if verbose:
        print(template)

    from collections import namedtuple
    namespace = dict(namedtuple=namedtuple, __name__='pyrsistent_immutable')
    try:
        exec(template, namespace)
    except SyntaxError as e:
        raise SyntaxError(str(e) + ':\n' + template) from e

    return namespace[name]
3,287
Python
32.55102
101
0.585336
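A small sketch of the generated named-tuple class from immutable() above, including the frozen trailing-underscore member described in its docstring; the top-level re-export from pyrsistent is assumed:

# Sketch: exercising immutable(), including a frozen (trailing underscore) member.
from pyrsistent import immutable  # assumes the top-level re-export is available

Point = immutable('x, y, id_', name='Point')
p = Point(1, 2, id_=7)
print(p.set(x=3))    # Point(x=3, y=2, id_=7); a new instance, p is unchanged
try:
    p.set(id_=8)     # frozen member: the generated set() raises AttributeError
except AttributeError as exc:
    print(exc)       # Cannot set frozen members id_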