Dataset schema (one record per source file; each record lists its fields in
this order):

  file_path           string    length 21 to 202
  content             string    length 12 to 1.02M
  size                int64     12 to 1.02M
  lang                string    9 distinct values
  avg_line_length     float64   3.33 to 100
  max_line_length     int64     10 to 993
  alphanum_fraction   float64   0.27 to 0.93

file_path: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/exceptions.py
content:

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Exceptions coverage.py can raise."""


class BaseCoverageException(Exception):
    """The base of all Coverage exceptions."""
    pass


class CoverageException(BaseCoverageException):
    """An exception raised by a coverage.py function."""
    pass


class NoSource(CoverageException):
    """We couldn't find the source for a module."""
    pass


class NoCode(NoSource):
    """We couldn't find any code at all."""
    pass


class NotPython(CoverageException):
    """A source file turned out not to be parsable Python."""
    pass


class ExceptionDuringRun(CoverageException):
    """An exception happened while running customer code.

    Construct it with three arguments, the values from `sys.exc_info`.

    """
    pass


class StopEverything(BaseCoverageException):
    """An exception that means everything should stop.

    The CoverageTest class converts these to SkipTest, so that when running
    tests, raising this exception will automatically skip the test.

    """
    pass


class CoverageWarning(Warning):
    """A warning from Coverage.py."""
    pass

size: 1,237 | lang: Python | avg_line_length: 21.925926 | max_line_length: 79 | alphanum_fraction: 0.713015
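
A minimal sketch of catching this exception hierarchy from calling code,
assuming the bundled coverage package is importable; the measured script and
the failure mode are hypothetical:

import coverage
from coverage.exceptions import CoverageException, NoSource

cov = coverage.Coverage()
cov.start()
# ... run the code under measurement here ...
cov.stop()

try:
    cov.report()
except NoSource as err:
    # NoSource subclasses CoverageException, so catch it first.
    print(f"source missing: {err}")
except CoverageException as err:
    # Any other coverage.py failure, e.g. "No data to report."
    print(f"coverage error: {err}")
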
file_path: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/phystokens.py
content:

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Better tokenizing for coverage.py."""

import ast
import keyword
import re
import token
import tokenize

from coverage import env
from coverage.misc import contract


def phys_tokens(toks):
    """Return all physical tokens, even line continuations.

    tokenize.generate_tokens() doesn't return a token for the backslash that
    continues lines.  This wrapper provides those tokens so that we can
    re-create a faithful representation of the original source.

    Returns the same values as generate_tokens()

    """
    last_line = None
    last_lineno = -1
    last_ttext = None
    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
        if last_lineno != elineno:
            if last_line and last_line.endswith("\\\n"):
                # We are at the beginning of a new line, and the last line
                # ended with a backslash.  We probably have to inject a
                # backslash token into the stream. Unfortunately, there's more
                # to figure out.  This code::
                #
                #   usage = """\
                #   HEY THERE
                #   """
                #
                # triggers this condition, but the token text is::
                #
                #   '"""\\\nHEY THERE\n"""'
                #
                # so we need to figure out if the backslash is already in the
                # string token or not.
                inject_backslash = True
                if last_ttext.endswith("\\"):
                    inject_backslash = False
                elif ttype == token.STRING:
                    if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
                        # It's a multi-line string and the first line ends with
                        # a backslash, so we don't need to inject another.
                        inject_backslash = False
                if inject_backslash:
                    # Figure out what column the backslash is in.
                    ccol = len(last_line.split("\n")[-2]) - 1
                    # Yield the token, with a fake token type.
                    yield (
                        99999, "\\\n",
                        (slineno, ccol), (slineno, ccol+2),
                        last_line
                    )
        last_line = ltext
        if ttype not in (tokenize.NEWLINE, tokenize.NL):
            last_ttext = ttext
        yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
        last_lineno = elineno


class MatchCaseFinder(ast.NodeVisitor):
    """Helper for finding match/case lines."""
    def __init__(self, source):
        # This will be the set of line numbers that start match or case statements.
        self.match_case_lines = set()
        self.visit(ast.parse(source))

    def visit_Match(self, node):
        """Invoked by ast.NodeVisitor.visit"""
        self.match_case_lines.add(node.lineno)
        for case in node.cases:
            self.match_case_lines.add(case.pattern.lineno)
        self.generic_visit(node)


@contract(source='unicode')
def source_token_lines(source):
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing whitespace is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """
    ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
    line = []
    col = 0

    source = source.expandtabs(8).replace('\r\n', '\n')
    tokgen = generate_tokens(source)

    if env.PYBEHAVIOR.soft_keywords:
        match_case_lines = MatchCaseFinder(source).match_case_lines

    for ttype, ttext, (sline, scol), (_, ecol), _ in phys_tokens(tokgen):
        mark_start = True
        for part in re.split('(\n)', ttext):
            if part == '\n':
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == '':
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if mark_start and scol > col:
                    line.append(("ws", " " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
                if ttype == token.NAME:
                    if keyword.iskeyword(ttext):
                        # Hard keywords are always keywords.
                        tok_class = "key"
                    elif env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext):
                        # Soft keywords appear at the start of the line, on lines that start
                        # match or case statements.
                        if len(line) == 0:
                            is_start_of_line = True
                        elif (len(line) == 1) and line[0][0] == "ws":
                            is_start_of_line = True
                        else:
                            is_start_of_line = False
                        if is_start_of_line and sline in match_case_lines:
                            tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line


class CachedTokenizer:
    """A one-element cache around tokenize.generate_tokens.

    When reporting, coverage.py tokenizes files twice, once to find the
    structure of the file, and once to syntax-color it.  Tokenizing is
    expensive, and easily cached.

    This is a one-element cache so that our twice-in-a-row tokenizing doesn't
    actually tokenize twice.

    """
    def __init__(self):
        self.last_text = None
        self.last_tokens = None

    @contract(text='unicode')
    def generate_tokens(self, text):
        """A stand-in for `tokenize.generate_tokens`."""
        if text != self.last_text:
            self.last_text = text
            readline = iter(text.splitlines(True)).__next__
            self.last_tokens = list(tokenize.generate_tokens(readline))
        return self.last_tokens

# Create our generate_tokens cache as a callable replacement function.
generate_tokens = CachedTokenizer().generate_tokens


COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)

@contract(source='bytes')
def source_encoding(source):
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string: the text of the program.

    Returns a string, the name of the encoding.

    """
    readline = iter(source.splitlines(True)).__next__
    return tokenize.detect_encoding(readline)[0]


@contract(source='unicode')
def compile_unicode(source, filename, mode):
    """Just like the `compile` builtin, but works on any Unicode string.

    Python 2's compile() builtin has a stupid restriction: if the source string
    is Unicode, then it may not have an encoding declaration in it.  Why not?
    Who knows!  It also decodes to utf-8, and then tries to interpret those
    utf-8 bytes according to the encoding declaration.  Why? Who knows!

    This function neuters the coding declaration, and compiles it.

    """
    source = neuter_encoding_declaration(source)
    code = compile(source, filename, mode)
    return code


@contract(source='unicode', returns='unicode')
def neuter_encoding_declaration(source):
    """Return `source`, with any encoding declaration neutered."""
    if COOKIE_RE.search(source):
        source_lines = source.splitlines(True)
        for lineno in range(min(2, len(source_lines))):
            source_lines[lineno] = COOKIE_RE.sub("# (deleted declaration)", source_lines[lineno])
        source = "".join(source_lines)
    return source

size: 8,236 | lang: Python | avg_line_length: 35.772321 | max_line_length: 97 | alphanum_fraction: 0.568237
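
A small usage sketch for source_token_lines above, assuming the module is
importable as coverage.phystokens; the sample source string is invented:

from coverage.phystokens import source_token_lines

SAMPLE = "def hello():\n    return 'hi'\n"

for line in source_token_lines(SAMPLE):
    # Each yielded line is a list of (token_class, token_text) pairs,
    # e.g. [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ...]
    print(line)
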
file_path: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/report.py
content:

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Reporter foundation for coverage.py."""

import sys

from coverage.exceptions import CoverageException, NotPython
from coverage.files import prep_patterns, FnmatchMatcher
from coverage.misc import ensure_dir_for_file, file_be_gone


def render_report(output_path, reporter, morfs, msgfn):
    """Run a one-file report generator, managing the output file.

    This function ensures the output file is ready to be written to. Then writes
    the report to it. Then closes the file and cleans up.

    """
    file_to_close = None
    delete_file = False

    if output_path == "-":
        outfile = sys.stdout
    else:
        # Ensure that the output directory is created; done here
        # because this report pre-opens the output file.
        # HTMLReport does this using the Report plumbing because
        # its task is more complex, being multiple files.
        ensure_dir_for_file(output_path)
        outfile = open(output_path, "w", encoding="utf-8")
        file_to_close = outfile

    try:
        return reporter.report(morfs, outfile=outfile)
    except CoverageException:
        delete_file = True
        raise
    finally:
        if file_to_close:
            file_to_close.close()
            if delete_file:
                file_be_gone(output_path)           # pragma: part covered (doesn't return)
            else:
                msgfn(f"Wrote {reporter.report_type} to {output_path}")


def get_analysis_to_report(coverage, morfs):
    """Get the files to report on.

    For each morf in `morfs`, if it should be reported on (based on the omit
    and include configuration options), yield a pair, the `FileReporter` and
    `Analysis` for the morf.

    """
    file_reporters = coverage._get_file_reporters(morfs)
    config = coverage.config

    if config.report_include:
        matcher = FnmatchMatcher(prep_patterns(config.report_include), "report_include")
        file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]

    if config.report_omit:
        matcher = FnmatchMatcher(prep_patterns(config.report_omit), "report_omit")
        file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]

    if not file_reporters:
        raise CoverageException("No data to report.")

    for fr in sorted(file_reporters):
        try:
            analysis = coverage._analyze(fr)
        except NotPython:
            # Only report errors for .py files, and only if we didn't
            # explicitly suppress those errors.
            # NotPython is only raised by PythonFileReporter, which has a
            # should_be_python() method.
            if fr.should_be_python():
                if config.ignore_errors:
                    msg = f"Couldn't parse Python file '{fr.filename}'"
                    coverage._warn(msg, slug="couldnt-parse")
                else:
                    raise
        except Exception as exc:
            if config.ignore_errors:
                msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
                coverage._warn(msg, slug="couldnt-parse")
            else:
                raise
        else:
            yield (fr, analysis)

size: 3,327 | lang: Python | avg_line_length: 35.173913 | max_line_length: 91 | alphanum_fraction: 0.62609
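
A sketch of the contract render_report expects from a reporter object;
EchoReporter is a hypothetical stand-in for coverage.py's real one-file
reporters (such as the XML reporter):

from coverage.report import render_report

class EchoReporter:
    """Hypothetical stand-in for a one-file reporter."""
    report_type = "echo report"

    def report(self, morfs, outfile):
        outfile.write("nothing to report\n")
        return 100.0

# "-" sends the report to stdout; any other path is opened for writing,
# and deleted again if the reporter raises CoverageException.
render_report("-", EchoReporter(), morfs=None, msgfn=print)
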
file_path: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/parser.py
content:

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Code parsing for coverage.py."""

import ast
import collections
import os
import re
import token
import tokenize

from coverage import env
from coverage.bytecode import code_objects
from coverage.debug import short_stack
from coverage.exceptions import NoSource, NotPython, StopEverything
from coverage.misc import contract, join_regex, new_contract, nice_pair, one_of
from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration


class PythonParser:
    """Parse code to find executable lines, excluded lines, etc.

    This information is all based on static analysis: no code execution is
    involved.

    """
    @contract(text='unicode|None')
    def __init__(self, text=None, filename=None, exclude=None):
        """
        Source can be provided as `text`, the text itself, or `filename`, from
        which the text will be read.  Excluded lines are those that match
        `exclude`, a regex.

        """
        assert text or filename, "PythonParser needs either text or filename"
        self.filename = filename or "<code>"
        self.text = text
        if not self.text:
            from coverage.python import get_python_source
            try:
                self.text = get_python_source(self.filename)
            except OSError as err:
                raise NoSource(f"No source for code: '{self.filename}': {err}") from err

        self.exclude = exclude

        # The text lines of the parsed code.
        self.lines = self.text.split('\n')

        # The normalized line numbers of the statements in the code. Exclusions
        # are taken into account, and statements are adjusted to their first
        # lines.
        self.statements = set()

        # The normalized line numbers of the excluded lines in the code,
        # adjusted to their first lines.
        self.excluded = set()

        # The raw_* attributes are only used in this class, and in
        # lab/parser.py to show how this class is working.

        # The line numbers that start statements, as reported by the line
        # number table in the bytecode.
        self.raw_statements = set()

        # The raw line numbers of excluded lines of code, as marked by pragmas.
        self.raw_excluded = set()

        # The line numbers of class and function definitions.
        self.raw_classdefs = set()

        # The line numbers of docstring lines.
        self.raw_docstrings = set()

        # Internal detail, used by lab/parser.py.
        self.show_tokens = False

        # A dict mapping line numbers to lexical statement starts for
        # multi-line statements.
        self._multiline = {}

        # Lazily-created arc data, and missing arc descriptions.
        self._all_arcs = None
        self._missing_arc_fragments = None

    def lines_matching(self, *regexes):
        """Find the lines matching one of a list of regexes.

        Returns a set of line numbers, the lines that contain a match for one
        of the regexes in `regexes`.  The entire line needn't match, just a
        part of it.

        """
        combined = join_regex(regexes)
        regex_c = re.compile(combined)
        matches = set()
        for i, ltext in enumerate(self.lines, start=1):
            if regex_c.search(ltext):
                matches.add(i)
        return matches

    def _raw_parse(self):
        """Parse the source to find the interesting facts about its lines.

        A handful of attributes are updated.

        """
        # Find lines which match an exclusion pattern.
        if self.exclude:
            self.raw_excluded = self.lines_matching(self.exclude)

        # Tokenize, to find excluded suites, to find docstrings, and to find
        # multi-line statements.
        indent = 0
        exclude_indent = 0
        excluding = False
        excluding_decorators = False
        prev_toktype = token.INDENT
        first_line = None
        empty = True
        first_on_line = True

        tokgen = generate_tokens(self.text)
        for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
            if self.show_tokens:                # pragma: debugging
                print("%10s %5s %-20r %r" % (
                    tokenize.tok_name.get(toktype, toktype),
                    nice_pair((slineno, elineno)), ttext, ltext
                ))
            if toktype == token.INDENT:
                indent += 1
            elif toktype == token.DEDENT:
                indent -= 1
            elif toktype == token.NAME:
                if ttext == 'class':
                    # Class definitions look like branches in the bytecode, so
                    # we need to exclude them.  The simplest way is to note the
                    # lines with the 'class' keyword.
                    self.raw_classdefs.add(slineno)
            elif toktype == token.OP:
                if ttext == ':':
                    should_exclude = (elineno in self.raw_excluded) or excluding_decorators
                    if not excluding and should_exclude:
                        # Start excluding a suite.  We trigger off of the colon
                        # token so that the #pragma comment will be recognized on
                        # the same line as the colon.
                        self.raw_excluded.add(elineno)
                        exclude_indent = indent
                        excluding = True
                        excluding_decorators = False
                elif ttext == '@' and first_on_line:
                    # A decorator.
                    if elineno in self.raw_excluded:
                        excluding_decorators = True
                    if excluding_decorators:
                        self.raw_excluded.add(elineno)
            elif toktype == token.STRING and prev_toktype == token.INDENT:
                # Strings that are first on an indented line are docstrings.
                # (a trick from trace.py in the stdlib.) This works for
                # 99.9999% of cases.  For the rest (!) see:
                # http://stackoverflow.com/questions/1769332/x/1769794#1769794
                self.raw_docstrings.update(range(slineno, elineno+1))
            elif toktype == token.NEWLINE:
                if first_line is not None and elineno != first_line:
                    # We're at the end of a line, and we've ended on a
                    # different line than the first line of the statement,
                    # so record a multi-line range.
                    for l in range(first_line, elineno+1):
                        self._multiline[l] = first_line
                first_line = None
                first_on_line = True

            if ttext.strip() and toktype != tokenize.COMMENT:
                # A non-whitespace token.
                empty = False
                if first_line is None:
                    # The token is not whitespace, and is the first in a
                    # statement.
                    first_line = slineno
                    # Check whether to end an excluded suite.
                    if excluding and indent <= exclude_indent:
                        excluding = False
                    if excluding:
                        self.raw_excluded.add(elineno)
                first_on_line = False

            prev_toktype = toktype

        # Find the starts of the executable statements.
        if not empty:
            byte_parser = ByteParser(self.text, filename=self.filename)
            self.raw_statements.update(byte_parser._find_statements())

        # The first line of modules can lie and say 1 always, even if the first
        # line of code is later. If so, map 1 to the actual first line of the
        # module.
        if env.PYBEHAVIOR.module_firstline_1 and self._multiline:
            self._multiline[1] = min(self.raw_statements)

    def first_line(self, line):
        """Return the first line number of the statement including `line`."""
        if line < 0:
            line = -self._multiline.get(-line, -line)
        else:
            line = self._multiline.get(line, line)
        return line

    def first_lines(self, lines):
        """Map the line numbers in `lines` to the correct first line of the
        statement.

        Returns a set of the first lines.

        """
        return {self.first_line(l) for l in lines}

    def translate_lines(self, lines):
        """Implement `FileReporter.translate_lines`."""
        return self.first_lines(lines)

    def translate_arcs(self, arcs):
        """Implement `FileReporter.translate_arcs`."""
        return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]

    def parse_source(self):
        """Parse source text to find executable lines, excluded lines, etc.

        Sets the .excluded and .statements attributes, normalized to the first
        line of multi-line statements.

        """
        try:
            self._raw_parse()
        except (tokenize.TokenError, IndentationError) as err:
            if hasattr(err, "lineno"):
                lineno = err.lineno         # IndentationError
            else:
                lineno = err.args[1][0]     # TokenError
            raise NotPython(
                f"Couldn't parse '{self.filename}' as Python source: " +
                f"{err.args[0]!r} at line {lineno}"
            ) from err

        self.excluded = self.first_lines(self.raw_excluded)

        ignore = self.excluded | self.raw_docstrings
        starts = self.raw_statements - ignore
        self.statements = self.first_lines(starts) - ignore

    def arcs(self):
        """Get information about the arcs available in the code.

        Returns a set of line number pairs.  Line numbers have been normalized
        to the first line of multi-line statements.

        """
        if self._all_arcs is None:
            self._analyze_ast()
        return self._all_arcs

    def _analyze_ast(self):
        """Run the AstArcAnalyzer and save its results.

        `_all_arcs` is the set of arcs in the code.

        """
        aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline)
        aaa.analyze()

        self._all_arcs = set()
        for l1, l2 in aaa.arcs:
            fl1 = self.first_line(l1)
            fl2 = self.first_line(l2)
            if fl1 != fl2:
                self._all_arcs.add((fl1, fl2))

        self._missing_arc_fragments = aaa.missing_arc_fragments

    def exit_counts(self):
        """Get a count of exits from each line.

        Excluded lines are excluded.

        """
        exit_counts = collections.defaultdict(int)
        for l1, l2 in self.arcs():
            if l1 < 0:
                # Don't ever report -1 as a line number
                continue
            if l1 in self.excluded:
                # Don't report excluded lines as line numbers.
                continue
            if l2 in self.excluded:
                # Arcs to excluded lines shouldn't count.
                continue
            exit_counts[l1] += 1

        # Class definitions have one extra exit, so remove one for each:
        for l in self.raw_classdefs:
            # Ensure key is there: class definitions can include excluded lines.
            if l in exit_counts:
                exit_counts[l] -= 1

        return exit_counts

    def missing_arc_description(self, start, end, executed_arcs=None):
        """Provide an English sentence describing a missing arc."""
        if self._missing_arc_fragments is None:
            self._analyze_ast()

        actual_start = start

        if (
            executed_arcs and
            end < 0 and end == -start and
            (end, start) not in executed_arcs and
            (end, start) in self._missing_arc_fragments
        ):
            # It's a one-line callable, and we never even started it,
            # and we have a message about not starting it.
            start, end = end, start

        fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])

        msgs = []
        for smsg, emsg in fragment_pairs:
            if emsg is None:
                if end < 0:
                    # Hmm, maybe we have a one-line callable, let's check.
                    if (-end, end) in self._missing_arc_fragments:
                        return self.missing_arc_description(-end, end)
                    emsg = "didn't jump to the function exit"
                else:
                    emsg = "didn't jump to line {lineno}"
            emsg = emsg.format(lineno=end)

            msg = f"line {actual_start} {emsg}"
            if smsg is not None:
                msg += f", because {smsg.format(lineno=actual_start)}"

            msgs.append(msg)

        return " or ".join(msgs)


class ByteParser:
    """Parse bytecode to understand the structure of code."""

    @contract(text='unicode')
    def __init__(self, text, code=None, filename=None):
        self.text = text
        if code:
            self.code = code
        else:
            try:
                self.code = compile_unicode(text, filename, "exec")
            except SyntaxError as synerr:
                raise NotPython(
                    "Couldn't parse '%s' as Python source: '%s' at line %d" % (
                        filename, synerr.msg, synerr.lineno
                    )
                ) from synerr

        # Alternative Python implementations don't always provide all the
        # attributes on code objects that we need to do the analysis.
        for attr in ['co_lnotab', 'co_firstlineno']:
            if not hasattr(self.code, attr):
                raise StopEverything(                   # pragma: only jython
                    "This implementation of Python doesn't support code analysis.\n" +
                    "Run coverage.py under another Python for this command."
                )

    def child_parsers(self):
        """Iterate over all the code objects nested within this one.

        The iteration includes `self` as its first value.

        """
        return (ByteParser(self.text, code=c) for c in code_objects(self.code))

    def _line_numbers(self):
        """Yield the line numbers possible in this code object.

        Uses co_lnotab described in Python/compile.c to find the
        line numbers.  Produces a sequence: l0, l1, ...

        """
        if hasattr(self.code, "co_lines"):
            for _, _, line in self.code.co_lines():
                if line is not None:
                    yield line
        else:
            # Adapted from dis.py in the standard library.
            byte_increments = self.code.co_lnotab[0::2]
            line_increments = self.code.co_lnotab[1::2]

            last_line_num = None
            line_num = self.code.co_firstlineno
            byte_num = 0
            for byte_incr, line_incr in zip(byte_increments, line_increments):
                if byte_incr:
                    if line_num != last_line_num:
                        yield line_num
                        last_line_num = line_num
                    byte_num += byte_incr
                if env.PYBEHAVIOR.negative_lnotab and line_incr >= 0x80:
                    line_incr -= 0x100
                line_num += line_incr
            if line_num != last_line_num:
                yield line_num

    def _find_statements(self):
        """Find the statements in `self.code`.

        Produce a sequence of line numbers that start statements.  Recurses
        into all code objects reachable from `self.code`.

        """
        for bp in self.child_parsers():
            # Get all of the lineno information from this code.
            yield from bp._line_numbers()


#
# AST analysis
#

class BlockBase:
    """
    Blocks need to handle various exiting statements in their own ways.

    All of these methods take a list of exits, and a callable `add_arc`
    function that they can use to add arcs if needed.  They return True if the
    exits are handled, or False if the search should continue up the block
    stack.
    """
    # pylint: disable=unused-argument

    def process_break_exits(self, exits, add_arc):
        """Process break exits."""
        # Because break can only appear in loops, and most subclasses
        # implement process_break_exits, this function is never reached.
        raise AssertionError

    def process_continue_exits(self, exits, add_arc):
        """Process continue exits."""
        # Because continue can only appear in loops, and most subclasses
        # implement process_continue_exits, this function is never reached.
        raise AssertionError

    def process_raise_exits(self, exits, add_arc):
        """Process raise exits."""
        return False

    def process_return_exits(self, exits, add_arc):
        """Process return exits."""
        return False


class LoopBlock(BlockBase):
    """A block on the block stack representing a `for` or `while` loop."""
    @contract(start=int)
    def __init__(self, start):
        # The line number where the loop starts.
        self.start = start
        # A set of ArcStarts, the arcs from break statements exiting this loop.
        self.break_exits = set()

    def process_break_exits(self, exits, add_arc):
        self.break_exits.update(exits)
        return True

    def process_continue_exits(self, exits, add_arc):
        for xit in exits:
            add_arc(xit.lineno, self.start, xit.cause)
        return True


class FunctionBlock(BlockBase):
    """A block on the block stack representing a function definition."""
    @contract(start=int, name=str)
    def __init__(self, start, name):
        # The line number where the function starts.
        self.start = start
        # The name of the function.
        self.name = name

    def process_raise_exits(self, exits, add_arc):
        for xit in exits:
            add_arc(
                xit.lineno, -self.start, xit.cause,
                f"didn't except from function {self.name!r}",
            )
        return True

    def process_return_exits(self, exits, add_arc):
        for xit in exits:
            add_arc(
                xit.lineno, -self.start, xit.cause,
                f"didn't return from function {self.name!r}",
            )
        return True


class TryBlock(BlockBase):
    """A block on the block stack representing a `try` block."""
    @contract(handler_start='int|None', final_start='int|None')
    def __init__(self, handler_start, final_start):
        # The line number of the first "except" handler, if any.
        self.handler_start = handler_start
        # The line number of the "finally:" clause, if any.
        self.final_start = final_start

        # The ArcStarts for breaks/continues/returns/raises inside the "try:"
        # that need to route through the "finally:" clause.
        self.break_from = set()
        self.continue_from = set()
        self.raise_from = set()
        self.return_from = set()

    def process_break_exits(self, exits, add_arc):
        if self.final_start is not None:
            self.break_from.update(exits)
            return True
        return False

    def process_continue_exits(self, exits, add_arc):
        if self.final_start is not None:
            self.continue_from.update(exits)
            return True
        return False

    def process_raise_exits(self, exits, add_arc):
        if self.handler_start is not None:
            for xit in exits:
                add_arc(xit.lineno, self.handler_start, xit.cause)
        else:
            assert self.final_start is not None
            self.raise_from.update(exits)
        return True

    def process_return_exits(self, exits, add_arc):
        if self.final_start is not None:
            self.return_from.update(exits)
            return True
        return False


class WithBlock(BlockBase):
    """A block on the block stack representing a `with` block."""
    @contract(start=int)
    def __init__(self, start):
        # We only ever use this block if it is needed, so that we don't have to
        # check this setting in all the methods.
        assert env.PYBEHAVIOR.exit_through_with

        # The line number of the with statement.
        self.start = start

        # The ArcStarts for breaks/continues/returns/raises inside the "with:"
        # that need to go through the with-statement while exiting.
        self.break_from = set()
        self.continue_from = set()
        self.return_from = set()

    def _process_exits(self, exits, add_arc, from_set=None):
        """Helper to process the four kinds of exits."""
        for xit in exits:
            add_arc(xit.lineno, self.start, xit.cause)
        if from_set is not None:
            from_set.update(exits)
        return True

    def process_break_exits(self, exits, add_arc):
        return self._process_exits(exits, add_arc, self.break_from)

    def process_continue_exits(self, exits, add_arc):
        return self._process_exits(exits, add_arc, self.continue_from)

    def process_raise_exits(self, exits, add_arc):
        return self._process_exits(exits, add_arc)

    def process_return_exits(self, exits, add_arc):
        return self._process_exits(exits, add_arc, self.return_from)


class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
    """The information needed to start an arc.

    `lineno` is the line number the arc starts from.

    `cause` is an English text fragment used as the `startmsg` for
    AstArcAnalyzer.missing_arc_fragments.  It will be used to describe why an
    arc wasn't executed, so should fit well into a sentence of the form,
    "Line 17 didn't run because {cause}." The fragment can include "{lineno}"
    to have `lineno` interpolated into it.

    """
    def __new__(cls, lineno, cause=None):
        return super().__new__(cls, lineno, cause)


# Define contract words that PyContract doesn't have.
# ArcStarts is for a list or set of ArcStart's.
new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq))


class NodeList:
    """A synthetic fictitious node, containing a sequence of nodes.

    This is used when collapsing optimized if-statements, to represent the
    unconditional execution of one of the clauses.

    """
    def __init__(self, body):
        self.body = body
        self.lineno = body[0].lineno

# TODO: some add_arcs methods here don't add arcs, they return them. Rename them.
# TODO: the cause messages have too many commas.
# TODO: Shouldn't the cause messages join with "and" instead of "or"?

def ast_parse(text):
    """How we create an AST parse."""
    return ast.parse(neuter_encoding_declaration(text))


class AstArcAnalyzer:
    """Analyze source text with an AST to find executable code paths."""

    @contract(text='unicode', statements=set)
    def __init__(self, text, statements, multiline):
        self.root_node = ast_parse(text)
        # TODO: I think this is happening in too many places.
        self.statements = {multiline.get(l, l) for l in statements}
        self.multiline = multiline

        # Turn on AST dumps with an environment variable.
        # $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code.
        dump_ast = bool(int(os.environ.get("COVERAGE_AST_DUMP", 0)))

        if dump_ast:                                # pragma: debugging
            # Dump the AST so that failing tests have helpful output.
            print(f"Statements: {self.statements}")
            print(f"Multiline map: {self.multiline}")
            ast_dump(self.root_node)

        self.arcs = set()

        # A map from arc pairs to a list of pairs of sentence fragments:
        #   { (start, end): [(startmsg, endmsg), ...], }
        #
        # For an arc from line 17, they should be usable like:
        #    "Line 17 {endmsg}, because {startmsg}"
        self.missing_arc_fragments = collections.defaultdict(list)
        self.block_stack = []

        # $set_env.py: COVERAGE_TRACK_ARCS - Trace every arc added while parsing code.
        self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))

    def analyze(self):
        """Examine the AST tree from `root_node` to determine possible arcs.

        This sets the `arcs` attribute to be a set of (from, to) line number
        pairs.

        """
        for node in ast.walk(self.root_node):
            node_name = node.__class__.__name__
            code_object_handler = getattr(self, "_code_object__" + node_name, None)
            if code_object_handler is not None:
                code_object_handler(node)

    @contract(start=int, end=int)
    def add_arc(self, start, end, smsg=None, emsg=None):
        """Add an arc, including message fragments to use if it is missing."""
        if self.debug:                      # pragma: debugging
            print(f"\nAdding arc: ({start}, {end}): {smsg!r}, {emsg!r}")
            print(short_stack(limit=6))
        self.arcs.add((start, end))

        if smsg is not None or emsg is not None:
            self.missing_arc_fragments[(start, end)].append((smsg, emsg))

    def nearest_blocks(self):
        """Yield the blocks in nearest-to-farthest order."""
        return reversed(self.block_stack)

    @contract(returns=int)
    def line_for_node(self, node):
        """What is the right line number to use for this node?

        This dispatches to _line__Node functions where needed.

        """
        node_name = node.__class__.__name__
        handler = getattr(self, "_line__" + node_name, None)
        if handler is not None:
            return handler(node)
        else:
            return node.lineno

    def _line_decorated(self, node):
        """Compute first line number for things that can be decorated (classes and functions)."""
        lineno = node.lineno
        if env.PYBEHAVIOR.trace_decorated_def:
            if node.decorator_list:
                lineno = node.decorator_list[0].lineno
        return lineno

    def _line__Assign(self, node):
        return self.line_for_node(node.value)

    _line__ClassDef = _line_decorated

    def _line__Dict(self, node):
        if node.keys:
            if node.keys[0] is not None:
                return node.keys[0].lineno
            else:
                # Unpacked dict literals `{**{'a':1}}` have None as the key,
                # use the value in that case.
                return node.values[0].lineno
        else:
            return node.lineno

    _line__FunctionDef = _line_decorated
    _line__AsyncFunctionDef = _line_decorated

    def _line__List(self, node):
        if node.elts:
            return self.line_for_node(node.elts[0])
        else:
            return node.lineno

    def _line__Module(self, node):
        if env.PYBEHAVIOR.module_firstline_1:
            return 1
        elif node.body:
            return self.line_for_node(node.body[0])
        else:
            # Empty modules have no line number, they always start at 1.
            return 1

    # The node types that just flow to the next node with no complications.
    OK_TO_DEFAULT = {
        "AnnAssign", "Assign", "Assert", "AugAssign", "Delete", "Expr", "Global",
        "Import", "ImportFrom", "Nonlocal", "Pass",
    }

    @contract(returns='ArcStarts')
    def add_arcs(self, node):
        """Add the arcs for `node`.

        Return a set of ArcStarts, exits from this node to the next. Because a
        node represents an entire sub-tree (including its children), the exits
        from a node can be arbitrarily complex::

            if something(1):
                if other(2):
                    doit(3)
                else:
                    doit(5)

        There are two exits from line 1: they start at line 3 and line 5.

        """
        node_name = node.__class__.__name__
        handler = getattr(self, "_handle__" + node_name, None)
        if handler is not None:
            return handler(node)
        else:
            # No handler: either it's something that's ok to default (a simple
            # statement), or it's something we overlooked.
            if env.TESTING:
                if node_name not in self.OK_TO_DEFAULT:
                    raise Exception(f"*** Unhandled: {node}")       # pragma: only failure

            # Default for simple statements: one exit from this node.
            return {ArcStart(self.line_for_node(node))}

    @one_of("from_start, prev_starts")
    @contract(returns='ArcStarts')
    def add_body_arcs(self, body, from_start=None, prev_starts=None):
        """Add arcs for the body of a compound statement.

        `body` is the body node.  `from_start` is a single `ArcStart` that can
        be the previous line in flow before this body.  `prev_starts` is a set
        of ArcStarts that can be the previous line.  Only one of them should be
        given.

        Returns a set of ArcStarts, the exits from this body.

        """
        if prev_starts is None:
            prev_starts = {from_start}
        for body_node in body:
            lineno = self.line_for_node(body_node)
            first_line = self.multiline.get(lineno, lineno)
            if first_line not in self.statements:
                body_node = self.find_non_missing_node(body_node)
                if body_node is None:
                    continue
                lineno = self.line_for_node(body_node)
            for prev_start in prev_starts:
                self.add_arc(prev_start.lineno, lineno, prev_start.cause)
            prev_starts = self.add_arcs(body_node)
        return prev_starts

    def find_non_missing_node(self, node):
        """Search `node` looking for a child that has not been optimized away.

        This might return the node you started with, or it will work recursively
        to find a child node in self.statements.

        Returns a node, or None if none of the node remains.

        """
        # This repeats work just done in add_body_arcs, but this duplication
        # means we can avoid a function call in the 99.9999% case of not
        # optimizing away statements.
        lineno = self.line_for_node(node)
        first_line = self.multiline.get(lineno, lineno)
        if first_line in self.statements:
            return node

        missing_fn = getattr(self, "_missing__" + node.__class__.__name__, None)
        if missing_fn:
            node = missing_fn(node)
        else:
            node = None
        return node

    # Missing nodes: _missing__*
    #
    # Entire statements can be optimized away by Python. They will appear in
    # the AST, but not the bytecode.  These functions are called (by
    # find_non_missing_node) to find a node to use instead of the missing
    # node.  They can return None if the node should truly be gone.

    def _missing__If(self, node):
        # If the if-node is missing, then one of its children might still be
        # here, but not both. So return the first of the two that isn't missing.
        # Use a NodeList to hold the clauses as a single node.
        non_missing = self.find_non_missing_node(NodeList(node.body))
        if non_missing:
            return non_missing
        if node.orelse:
            return self.find_non_missing_node(NodeList(node.orelse))
        return None

    def _missing__NodeList(self, node):
        # A NodeList might be a mixture of missing and present nodes. Find the
        # ones that are present.
        non_missing_children = []
        for child in node.body:
            child = self.find_non_missing_node(child)
            if child is not None:
                non_missing_children.append(child)

        # Return the simplest representation of the present children.
        if not non_missing_children:
            return None
        if len(non_missing_children) == 1:
            return non_missing_children[0]
        return NodeList(non_missing_children)

    def _missing__While(self, node):
        body_nodes = self.find_non_missing_node(NodeList(node.body))
        if not body_nodes:
            return None
        # Make a synthetic While-true node.
        new_while = ast.While()
        new_while.lineno = body_nodes.lineno
        new_while.test = ast.Name()
        new_while.test.lineno = body_nodes.lineno
        new_while.test.id = "True"
        new_while.body = body_nodes.body
        new_while.orelse = None
        return new_while

    def is_constant_expr(self, node):
        """Is this a compile-time constant?"""
        node_name = node.__class__.__name__
        if node_name in ["Constant", "NameConstant", "Num"]:
            return "Num"
        elif node_name == "Name":
            if node.id in ["True", "False", "None", "__debug__"]:
                return "Name"
        return None

    # In the fullness of time, these might be good tests to write:
    #   while EXPR:
    #   while False:
    #   listcomps hidden deep in other expressions
    #   listcomps hidden in lists: x = [[i for i in range(10)]]
    #   nested function definitions

    # Exit processing: process_*_exits
    #
    # These functions process the four kinds of jump exits: break, continue,
    # raise, and return.  To figure out where an exit goes, we have to look at
    # the block stack context.  For example, a break will jump to the nearest
    # enclosing loop block, or the nearest enclosing finally block, whichever
    # is nearer.

    @contract(exits='ArcStarts')
    def process_break_exits(self, exits):
        """Add arcs due to jumps from `exits` being breaks."""
        for block in self.nearest_blocks():                         # pragma: always breaks
            if block.process_break_exits(exits, self.add_arc):
                break

    @contract(exits='ArcStarts')
    def process_continue_exits(self, exits):
        """Add arcs due to jumps from `exits` being continues."""
        for block in self.nearest_blocks():                         # pragma: always breaks
            if block.process_continue_exits(exits, self.add_arc):
                break

    @contract(exits='ArcStarts')
    def process_raise_exits(self, exits):
        """Add arcs due to jumps from `exits` being raises."""
        for block in self.nearest_blocks():
            if block.process_raise_exits(exits, self.add_arc):
                break

    @contract(exits='ArcStarts')
    def process_return_exits(self, exits):
        """Add arcs due to jumps from `exits` being returns."""
        for block in self.nearest_blocks():                         # pragma: always breaks
            if block.process_return_exits(exits, self.add_arc):
                break

    # Handlers: _handle__*
    #
    # Each handler deals with a specific AST node type, dispatched from
    # add_arcs.  Handlers return the set of exits from that node, and can
    # also call self.add_arc to record arcs they find.  These functions mirror
    # the Python semantics of each syntactic construct.  See the docstring
    # for add_arcs to understand the concept of exits from a node.
    #
    # Every node type that represents a statement should have a handler, or it
    # should be listed in OK_TO_DEFAULT.

    @contract(returns='ArcStarts')
    def _handle__Break(self, node):
        here = self.line_for_node(node)
        break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
        self.process_break_exits([break_start])
        return set()

    @contract(returns='ArcStarts')
    def _handle_decorated(self, node):
        """Add arcs for things that can be decorated (classes and functions)."""
        main_line = last = node.lineno
        if node.decorator_list:
            if env.PYBEHAVIOR.trace_decorated_def:
                last = None
            for dec_node in node.decorator_list:
                dec_start = self.line_for_node(dec_node)
                if last is not None and dec_start != last:
                    self.add_arc(last, dec_start)
                last = dec_start
            if env.PYBEHAVIOR.trace_decorated_def:
                self.add_arc(last, main_line)
                last = main_line
        # The definition line may have been missed, but we should have it
        # in `self.statements`.  For some constructs, `line_for_node` is
        # not what we'd think of as the first line in the statement, so map
        # it to the first one.
        if node.body:
            body_start = self.line_for_node(node.body[0])
            body_start = self.multiline.get(body_start, body_start)
            for lineno in range(last+1, body_start):
                if lineno in self.statements:
                    self.add_arc(last, lineno)
                    last = lineno
        # The body is handled in collect_arcs.
        return {ArcStart(last)}

    _handle__ClassDef = _handle_decorated

    @contract(returns='ArcStarts')
    def _handle__Continue(self, node):
        here = self.line_for_node(node)
        continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
        self.process_continue_exits([continue_start])
        return set()

    @contract(returns='ArcStarts')
    def _handle__For(self, node):
        start = self.line_for_node(node.iter)
        self.block_stack.append(LoopBlock(start=start))
        from_start = ArcStart(start, cause="the loop on line {lineno} never started")
        exits = self.add_body_arcs(node.body, from_start=from_start)
        # Any exit from the body will go back to the top of the loop.
        for xit in exits:
            self.add_arc(xit.lineno, start, xit.cause)
        my_block = self.block_stack.pop()
        exits = my_block.break_exits
        from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
        if node.orelse:
            else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
            exits |= else_exits
        else:
            # No else clause: exit from the for line.
            exits.add(from_start)
        return exits

    _handle__AsyncFor = _handle__For

    _handle__FunctionDef = _handle_decorated
    _handle__AsyncFunctionDef = _handle_decorated

    @contract(returns='ArcStarts')
    def _handle__If(self, node):
        start = self.line_for_node(node.test)
        from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
        exits = self.add_body_arcs(node.body, from_start=from_start)
        from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
        exits |= self.add_body_arcs(node.orelse, from_start=from_start)
        return exits

    @contract(returns='ArcStarts')
    def _handle__Match(self, node):
        start = self.line_for_node(node)
        last_start = start
        exits = set()
        had_wildcard = False
        for case in node.cases:
            case_start = self.line_for_node(case.pattern)
            if isinstance(case.pattern, ast.MatchAs):
                had_wildcard = True
            self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched")
            from_start = ArcStart(case_start, cause="the pattern on line {lineno} never matched")
            exits |= self.add_body_arcs(case.body, from_start=from_start)
            last_start = case_start
        if not had_wildcard:
            exits.add(from_start)
        return exits

    @contract(returns='ArcStarts')
    def _handle__NodeList(self, node):
        start = self.line_for_node(node)
        exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
        return exits

    @contract(returns='ArcStarts')
    def _handle__Raise(self, node):
        here = self.line_for_node(node)
        raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
        self.process_raise_exits([raise_start])
        # `raise` statement jumps away, no exits from here.
        return set()

    @contract(returns='ArcStarts')
    def _handle__Return(self, node):
        here = self.line_for_node(node)
        return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
        self.process_return_exits([return_start])
        # `return` statement jumps away, no exits from here.
        return set()

    @contract(returns='ArcStarts')
    def _handle__Try(self, node):
        if node.handlers:
            handler_start = self.line_for_node(node.handlers[0])
        else:
            handler_start = None

        if node.finalbody:
            final_start = self.line_for_node(node.finalbody[0])
        else:
            final_start = None

        # This is true by virtue of Python syntax: have to have either except
        # or finally, or both.
        assert handler_start is not None or final_start is not None

        try_block = TryBlock(handler_start, final_start)
        self.block_stack.append(try_block)

        start = self.line_for_node(node)
        exits = self.add_body_arcs(node.body, from_start=ArcStart(start))

        # We're done with the `try` body, so this block no longer handles
        # exceptions. We keep the block so the `finally` clause can pick up
        # flows from the handlers and `else` clause.
        if node.finalbody:
            try_block.handler_start = None
            if node.handlers:
                # If there are `except` clauses, then raises in the try body
                # will already jump to them.  Start this set over for raises in
                # `except` and `else`.
                try_block.raise_from = set()
        else:
            self.block_stack.pop()

        handler_exits = set()

        if node.handlers:
            last_handler_start = None
            for handler_node in node.handlers:
                handler_start = self.line_for_node(handler_node)
                if last_handler_start is not None:
                    self.add_arc(last_handler_start, handler_start)
                last_handler_start = handler_start
                from_cause = "the exception caught by line {lineno} didn't happen"
                from_start = ArcStart(handler_start, cause=from_cause)
                handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start)

        if node.orelse:
            exits = self.add_body_arcs(node.orelse, prev_starts=exits)

        exits |= handler_exits

        if node.finalbody:
            self.block_stack.pop()
            final_from = (                  # You can get to the `finally` clause from:
                exits |                         # the exits of the body or `else` clause,
                try_block.break_from |          # or a `break`,
                try_block.continue_from |       # or a `continue`,
                try_block.raise_from |          # or a `raise`,
                try_block.return_from           # or a `return`.
            )

            final_exits = self.add_body_arcs(node.finalbody, prev_starts=final_from)

            if try_block.break_from:
                if env.PYBEHAVIOR.finally_jumps_back:
                    for break_line in try_block.break_from:
                        lineno = break_line.lineno
                        cause = break_line.cause.format(lineno=lineno)
                        for final_exit in final_exits:
                            self.add_arc(final_exit.lineno, lineno, cause)
                    breaks = try_block.break_from
                else:
                    breaks = self._combine_finally_starts(try_block.break_from, final_exits)
                self.process_break_exits(breaks)

            if try_block.continue_from:
                if env.PYBEHAVIOR.finally_jumps_back:
                    for continue_line in try_block.continue_from:
                        lineno = continue_line.lineno
                        cause = continue_line.cause.format(lineno=lineno)
                        for final_exit in final_exits:
                            self.add_arc(final_exit.lineno, lineno, cause)
                    continues = try_block.continue_from
                else:
                    continues = self._combine_finally_starts(try_block.continue_from, final_exits)
                self.process_continue_exits(continues)

            if try_block.raise_from:
                self.process_raise_exits(
                    self._combine_finally_starts(try_block.raise_from, final_exits)
                )

            if try_block.return_from:
                if env.PYBEHAVIOR.finally_jumps_back:
                    for return_line in try_block.return_from:
                        lineno = return_line.lineno
                        cause = return_line.cause.format(lineno=lineno)
                        for final_exit in final_exits:
                            self.add_arc(final_exit.lineno, lineno, cause)
                    returns = try_block.return_from
                else:
                    returns = self._combine_finally_starts(try_block.return_from, final_exits)
                self.process_return_exits(returns)

            if exits:
                # The finally clause's exits are only exits for the try block
                # as a whole if the try block had some exits to begin with.
                exits = final_exits

        return exits

    @contract(starts='ArcStarts', exits='ArcStarts', returns='ArcStarts')
    def _combine_finally_starts(self, starts, exits):
        """Helper for building the cause of `finally` branches.

        "finally" clauses might not execute their exits, and the causes could
        be due to a failure to execute any of the exits in the try block. So
        we use the causes from `starts` as the causes for `exits`.

        """
        causes = []
        for start in sorted(starts):
            if start.cause is not None:
                causes.append(start.cause.format(lineno=start.lineno))
        cause = " or ".join(causes)
        exits = {ArcStart(xit.lineno, cause) for xit in exits}
        return exits

    @contract(returns='ArcStarts')
    def _handle__While(self, node):
        start = to_top = self.line_for_node(node.test)
        constant_test = self.is_constant_expr(node.test)
        top_is_body0 = False
        if constant_test:
            top_is_body0 = True
        if env.PYBEHAVIOR.keep_constant_test:
            top_is_body0 = False
        if top_is_body0:
            to_top = self.line_for_node(node.body[0])
        self.block_stack.append(LoopBlock(start=to_top))
        from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
        exits = self.add_body_arcs(node.body, from_start=from_start)
        for xit in exits:
            self.add_arc(xit.lineno, to_top, xit.cause)
        exits = set()
        my_block = self.block_stack.pop()
        exits.update(my_block.break_exits)
        from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
        if node.orelse:
            else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
            exits |= else_exits
        else:
            # No `else` clause: you can exit from the start.
            if not constant_test:
                exits.add(from_start)
        return exits

    @contract(returns='ArcStarts')
    def _handle__With(self, node):
        start = self.line_for_node(node)
        if env.PYBEHAVIOR.exit_through_with:
            self.block_stack.append(WithBlock(start=start))
        exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
        if env.PYBEHAVIOR.exit_through_with:
            with_block = self.block_stack.pop()
            with_exit = {ArcStart(start)}
            if exits:
                for xit in exits:
                    self.add_arc(xit.lineno, start)
                exits = with_exit
            if with_block.break_from:
                self.process_break_exits(
                    self._combine_finally_starts(with_block.break_from, with_exit)
                )
            if with_block.continue_from:
                self.process_continue_exits(
                    self._combine_finally_starts(with_block.continue_from, with_exit)
                )
            if with_block.return_from:
                self.process_return_exits(
                    self._combine_finally_starts(with_block.return_from, with_exit)
                )
        return exits

    _handle__AsyncWith = _handle__With

    # Code object dispatchers: _code_object__*
    #
    # These methods are used by analyze() as the start of the analysis.
    # There is one for each construct with a code object.

    def _code_object__Module(self, node):
        start = self.line_for_node(node)
        if node.body:
            exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
            for xit in exits:
                self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module")
        else:
            # Empty module.
            self.add_arc(-start, start)
            self.add_arc(start, -start)

    def _code_object__FunctionDef(self, node):
        start = self.line_for_node(node)
        self.block_stack.append(FunctionBlock(start=start, name=node.name))
        exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
        self.process_return_exits(exits)
        self.block_stack.pop()

    _code_object__AsyncFunctionDef = _code_object__FunctionDef

    def _code_object__ClassDef(self, node):
        start = self.line_for_node(node)
        self.add_arc(-start, start)
        exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
        for xit in exits:
            self.add_arc(
                xit.lineno, -start, xit.cause,
                f"didn't exit the body of class {node.name!r}",
            )

    def _make_expression_code_method(noun):     # pylint: disable=no-self-argument
        """A function to make methods for expression-based callable _code_object__ methods."""
        def _code_object__expression_callable(self, node):
            start = self.line_for_node(node)
            self.add_arc(-start, start, None, f"didn't run the {noun} on line {start}")
            self.add_arc(start, -start, None, f"didn't finish the {noun} on line {start}")
        return _code_object__expression_callable

    _code_object__Lambda = _make_expression_code_method("lambda")
    _code_object__GeneratorExp = _make_expression_code_method("generator expression")
    _code_object__DictComp = _make_expression_code_method("dictionary comprehension")
    _code_object__SetComp = _make_expression_code_method("set comprehension")
    _code_object__ListComp = _make_expression_code_method("list comprehension")


# Code only used when dumping the AST for debugging.

SKIP_DUMP_FIELDS = ["ctx"]

def _is_simple_value(value):
    """Is `value` simple enough to be displayed on a single line?"""
    return (
        value in [None, [], (), {}, set()] or
        isinstance(value, (bytes, int, float, str))
    )

def ast_dump(node, depth=0, print=print):   # pylint: disable=redefined-builtin
    """Dump the AST for `node`.

    This recursively walks the AST, printing a readable version.

    """
    indent = " " * depth
    lineno = getattr(node, "lineno", None)
    if lineno is not None:
        linemark = f" @ {node.lineno},{node.col_offset}"
        if hasattr(node, "end_lineno"):
            linemark += ":"
            if node.end_lineno != node.lineno:
                linemark += f"{node.end_lineno},"
            linemark += f"{node.end_col_offset}"
    else:
        linemark = ""
    head = f"{indent}<{node.__class__.__name__}{linemark}"

    named_fields = [
        (name, value)
        for name, value in ast.iter_fields(node)
        if name not in SKIP_DUMP_FIELDS
    ]
    if not named_fields:
        print(f"{head}>")
    elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]):
        field_name, value = named_fields[0]
        print(f"{head} {field_name}: {value!r}>")
    else:
        print(head)
        if 0:
            print("{}# mro: {}".format(
                indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
            ))
        next_indent = indent + " "
        for field_name, value in named_fields:
            prefix = f"{next_indent}{field_name}:"
            if _is_simple_value(value):
                print(f"{prefix} {value!r}")
            elif isinstance(value, list):
                print(f"{prefix} [")
                for n in value:
                    if _is_simple_value(n):
                        print(f"{next_indent} {n!r}")
                    else:
                        ast_dump(n, depth + 8, print=print)
                print(f"{next_indent}]")
            else:
                print(prefix)
                ast_dump(value, depth + 8, print=print)
        print(f"{indent}>")

size: 53,040 | lang: Python | avg_line_length: 37.857875 | max_line_length: 98 | alphanum_fraction: 0.579581
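
A short sketch of driving PythonParser directly, assuming the module above is
importable; the source text and exclude regex are invented for illustration:

from coverage.parser import PythonParser

SOURCE = (
    "a = 1\n"
    "if a > 0:\n"
    "    b = 2\n"
    "else:\n"
    "    b = 3  # pragma: no cover\n"
)

parser = PythonParser(text=SOURCE, exclude=r"#\s*pragma: no cover")
parser.parse_source()
print(parser.statements)      # normalized executable line numbers
print(parser.excluded)        # lines matched by the exclude regex
print(sorted(parser.arcs()))  # possible (from, to) branch arcs
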
file_path: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/summary.py
content:

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Summary reporting"""

import sys

from coverage.exceptions import CoverageException
from coverage.misc import human_sorted_items
from coverage.report import get_analysis_to_report
from coverage.results import Numbers


class SummaryReporter:
    """A reporter for writing the summary report."""

    def __init__(self, coverage):
        self.coverage = coverage
        self.config = self.coverage.config
        self.branches = coverage.get_data().has_arcs()
        self.outfile = None
        self.fr_analysis = []
        self.skipped_count = 0
        self.empty_count = 0
        self.total = Numbers(precision=self.config.precision)
        self.fmt_err = "%s %s: %s"

    def writeout(self, line):
        """Write a line to the output, adding a newline."""
        self.outfile.write(line.rstrip())
        self.outfile.write("\n")

    def report(self, morfs, outfile=None):
        """Writes a report summarizing coverage statistics per module.

        `outfile` is a file object to write the summary to. It must be opened
        for native strings (bytes on Python 2, Unicode on Python 3).

        """
        self.outfile = outfile or sys.stdout

        self.coverage.get_data().set_query_contexts(self.config.report_contexts)
        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
            self.report_one_file(fr, analysis)

        # Prepare the formatting strings, header, and column sorting.
        max_name = max([len(fr.relative_filename()) for (fr, analysis) in self.fr_analysis] + [5])
        fmt_name = "%%- %ds " % max_name
        fmt_skip_covered = "\n%s file%s skipped due to complete coverage."
        fmt_skip_empty = "\n%s empty file%s skipped."

        header = (fmt_name % "Name") + " Stmts   Miss"
        fmt_coverage = fmt_name + "%6d %6d"
        if self.branches:
            header += " Branch BrPart"
            fmt_coverage += " %6d %6d"
        width100 = Numbers(precision=self.config.precision).pc_str_width()
        header += "%*s" % (width100+4, "Cover")
        fmt_coverage += "%%%ds%%%%" % (width100+3,)
        if self.config.show_missing:
            header += "   Missing"
            fmt_coverage += "   %s"
        rule = "-" * len(header)

        column_order = dict(name=0, stmts=1, miss=2, cover=-1)
        if self.branches:
            column_order.update(dict(branch=3, brpart=4))

        # Write the header
        self.writeout(header)
        self.writeout(rule)

        # `lines` is a list of pairs, (line text, line values).  The line text
        # is a string that will be printed, and line values is a tuple of
        # sortable values.
        lines = []

        for (fr, analysis) in self.fr_analysis:
            nums = analysis.numbers

            args = (fr.relative_filename(), nums.n_statements, nums.n_missing)
            if self.branches:
                args += (nums.n_branches, nums.n_partial_branches)
            args += (nums.pc_covered_str,)
            if self.config.show_missing:
                args += (analysis.missing_formatted(branches=True),)
            text = fmt_coverage % args
            # Add numeric percent coverage so that sorting makes sense.
            args += (nums.pc_covered,)
            lines.append((text, args))

        # Sort the lines and write them out.
        sort_option = (self.config.sort or "name").lower()
        reverse = False
        if sort_option[0] == '-':
            reverse = True
            sort_option = sort_option[1:]
        elif sort_option[0] == '+':
            sort_option = sort_option[1:]

        if sort_option == "name":
            lines = human_sorted_items(lines, reverse=reverse)
        else:
            position = column_order.get(sort_option)
            if position is None:
                raise CoverageException(f"Invalid sorting option: {self.config.sort!r}")
            lines.sort(key=lambda l: (l[1][position], l[0]), reverse=reverse)

        for line in lines:
            self.writeout(line[0])

        # Write a TOTAL line if we had at least one file.
        if self.total.n_files > 0:
            self.writeout(rule)
            args = ("TOTAL", self.total.n_statements, self.total.n_missing)
            if self.branches:
                args += (self.total.n_branches, self.total.n_partial_branches)
            args += (self.total.pc_covered_str,)
            if self.config.show_missing:
                args += ("",)
            self.writeout(fmt_coverage % args)

        # Write other final lines.
        if not self.total.n_files and not self.skipped_count:
            raise CoverageException("No data to report.")

        if self.config.skip_covered and self.skipped_count:
            self.writeout(
                fmt_skip_covered % (self.skipped_count, 's' if self.skipped_count > 1 else '')
            )
        if self.config.skip_empty and self.empty_count:
            self.writeout(
                fmt_skip_empty % (self.empty_count, 's' if self.empty_count > 1 else '')
            )

        return self.total.n_statements and self.total.pc_covered

    def report_one_file(self, fr, analysis):
        """Report on just one file, the callback from report()."""
        nums = analysis.numbers
        self.total += nums

        no_missing_lines = (nums.n_missing == 0)
        no_missing_branches = (nums.n_partial_branches == 0)
        if self.config.skip_covered and no_missing_lines and no_missing_branches:
            # Don't report on 100% files.
            self.skipped_count += 1
        elif self.config.skip_empty and nums.n_statements == 0:
            # Don't report on empty files.
            self.empty_count += 1
        else:
            self.fr_analysis.append((fr, analysis))

5,905
Python
37.601307
98
0.583065
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/debug.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Control of and utilities for debugging.""" import contextlib import functools import inspect import io import itertools import os import pprint import reprlib import sys import _thread from coverage.misc import isolate_module os = isolate_module(os) # When debugging, it can be helpful to force some options, especially when # debugging the configuration mechanisms you usually use to control debugging! # This is a list of forced debugging options. FORCED_DEBUG = [] FORCED_DEBUG_FILE = None class DebugControl: """Control and output for debugging.""" show_repr_attr = False # For SimpleReprMixin def __init__(self, options, output): """Configure the options and output file for debugging.""" self.options = list(options) + FORCED_DEBUG self.suppress_callers = False filters = [] if self.should('pid'): filters.append(add_pid_and_tid) self.output = DebugOutputFile.get_one( output, show_process=self.should('process'), filters=filters, ) self.raw_output = self.output.outfile def __repr__(self): return f"<DebugControl options={self.options!r} raw_output={self.raw_output!r}>" def should(self, option): """Decide whether to output debug information in category `option`.""" if option == "callers" and self.suppress_callers: return False return (option in self.options) @contextlib.contextmanager def without_callers(self): """A context manager to prevent call stacks from being logged.""" old = self.suppress_callers self.suppress_callers = True try: yield finally: self.suppress_callers = old def write(self, msg): """Write a line of debug output. `msg` is the line to write. A newline will be appended. """ self.output.write(msg+"\n") if self.should('self'): caller_self = inspect.stack()[1][0].f_locals.get('self') if caller_self is not None: self.output.write(f"self: {caller_self!r}\n") if self.should('callers'): dump_stack_frames(out=self.output, skip=1) self.output.flush() class DebugControlString(DebugControl): """A `DebugControl` that writes to a StringIO, for testing.""" def __init__(self, options): super().__init__(options, io.StringIO()) def get_output(self): """Get the output text from the `DebugControl`.""" return self.raw_output.getvalue() class NoDebugging: """A replacement for DebugControl that will never try to do anything.""" def should(self, option): # pylint: disable=unused-argument """Should we write debug messages? Never.""" return False def info_header(label): """Make a nice header string.""" return "--{:-<60s}".format(" "+label+" ") def info_formatter(info): """Produce a sequence of formatted lines from info. `info` is a sequence of pairs (label, data). The produced lines are nicely formatted, ready to print. """ info = list(info) if not info: return label_len = 30 assert all(len(l) < label_len for l, _ in info) for label, data in info: if data == []: data = "-none-" if isinstance(data, (list, set, tuple)): prefix = "%*s:" % (label_len, label) for e in data: yield "%*s %s" % (label_len+1, prefix, e) prefix = "" else: yield "%*s: %s" % (label_len, label, data) def write_formatted_info(writer, header, info): """Write a sequence of (label,data) pairs nicely.""" writer.write(info_header(header)) for line in info_formatter(info): writer.write(" %s" % line) def short_stack(limit=None, skip=0): """Return a string summarizing the call stack. The string is multi-line, with one line per stack frame. 
Each line shows the function name, the file name, and the line number: ... start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95 import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81 import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159 ... `limit` is the number of frames to include, defaulting to all of them. `skip` is the number of frames to skip, so that debugging functions can call this and not be included in the result. """ stack = inspect.stack()[limit:skip:-1] return "\n".join("%30s : %s:%d" % (t[3], t[1], t[2]) for t in stack) def dump_stack_frames(limit=None, out=None, skip=0): """Print a summary of the stack to stdout, or someplace else.""" out = out or sys.stdout out.write(short_stack(limit=limit, skip=skip+1)) out.write("\n") def clipped_repr(text, numchars=50): """`repr(text)`, but limited to `numchars`.""" r = reprlib.Repr() r.maxstring = numchars return r.repr(text) def short_id(id64): """Given a 64-bit id, make a shorter 16-bit one.""" id16 = 0 for offset in range(0, 64, 16): id16 ^= id64 >> offset return id16 & 0xFFFF def add_pid_and_tid(text): """A filter to add pid and tid to debug messages.""" # Thread ids are useful, but too long. Make a shorter one. tid = f"{short_id(_thread.get_ident()):04x}" text = f"{os.getpid():5d}.{tid}: {text}" return text class SimpleReprMixin: """A mixin implementing a simple __repr__.""" simple_repr_ignore = ['simple_repr_ignore', '$coverage.object_id'] def __repr__(self): show_attrs = ( (k, v) for k, v in self.__dict__.items() if getattr(v, "show_repr_attr", True) and not callable(v) and k not in self.simple_repr_ignore ) return "<{klass} @0x{id:x} {attrs}>".format( klass=self.__class__.__name__, id=id(self), attrs=" ".join(f"{k}={v!r}" for k, v in show_attrs), ) def simplify(v): # pragma: debugging """Turn things which are nearly dict/list/etc into dict/list/etc.""" if isinstance(v, dict): return {k:simplify(vv) for k, vv in v.items()} elif isinstance(v, (list, tuple)): return type(v)(simplify(vv) for vv in v) elif hasattr(v, "__dict__"): return simplify({'.'+k: v for k, v in v.__dict__.items()}) else: return v def pp(v): # pragma: debugging """Debug helper to pretty-print data, including SimpleNamespace objects.""" # Might not be needed in 3.9+ pprint.pprint(simplify(v)) def filter_text(text, filters): """Run `text` through a series of filters. `filters` is a list of functions. Each takes a string and returns a string. Each is run in turn. Returns: the final string that results after all of the filters have run. 
""" clean_text = text.rstrip() ending = text[len(clean_text):] text = clean_text for fn in filters: lines = [] for line in text.splitlines(): lines.extend(fn(line).splitlines()) text = "\n".join(lines) return text + ending class CwdTracker: # pragma: debugging """A class to add cwd info to debug messages.""" def __init__(self): self.cwd = None def filter(self, text): """Add a cwd message for each new cwd.""" cwd = os.getcwd() if cwd != self.cwd: text = f"cwd is now {cwd!r}\n" + text self.cwd = cwd return text class DebugOutputFile: # pragma: debugging """A file-like object that includes pid and cwd information.""" def __init__(self, outfile, show_process, filters): self.outfile = outfile self.show_process = show_process self.filters = list(filters) if self.show_process: self.filters.insert(0, CwdTracker().filter) self.write(f"New process: executable: {sys.executable!r}\n") self.write("New process: cmd: {!r}\n".format(getattr(sys, 'argv', None))) if hasattr(os, 'getppid'): self.write(f"New process: pid: {os.getpid()!r}, parent pid: {os.getppid()!r}\n") SYS_MOD_NAME = '$coverage.debug.DebugOutputFile.the_one' @classmethod def get_one(cls, fileobj=None, show_process=True, filters=(), interim=False): """Get a DebugOutputFile. If `fileobj` is provided, then a new DebugOutputFile is made with it. If `fileobj` isn't provided, then a file is chosen (COVERAGE_DEBUG_FILE, or stderr), and a process-wide singleton DebugOutputFile is made. `show_process` controls whether the debug file adds process-level information, and filters is a list of other message filters to apply. `filters` are the text filters to apply to the stream to annotate with pids, etc. If `interim` is true, then a future `get_one` can replace this one. """ if fileobj is not None: # Make DebugOutputFile around the fileobj passed. return cls(fileobj, show_process, filters) # Because of the way igor.py deletes and re-imports modules, # this class can be defined more than once. But we really want # a process-wide singleton. So stash it in sys.modules instead of # on a class attribute. Yes, this is aggressively gross. 
the_one, is_interim = sys.modules.get(cls.SYS_MOD_NAME, (None, True)) if the_one is None or is_interim: if fileobj is None: debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE) if debug_file_name in ("stdout", "stderr"): fileobj = getattr(sys, debug_file_name) elif debug_file_name: fileobj = open(debug_file_name, "a") else: fileobj = sys.stderr the_one = cls(fileobj, show_process, filters) sys.modules[cls.SYS_MOD_NAME] = (the_one, interim) return the_one def write(self, text): """Just like file.write, but filter through all our filters.""" self.outfile.write(filter_text(text, self.filters)) self.outfile.flush() def flush(self): """Flush our file.""" self.outfile.flush() def log(msg, stack=False): # pragma: debugging """Write a log message as forcefully as possible.""" out = DebugOutputFile.get_one(interim=True) out.write(msg+"\n") if stack: dump_stack_frames(out=out, skip=1) def decorate_methods(decorator, butnot=(), private=False): # pragma: debugging """A class decorator to apply a decorator to methods.""" def _decorator(cls): for name, meth in inspect.getmembers(cls, inspect.isroutine): if name not in cls.__dict__: continue if name != "__init__": if not private and name.startswith("_"): continue if name in butnot: continue setattr(cls, name, decorator(meth)) return cls return _decorator def break_in_pudb(func): # pragma: debugging """A function decorator to stop in the debugger for each call.""" @functools.wraps(func) def _wrapper(*args, **kwargs): import pudb sys.stdout = sys.__stdout__ pudb.set_trace() return func(*args, **kwargs) return _wrapper OBJ_IDS = itertools.count() CALLS = itertools.count() OBJ_ID_ATTR = "$coverage.object_id" def show_calls(show_args=True, show_stack=False, show_return=False): # pragma: debugging """A method decorator to debug-log each call to the function.""" def _decorator(func): @functools.wraps(func) def _wrapper(self, *args, **kwargs): oid = getattr(self, OBJ_ID_ATTR, None) if oid is None: oid = f"{os.getpid():08d} {next(OBJ_IDS):04d}" setattr(self, OBJ_ID_ATTR, oid) extra = "" if show_args: eargs = ", ".join(map(repr, args)) ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items()) extra += "(" extra += eargs if eargs and ekwargs: extra += ", " extra += ekwargs extra += ")" if show_stack: extra += " @ " extra += "; ".join(_clean_stack_line(l) for l in short_stack().splitlines()) callid = next(CALLS) msg = f"{oid} {callid:04d} {func.__name__}{extra}\n" DebugOutputFile.get_one(interim=True).write(msg) ret = func(self, *args, **kwargs) if show_return: msg = f"{oid} {callid:04d} {func.__name__} return {ret!r}\n" DebugOutputFile.get_one(interim=True).write(msg) return ret return _wrapper return _decorator def _clean_stack_line(s): # pragma: debugging """Simplify some paths in a stack trace, for compactness.""" s = s.strip() s = s.replace(os.path.dirname(__file__) + '/', '') s = s.replace(os.path.dirname(os.__file__) + '/', '') s = s.replace(sys.prefix + '/', '') return s
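

# --- Illustrative usage sketch (an editorial addition, not part of
# coverage.py itself). DebugControlString captures debug output in
# memory using only the classes defined above, which is how the options
# machinery can be exercised without touching stderr:
if __name__ == "__main__":
    dc = DebugControlString(options=["dataio"])
    if dc.should("dataio"):          # enabled: "dataio" is in the options
        dc.write("Opening data file")
    assert not dc.should("trace")    # disabled: not in the options
    print(dc.get_output(), end="")   # -> "Opening data file"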
13,766
Python
32.825553
96
0.578309
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/pytracer.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Raw data collector for coverage.py.""" import atexit import dis import sys from coverage import env # We need the YIELD_VALUE opcode below, in a comparison-friendly form. YIELD_VALUE = dis.opmap['YIELD_VALUE'] # When running meta-coverage, this file can try to trace itself, which confuses # everything. Don't trace ourselves. THIS_FILE = __file__.rstrip("co") class PyTracer: """Python implementation of the raw data tracer.""" # Because of poor implementations of trace-function-manipulating tools, # the Python trace function must be kept very simple. In particular, there # must be only one function ever set as the trace function, both through # sys.settrace, and as the return value from the trace function. Put # another way, the trace function must always return itself. It cannot # swap in other functions, or return None to avoid tracing a particular # frame. # # The trace manipulator that introduced this restriction is DecoratorTools, # which sets a trace function, and then later restores the pre-existing one # by calling sys.settrace with a function it found in the current frame. # # Systems that use DecoratorTools (or similar trace manipulations) must use # PyTracer to get accurate results. The command-line --timid argument is # used to force the use of this tracer. def __init__(self): # Attributes set from the collector: self.data = None self.trace_arcs = False self.should_trace = None self.should_trace_cache = None self.should_start_context = None self.warn = None # The threading module to use, if any. self.threading = None self.cur_file_data = None self.last_line = 0 # int, but uninitialized. self.cur_file_name = None self.context = None self.started_context = False self.data_stack = [] self.thread = None self.stopped = False self._activity = False self.in_atexit = False # On exit, self.in_atexit = True atexit.register(setattr, self, 'in_atexit', True) def __repr__(self): return "<PyTracer at {}: {} lines in {} files>".format( id(self), sum(len(v) for v in self.data.values()), len(self.data), ) def log(self, marker, *args): """For hard-core logging of what this tracer is doing.""" with open("/tmp/debug_trace.txt", "a") as f: f.write("{} {}[{}]".format( marker, id(self), len(self.data_stack), )) if 0: f.write(".{:x}.{:x}".format( self.thread.ident, self.threading.current_thread().ident, )) f.write(" {}".format(" ".join(map(str, args)))) if 0: f.write(" | ") stack = " / ".join( (fname or "???").rpartition("/")[-1] for _, fname, _, _ in self.data_stack ) f.write(stack) f.write("\n") def _trace(self, frame, event, arg_unused): """The trace function passed to sys.settrace.""" if THIS_FILE in frame.f_code.co_filename: return None #self.log(":", frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + "()", event) if (self.stopped and sys.gettrace() == self._trace): # pylint: disable=comparison-with-callable # The PyTrace.stop() method has been called, possibly by another # thread, let's deactivate ourselves now. 
if 0: self.log("---\nX", frame.f_code.co_filename, frame.f_lineno) f = frame while f: self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace) f = f.f_back sys.settrace(None) self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = ( self.data_stack.pop() ) return None # if event != 'call' and frame.f_code.co_filename != self.cur_file_name: # self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno) if event == 'call': # Should we start a new context? if self.should_start_context and self.context is None: context_maybe = self.should_start_context(frame) if context_maybe is not None: self.context = context_maybe self.started_context = True self.switch_context(self.context) else: self.started_context = False else: self.started_context = False # Entering a new frame. Decide if we should trace # in this file. self._activity = True self.data_stack.append( ( self.cur_file_data, self.cur_file_name, self.last_line, self.started_context, ) ) filename = frame.f_code.co_filename self.cur_file_name = filename disp = self.should_trace_cache.get(filename) if disp is None: disp = self.should_trace(filename, frame) self.should_trace_cache[filename] = disp self.cur_file_data = None if disp.trace: tracename = disp.source_filename if tracename not in self.data: self.data[tracename] = set() self.cur_file_data = self.data[tracename] # The call event is really a "start frame" event, and happens for # function calls and re-entering generators. The f_lasti field is # -1 for calls, and a real offset for generators. Use <0 as the # line number for calls, and the real line number for generators. if getattr(frame, 'f_lasti', -1) < 0: self.last_line = -frame.f_code.co_firstlineno else: self.last_line = frame.f_lineno elif event == 'line': # Record an executed line. if self.cur_file_data is not None: lineno = frame.f_lineno if self.trace_arcs: self.cur_file_data.add((self.last_line, lineno)) else: self.cur_file_data.add(lineno) self.last_line = lineno elif event == 'return': if self.trace_arcs and self.cur_file_data: # Record an arc leaving the function, but beware that a # "return" event might just mean yielding from a generator. # Jython seems to have an empty co_code, so just assume return. code = frame.f_code.co_code if (not code) or code[frame.f_lasti] != YIELD_VALUE: first = frame.f_code.co_firstlineno self.cur_file_data.add((self.last_line, -first)) # Leaving this function, pop the filename stack. self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = ( self.data_stack.pop() ) # Leaving a context? if self.started_context: self.context = None self.switch_context(None) return self._trace def start(self): """Start this Tracer. Return a Python function suitable for use with sys.settrace(). """ self.stopped = False if self.threading: if self.thread is None: self.thread = self.threading.current_thread() else: if self.thread.ident != self.threading.current_thread().ident: # Re-starting from a different thread!? Don't set the trace # function, but we are marked as running again, so maybe it # will be ok? #self.log("~", "starting on different threads") return self._trace sys.settrace(self._trace) return self._trace def stop(self): """Stop this Tracer.""" # Get the active tracer callback before setting the stop flag to be # able to detect if the tracer was changed prior to stopping it. tf = sys.gettrace() # Set the stop flag. 
The actual call to sys.settrace(None) will happen # in the self._trace callback itself to make sure to call it from the # right thread. self.stopped = True if self.threading and self.thread.ident != self.threading.current_thread().ident: # Called on a different thread than started us: we can't unhook # ourselves, but we've set the flag that we should stop, so we # won't do any more tracing. #self.log("~", "stopping on different threads") return if self.warn: # PyPy clears the trace function before running atexit functions, # so don't warn if we are in atexit on PyPy and the trace function # has changed to None. dont_warn = (env.PYPY and env.PYPYVERSION >= (5, 4) and self.in_atexit and tf is None) if (not dont_warn) and tf != self._trace: # pylint: disable=comparison-with-callable msg = f"Trace function changed, measurement is likely wrong: {tf!r}" self.warn(msg, slug="trace-changed") def activity(self): """Has there been any activity?""" return self._activity def reset_activity(self): """Reset the activity() flag.""" self._activity = False def get_stats(self): """Return a dictionary of statistics, or None.""" return None
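

# --- Illustrative sketch (an editorial addition, not part of
# coverage.py itself). The sys.settrace contract that PyTracer relies
# on can be seen with a toy tracer: the function receives 'call',
# 'line', and 'return' events and must return itself to keep receiving
# events for the frame, just as _trace() does above.
if __name__ == "__main__":
    executed = set()

    def toy_trace(frame, event, arg):
        if event == "line":
            executed.add((frame.f_code.co_name, frame.f_lineno))
        return toy_trace             # returning self keeps us installed

    def demo():
        x = 1
        return x + 1

    sys.settrace(toy_trace)
    demo()
    sys.settrace(None)
    print(sorted(executed))          # the lines executed inside demo()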
10,174
Python
38.901961
106
0.554551
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/__init__.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Code coverage measurement for Python. Ned Batchelder https://nedbatchelder.com/code/coverage """ import sys from coverage.version import __version__, __url__, version_info from coverage.control import Coverage, process_startup from coverage.data import CoverageData from coverage.exceptions import CoverageException from coverage.plugin import CoveragePlugin, FileTracer, FileReporter from coverage.pytracer import PyTracer # Backward compatibility. coverage = Coverage # On Windows, we encode and decode deep enough that something goes wrong and # the encodings.utf_8 module is loaded and then unloaded, I don't know why. # Adding a reference here prevents it from being unloaded. Yuk. import encodings.utf_8 # pylint: disable=wrong-import-position, wrong-import-order # Because of the "from coverage.control import fooey" lines at the top of the # file, there's an entry for coverage.coverage in sys.modules, mapped to None. # This makes some inspection tools (like pydoc) unable to find the class # coverage.coverage. So remove that entry. try: del sys.modules['coverage.coverage'] except KeyError: pass
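

# --- Illustrative sketch (an editorial addition, not part of
# coverage.py itself). The public entry point is the Coverage class;
# the lowercase name is kept only as a backward-compatible alias.
if __name__ == "__main__":
    assert coverage is Coverage
    print(f"coverage.py {__version__} -- {__url__}")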
1,289
Python
33.864864
87
0.78045
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/version.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """The version and URL for coverage.py""" # This file is exec'ed in setup.py, don't import anything! # Same semantics as sys.version_info. version_info = (6, 1, 2, "final", 0) def _make_version(major, minor, micro, releaselevel, serial): """Create a readable version string from version_info tuple components.""" assert releaselevel in ['alpha', 'beta', 'candidate', 'final'] version = "%d.%d" % (major, minor) if micro: version += ".%d" % (micro,) if releaselevel != 'final': short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel] version += "%s%d" % (short, serial) return version def _make_url(major, minor, micro, releaselevel, serial): """Make the URL people should start at for this version of coverage.py.""" url = "https://coverage.readthedocs.io" if releaselevel != 'final': # For pre-releases, use a version-specific URL. url += "/en/" + _make_version(major, minor, micro, releaselevel, serial) return url __version__ = _make_version(*version_info) __url__ = _make_url(*version_info)
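

# --- Worked examples (an editorial addition, not part of coverage.py
# itself). A final release omits the release-level suffix, while
# pre-releases get an 'a'/'b'/'rc' marker plus the serial number:
if __name__ == "__main__":
    assert _make_version(6, 1, 2, "final", 0) == "6.1.2"
    assert _make_version(6, 2, 0, "beta", 3) == "6.2b3"
    assert _make_url(6, 1, 2, "final", 0) == "https://coverage.readthedocs.io"
    print(__version__, __url__)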
1,251
Python
35.823528
80
0.647482
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/jsonreport.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Json reporting for coverage.py""" import datetime import json import sys from coverage import __version__ from coverage.report import get_analysis_to_report from coverage.results import Numbers class JsonReporter: """A reporter for writing JSON coverage results.""" report_type = "JSON report" def __init__(self, coverage): self.coverage = coverage self.config = self.coverage.config self.total = Numbers(self.config.precision) self.report_data = {} def report(self, morfs, outfile=None): """Generate a json report for `morfs`. `morfs` is a list of modules or file names. `outfile` is a file object to write the json to """ outfile = outfile or sys.stdout coverage_data = self.coverage.get_data() coverage_data.set_query_contexts(self.config.report_contexts) self.report_data["meta"] = { "version": __version__, "timestamp": datetime.datetime.now().isoformat(), "branch_coverage": coverage_data.has_arcs(), "show_contexts": self.config.json_show_contexts, } measured_files = {} for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs): measured_files[file_reporter.relative_filename()] = self.report_one_file( coverage_data, analysis ) self.report_data["files"] = measured_files self.report_data["totals"] = { 'covered_lines': self.total.n_executed, 'num_statements': self.total.n_statements, 'percent_covered': self.total.pc_covered, 'percent_covered_display': self.total.pc_covered_str, 'missing_lines': self.total.n_missing, 'excluded_lines': self.total.n_excluded, } if coverage_data.has_arcs(): self.report_data["totals"].update({ 'num_branches': self.total.n_branches, 'num_partial_branches': self.total.n_partial_branches, 'covered_branches': self.total.n_executed_branches, 'missing_branches': self.total.n_missing_branches, }) json.dump( self.report_data, outfile, indent=4 if self.config.json_pretty_print else None ) return self.total.n_statements and self.total.pc_covered def report_one_file(self, coverage_data, analysis): """Extract the relevant report data for a single file""" nums = analysis.numbers self.total += nums summary = { 'covered_lines': nums.n_executed, 'num_statements': nums.n_statements, 'percent_covered': nums.pc_covered, 'percent_covered_display': nums.pc_covered_str, 'missing_lines': nums.n_missing, 'excluded_lines': nums.n_excluded, } reported_file = { 'executed_lines': sorted(analysis.executed), 'summary': summary, 'missing_lines': sorted(analysis.missing), 'excluded_lines': sorted(analysis.excluded), } if self.config.json_show_contexts: reported_file['contexts'] = analysis.data.contexts_by_lineno(analysis.filename) if coverage_data.has_arcs(): reported_file['summary'].update({ 'num_branches': nums.n_branches, 'num_partial_branches': nums.n_partial_branches, 'covered_branches': nums.n_executed_branches, 'missing_branches': nums.n_missing_branches, }) return reported_file
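

# --- Illustrative sketch (an editorial addition, not part of
# coverage.py itself). The structure written by report() can be
# consumed directly; this assumes a coverage.json file produced by an
# earlier `coverage json` run.
if __name__ == "__main__":
    with open("coverage.json") as f:
        report = json.load(f)
    print("coverage.py", report["meta"]["version"])
    print("total:", report["totals"]["percent_covered_display"], "%")
    for path, info in report["files"].items():
        print(path, "missing lines:", info["summary"]["missing_lines"])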
3,812
Python
35.314285
91
0.594963
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/disposition.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Simple value objects for tracking what to do with files.""" class FileDisposition: """A simple value type for recording what to do with a file.""" pass # FileDisposition "methods": FileDisposition is a pure value object, so it can # be implemented in either C or Python. Acting on them is done with these # functions. def disposition_init(cls, original_filename): """Construct and initialize a new FileDisposition object.""" disp = cls() disp.original_filename = original_filename disp.canonical_filename = original_filename disp.source_filename = None disp.trace = False disp.reason = "" disp.file_tracer = None disp.has_dynamic_filename = False return disp def disposition_debug_msg(disp): """Make a nice debug message of what the FileDisposition is doing.""" if disp.trace: msg = f"Tracing {disp.original_filename!r}" if disp.original_filename != disp.source_filename: msg += f" as {disp.source_filename!r}" if disp.file_tracer: msg += ": will be traced by %r" % disp.file_tracer else: msg = f"Not tracing {disp.original_filename!r}: {disp.reason}" return msg
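

# --- Illustrative sketch (an editorial addition, not part of
# coverage.py itself). How the two module-level functions are used
# together:
if __name__ == "__main__":
    disp = disposition_init(FileDisposition, "example.py")
    disp.trace = True
    disp.source_filename = "example.py"
    print(disposition_debug_msg(disp))   # -> Tracing 'example.py'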
1,350
Python
32.774999
79
0.682222
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/bytecode.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Bytecode manipulation for coverage.py""" import types def code_objects(code): """Iterate over all the code objects in `code`.""" stack = [code] while stack: # We're going to return the code object on the stack, but first # push its children for later returning. code = stack.pop() for c in code.co_consts: if isinstance(c, types.CodeType): stack.append(c) yield code
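

# --- Illustrative sketch (an editorial addition, not part of
# coverage.py itself). code_objects() reaches nested functions,
# lambdas, and comprehensions through co_consts:
if __name__ == "__main__":
    src = "def outer():\n    def inner():\n        pass\n"
    for co in code_objects(compile(src, "<example>", "exec")):
        print(co.co_name)                # <module>, outer, inner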
609
Python
29.499999
79
0.64532
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/files.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """File wrangling.""" import hashlib import fnmatch import ntpath import os import os.path import posixpath import re import sys from coverage import env from coverage.exceptions import CoverageException from coverage.misc import contract, human_sorted, isolate_module, join_regex os = isolate_module(os) def set_relative_directory(): """Set the directory that `relative_filename` will be relative to.""" global RELATIVE_DIR, CANONICAL_FILENAME_CACHE # The absolute path to our current directory. RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep) # Cache of results of calling the canonical_filename() method, to # avoid duplicating work. CANONICAL_FILENAME_CACHE = {} def relative_directory(): """Return the directory that `relative_filename` is relative to.""" return RELATIVE_DIR @contract(returns='unicode') def relative_filename(filename): """Return the relative form of `filename`. The file name will be relative to the current directory when the `set_relative_directory` was called. """ fnorm = os.path.normcase(filename) if fnorm.startswith(RELATIVE_DIR): filename = filename[len(RELATIVE_DIR):] return filename @contract(returns='unicode') def canonical_filename(filename): """Return a canonical file name for `filename`. An absolute path with no redundant components and normalized case. """ if filename not in CANONICAL_FILENAME_CACHE: cf = filename if not os.path.isabs(filename): for path in [os.curdir] + sys.path: if path is None: continue f = os.path.join(path, filename) try: exists = os.path.exists(f) except UnicodeError: exists = False if exists: cf = f break cf = abs_file(cf) CANONICAL_FILENAME_CACHE[filename] = cf return CANONICAL_FILENAME_CACHE[filename] MAX_FLAT = 100 @contract(filename='unicode', returns='unicode') def flat_rootname(filename): """A base for a flat file name to correspond to this file. Useful for writing files about the code where you want all the files in the same directory, but need to differentiate same-named files from different directories. For example, the file a/b/c.py will return 'd_86bbcbe134d28fd2_c_py' """ dirname, basename = ntpath.split(filename) if dirname: fp = hashlib.new("sha3_256", dirname.encode("UTF-8")).hexdigest()[:16] prefix = f"d_{fp}_" else: prefix = "" return prefix + basename.replace(".", "_") if env.WINDOWS: _ACTUAL_PATH_CACHE = {} _ACTUAL_PATH_LIST_CACHE = {} def actual_path(path): """Get the actual path of `path`, including the correct case.""" if path in _ACTUAL_PATH_CACHE: return _ACTUAL_PATH_CACHE[path] head, tail = os.path.split(path) if not tail: # This means head is the drive spec: normalize it. 
actpath = head.upper() elif not head: actpath = tail else: head = actual_path(head) if head in _ACTUAL_PATH_LIST_CACHE: files = _ACTUAL_PATH_LIST_CACHE[head] else: try: files = os.listdir(head) except Exception: # This will raise OSError, or this bizarre TypeError: # https://bugs.python.org/issue1776160 files = [] _ACTUAL_PATH_LIST_CACHE[head] = files normtail = os.path.normcase(tail) for f in files: if os.path.normcase(f) == normtail: tail = f break actpath = os.path.join(head, tail) _ACTUAL_PATH_CACHE[path] = actpath return actpath else: def actual_path(filename): """The actual path for non-Windows platforms.""" return filename @contract(returns='unicode') def abs_file(path): """Return the absolute normalized form of `path`.""" try: path = os.path.realpath(path) except UnicodeError: pass path = os.path.abspath(path) path = actual_path(path) return path def python_reported_file(filename): """Return the string as Python would describe this file name.""" if env.PYBEHAVIOR.report_absolute_files: filename = os.path.abspath(filename) return filename RELATIVE_DIR = None CANONICAL_FILENAME_CACHE = None set_relative_directory() def isabs_anywhere(filename): """Is `filename` an absolute path on any OS?""" return ntpath.isabs(filename) or posixpath.isabs(filename) def prep_patterns(patterns): """Prepare the file patterns for use in a `FnmatchMatcher`. If a pattern starts with a wildcard, it is used as a pattern as-is. If it does not start with a wildcard, then it is made absolute with the current directory. If `patterns` is None, an empty list is returned. """ prepped = [] for p in patterns or []: if p.startswith(("*", "?")): prepped.append(p) else: prepped.append(abs_file(p)) return prepped class TreeMatcher: """A matcher for files in a tree. Construct with a list of paths, either files or directories. Paths match with the `match` method if they are one of the files, or if they are somewhere in a subtree rooted at one of the directories. """ def __init__(self, paths, name="unknown"): self.original_paths = human_sorted(paths) self.paths = list(map(os.path.normcase, paths)) self.name = name def __repr__(self): return f"<TreeMatcher {self.name} {self.original_paths!r}>" def info(self): """A list of strings for displaying when dumping state.""" return self.original_paths def match(self, fpath): """Does `fpath` indicate a file in one of our trees?""" fpath = os.path.normcase(fpath) for p in self.paths: if fpath.startswith(p): if fpath == p: # This is the same file! 
return True if fpath[len(p)] == os.sep: # This is a file in the directory return True return False class ModuleMatcher: """A matcher for modules in a tree.""" def __init__(self, module_names, name="unknown"): self.modules = list(module_names) self.name = name def __repr__(self): return f"<ModuleMatcher {self.name} {self.modules!r}>" def info(self): """A list of strings for displaying when dumping state.""" return self.modules def match(self, module_name): """Does `module_name` indicate a module in one of our packages?""" if not module_name: return False for m in self.modules: if module_name.startswith(m): if module_name == m: return True if module_name[len(m)] == '.': # This is a module in the package return True return False class FnmatchMatcher: """A matcher for files by file name pattern.""" def __init__(self, pats, name="unknown"): self.pats = list(pats) self.re = fnmatches_to_regex(self.pats, case_insensitive=env.WINDOWS) self.name = name def __repr__(self): return f"<FnmatchMatcher {self.name} {self.pats!r}>" def info(self): """A list of strings for displaying when dumping state.""" return self.pats def match(self, fpath): """Does `fpath` match one of our file name patterns?""" return self.re.match(fpath) is not None def sep(s): """Find the path separator used in this string, or os.sep if none.""" sep_match = re.search(r"[\\/]", s) if sep_match: the_sep = sep_match.group(0) else: the_sep = os.sep return the_sep def fnmatches_to_regex(patterns, case_insensitive=False, partial=False): """Convert fnmatch patterns to a compiled regex that matches any of them. Slashes are always converted to match either slash or backslash, for Windows support, even when running elsewhere. If `partial` is true, then the pattern will match if the target string starts with the pattern. Otherwise, it must match the entire string. Returns: a compiled regex object. Use the .match method to compare target strings. """ regexes = (fnmatch.translate(pattern) for pattern in patterns) # Python3.7 fnmatch translates "/" as "/". Before that, it translates as "\/", # so we have to deal with maybe a backslash. regexes = (re.sub(r"\\?/", r"[\\\\/]", regex) for regex in regexes) if partial: # fnmatch always adds a \Z to match the whole string, which we don't # want, so we remove the \Z. While removing it, we only replace \Z if # followed by paren (introducing flags), or at end, to keep from # destroying a literal \Z in the pattern. regexes = (re.sub(r'\\Z(\(\?|$)', r'\1', regex) for regex in regexes) flags = 0 if case_insensitive: flags |= re.IGNORECASE compiled = re.compile(join_regex(regexes), flags=flags) return compiled class PathAliases: """A collection of aliases for paths. When combining data files from remote machines, often the paths to source code are different, for example, due to OS differences, or because of serialized checkouts on continuous integration machines. A `PathAliases` object tracks a list of pattern/result pairs, and can map a path through those aliases to produce a unified path. """ def __init__(self, relative=False): self.aliases = [] self.relative = relative def pprint(self): # pragma: debugging """Dump the important parts of the PathAliases, for debugging.""" print(f"Aliases (relative={self.relative}):") for regex, result in self.aliases: print(f"{regex.pattern!r} --> {result!r}") def add(self, pattern, result): """Add the `pattern`/`result` pair to the list of aliases. `pattern` is an `fnmatch`-style pattern. `result` is a simple string. 
When mapping paths, if a path starts with a match against `pattern`, then that match is replaced with `result`. This models isomorphic source trees being rooted at different places on two different machines. `pattern` can't end with a wildcard component, since that would match an entire tree, and not just its root. """ pattern_sep = sep(pattern) if len(pattern) > 1: pattern = pattern.rstrip(r"\/") # The pattern can't end with a wildcard component. if pattern.endswith("*"): raise CoverageException("Pattern must not end with wildcards.") # The pattern is meant to match a filepath. Let's make it absolute # unless it already is, or is meant to match any prefix. if not pattern.startswith('*') and not isabs_anywhere(pattern + pattern_sep): pattern = abs_file(pattern) if not pattern.endswith(pattern_sep): pattern += pattern_sep # Make a regex from the pattern. regex = fnmatches_to_regex([pattern], case_insensitive=True, partial=True) # Normalize the result: it must end with a path separator. result_sep = sep(result) result = result.rstrip(r"\/") + result_sep self.aliases.append((regex, result)) def map(self, path): """Map `path` through the aliases. `path` is checked against all of the patterns. The first pattern to match is used to replace the root of the path with the result root. Only one pattern is ever used. If no patterns match, `path` is returned unchanged. The separator style in the result is made to match that of the result in the alias. Returns the mapped path. If a mapping has happened, this is a canonical path. If no mapping has happened, it is the original value of `path` unchanged. """ for regex, result in self.aliases: m = regex.match(path) if m: new = path.replace(m.group(0), result) new = new.replace(sep(path), sep(result)) if not self.relative: new = canonical_filename(new) return new return path def find_python_files(dirname): """Yield all of the importable Python files in `dirname`, recursively. To be importable, the files have to be in a directory with a __init__.py, except for `dirname` itself, which isn't required to have one. The assumption is that `dirname` was specified directly, so the user knows best, but sub-directories are checked for a __init__.py to be sure we only find the importable files. """ for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)): if i > 0 and '__init__.py' not in filenames: # If a directory doesn't have __init__.py, then it isn't # importable and neither are its files del dirnames[:] continue for filename in filenames: # We're only interested in files that look like reasonable Python # files: Must end with .py or .pyw, and must not have certain funny # characters that probably mean they are editor junk. if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename): yield os.path.join(dirpath, filename)
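

# --- Illustrative sketch (an editorial addition, not part of
# coverage.py itself). PathAliases maps a CI checkout path back onto a
# local tree; relative=True is used here so the result stays predictable
# instead of being canonicalized against the current machine:
if __name__ == "__main__":
    aliases = PathAliases(relative=True)
    aliases.add("/jenkins/build/*/src", "src")
    print(aliases.map("/jenkins/build/1234/src/module.py"))  # src/module.py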
14,061
Python
32.00939
82
0.609701
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/data.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Coverage data for coverage.py. This file had the 4.x JSON data support, which is now gone. This file still has storage-agnostic helpers, and is kept to avoid changing too many imports. CoverageData is now defined in sqldata.py, and imported here to keep the imports working. """ import glob import os.path from coverage.exceptions import CoverageException from coverage.misc import file_be_gone from coverage.sqldata import CoverageData def line_counts(data, fullpath=False): """Return a dict summarizing the line coverage data. Keys are based on the file names, and values are the number of executed lines. If `fullpath` is true, then the keys are the full pathnames of the files, otherwise they are the basenames of the files. Returns a dict mapping file names to counts of lines. """ summ = {} if fullpath: filename_fn = lambda f: f else: filename_fn = os.path.basename for filename in data.measured_files(): summ[filename_fn(filename)] = len(data.lines(filename)) return summ def add_data_to_hash(data, filename, hasher): """Contribute `filename`'s data to the `hasher`. `hasher` is a `coverage.misc.Hasher` instance to be updated with the file's data. It should only get the results data, not the run data. """ if data.has_arcs(): hasher.update(sorted(data.arcs(filename) or [])) else: hasher.update(sorted(data.lines(filename) or [])) hasher.update(data.file_tracer(filename)) def combine_parallel_data( data, aliases=None, data_paths=None, strict=False, keep=False, message=None, ): """Combine a number of data files together. Treat `data.filename` as a file prefix, and combine the data from all of the data files starting with that prefix plus a dot. If `aliases` is provided, it's a `PathAliases` object that is used to re-map paths to match the local machine's. If `data_paths` is provided, it is a list of directories or files to combine. Directories are searched for files that start with `data.filename` plus dot as a prefix, and those files are combined. If `data_paths` is not provided, then the directory portion of `data.filename` is used as the directory to search for data files. Unless `keep` is True every data file found and combined is then deleted from disk. If a file cannot be read, a warning will be issued, and the file will not be deleted. If `strict` is true, and no files are found to combine, an error is raised. """ # Because of the os.path.abspath in the constructor, data_dir will # never be an empty string. data_dir, local = os.path.split(data.base_filename()) localdot = local + '.*' data_paths = data_paths or [data_dir] files_to_combine = [] for p in data_paths: if os.path.isfile(p): files_to_combine.append(os.path.abspath(p)) elif os.path.isdir(p): pattern = os.path.join(os.path.abspath(p), localdot) files_to_combine.extend(glob.glob(pattern)) else: raise CoverageException(f"Couldn't combine from non-existent path '{p}'") if strict and not files_to_combine: raise CoverageException("No data to combine") files_combined = 0 for f in files_to_combine: if f == data.data_filename(): # Sometimes we are combining into a file which is one of the # parallel files. Skip that file. 
if data._debug.should('dataio'): data._debug.write(f"Skipping combining ourself: {f!r}") continue if data._debug.should('dataio'): data._debug.write(f"Combining data file {f!r}") try: new_data = CoverageData(f, debug=data._debug) new_data.read() except CoverageException as exc: if data._warn: # The CoverageException has the file name in it, so just # use the message as the warning. data._warn(str(exc)) else: data.update(new_data, aliases=aliases) files_combined += 1 if message: message(f"Combined data file {os.path.relpath(f)}") if not keep: if data._debug.should('dataio'): data._debug.write(f"Deleting combined data file {f!r}") file_be_gone(f) if strict and not files_combined: raise CoverageException("No usable data files")
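

# --- Illustrative sketch (an editorial addition, not part of
# coverage.py itself). line_counts() summarizes an existing data file;
# this assumes a .coverage file written by an earlier `coverage run`.
if __name__ == "__main__":
    data = CoverageData(".coverage")
    data.read()
    for name, n_lines in line_counts(data).items():
        print(f"{name}: {n_lines} executed lines")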
4,680
Python
34.732824
97
0.648932
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/env.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Determine facts about the environment.""" import os import platform import sys # Operating systems. WINDOWS = sys.platform == "win32" LINUX = sys.platform.startswith("linux") # Python implementations. CPYTHON = (platform.python_implementation() == "CPython") PYPY = (platform.python_implementation() == "PyPy") JYTHON = (platform.python_implementation() == "Jython") IRONPYTHON = (platform.python_implementation() == "IronPython") # Python versions. We amend version_info with one more value, a zero if an # official version, or 1 if built from source beyond an official version. PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),) if PYPY: PYPYVERSION = sys.pypy_version_info # Python behavior. class PYBEHAVIOR: """Flags indicating this Python's behavior.""" # Does Python conform to PEP626, Precise line numbers for debugging and other tools. # https://www.python.org/dev/peps/pep-0626 pep626 = CPYTHON and (PYVERSION > (3, 10, 0, 'alpha', 4)) # Is "if __debug__" optimized away? if PYPY: optimize_if_debug = True else: optimize_if_debug = not pep626 # Is "if not __debug__" optimized away? optimize_if_not_debug = (not PYPY) and (PYVERSION >= (3, 7, 0, 'alpha', 4)) if pep626: optimize_if_not_debug = False if PYPY: optimize_if_not_debug = True # Is "if not __debug__" optimized away even better? optimize_if_not_debug2 = (not PYPY) and (PYVERSION >= (3, 8, 0, 'beta', 1)) if pep626: optimize_if_not_debug2 = False # Yet another way to optimize "if not __debug__"? optimize_if_not_debug3 = (PYPY and PYVERSION >= (3, 8)) # Can co_lnotab have negative deltas? negative_lnotab = not (PYPY and PYPYVERSION < (7, 2)) # Do .pyc files conform to PEP 552? Hash-based pyc's. hashed_pyc_pep552 = (PYVERSION >= (3, 7, 0, 'alpha', 4)) # Python 3.7.0b3 changed the behavior of the sys.path[0] entry for -m. It # used to be an empty string (meaning the current directory). It changed # to be the actual path to the current directory, so that os.chdir wouldn't # affect the outcome. actual_syspath0_dash_m = ( (CPYTHON and (PYVERSION >= (3, 7, 0, 'beta', 3))) or (PYPY and (PYPYVERSION >= (7, 3, 4))) ) # 3.7 changed how functions with only docstrings are numbered. docstring_only_function = (not PYPY) and ((3, 7, 0, 'beta', 5) <= PYVERSION <= (3, 10)) # When a break/continue/return statement in a try block jumps to a finally # block, does the finally block do the break/continue/return (pre-3.8), or # does the finally jump back to the break/continue/return (3.8) to do the # work? finally_jumps_back = ((3, 8) <= PYVERSION < (3, 10)) # When a function is decorated, does the trace function get called for the # @-line and also the def-line (new behavior in 3.8)? Or just the @-line # (old behavior)? trace_decorated_def = (CPYTHON and PYVERSION >= (3, 8)) # Are while-true loops optimized into absolute jumps with no loop setup? nix_while_true = (PYVERSION >= (3, 8)) # Python 3.9a1 made sys.argv[0] and other reported files absolute paths. report_absolute_files = (PYVERSION >= (3, 9)) # Lines after break/continue/return/raise are no longer compiled into the # bytecode. They used to be marked as missing, now they aren't executable. omit_after_jump = pep626 # PyPy has always omitted statements after return. omit_after_return = omit_after_jump or PYPY # Modules used to have firstlineno equal to the line number of the first # real line of code. Now they always start at 1. 
module_firstline_1 = pep626 # Are "if 0:" lines (and similar) kept in the compiled code? keep_constant_test = pep626 # When leaving a with-block, do we visit the with-line again for the exit? exit_through_with = (PYVERSION >= (3, 10, 0, 'beta')) # Match-case construct. match_case = (PYVERSION >= (3, 10)) # Some words are keywords in some places, identifiers in other places. soft_keywords = (PYVERSION >= (3, 10)) # Coverage.py specifics. # Are we using the C-implemented trace function? C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c' # Are we coverage-measuring ourselves? METACOV = os.getenv('COVERAGE_COVERAGE', '') != '' # Are we running our test suite? # Even when running tests, you can use COVERAGE_TESTING=0 to disable the # test-specific behavior like contracts. TESTING = os.getenv('COVERAGE_TESTING', '') == 'True' # Environment COVERAGE_NO_CONTRACTS=1 can turn off contracts while debugging # tests to remove noise from stack traces. # $set_env.py: COVERAGE_NO_CONTRACTS - Disable PyContracts to simplify stack traces. USE_CONTRACTS = TESTING and not bool(int(os.environ.get("COVERAGE_NO_CONTRACTS", 0)))
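

# --- Illustrative sketch (an editorial addition, not part of
# coverage.py itself). The rest of coverage.py branches on these flags
# instead of re-testing sys.version_info at each call site:
if __name__ == "__main__":
    print(f"Python {PYVERSION}, CPython={CPYTHON}, C tracer={C_TRACER}")
    if PYBEHAVIOR.soft_keywords:
        print("soft keywords (match/case) are available")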
5,011
Python
36.969697
91
0.672121
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/html.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """HTML reporting for coverage.py.""" import datetime import json import os import re import shutil import types import coverage from coverage.data import add_data_to_hash from coverage.exceptions import CoverageException from coverage.files import flat_rootname from coverage.misc import ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime from coverage.misc import human_sorted from coverage.report import get_analysis_to_report from coverage.results import Numbers from coverage.templite import Templite os = isolate_module(os) def data_filename(fname): """Return the path to an "htmlfiles" data file of ours. """ static_dir = os.path.join(os.path.dirname(__file__), "htmlfiles") static_filename = os.path.join(static_dir, fname) return static_filename def read_data(fname): """Return the contents of a data file of ours.""" with open(data_filename(fname)) as data_file: return data_file.read() def write_html(fname, html): """Write `html` to `fname`, properly encoded.""" html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n" with open(fname, "wb") as fout: fout.write(html.encode('ascii', 'xmlcharrefreplace')) class HtmlDataGeneration: """Generate structured data to be turned into HTML reports.""" EMPTY = "(empty)" def __init__(self, cov): self.coverage = cov self.config = self.coverage.config data = self.coverage.get_data() self.has_arcs = data.has_arcs() if self.config.show_contexts: if data.measured_contexts() == {""}: self.coverage._warn("No contexts were measured") data.set_query_contexts(self.config.report_contexts) def data_for_file(self, fr, analysis): """Produce the data needed for one file's report.""" if self.has_arcs: missing_branch_arcs = analysis.missing_branch_arcs() arcs_executed = analysis.arcs_executed() if self.config.show_contexts: contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename) lines = [] for lineno, tokens in enumerate(fr.source_token_lines(), start=1): # Figure out how to mark this line. category = None short_annotations = [] long_annotations = [] if lineno in analysis.excluded: category = 'exc' elif lineno in analysis.missing: category = 'mis' elif self.has_arcs and lineno in missing_branch_arcs: category = 'par' for b in missing_branch_arcs[lineno]: if b < 0: short_annotations.append("exit") else: short_annotations.append(b) long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed)) elif lineno in analysis.statements: category = 'run' contexts = contexts_label = None context_list = None if category and self.config.show_contexts: contexts = human_sorted(c or self.EMPTY for c in contexts_by_lineno.get(lineno, ())) if contexts == [self.EMPTY]: contexts_label = self.EMPTY else: contexts_label = f"{len(contexts)} ctx" context_list = contexts lines.append(types.SimpleNamespace( tokens=tokens, number=lineno, category=category, statement=(lineno in analysis.statements), contexts=contexts, contexts_label=contexts_label, context_list=context_list, short_annotations=short_annotations, long_annotations=long_annotations, )) file_data = types.SimpleNamespace( relative_filename=fr.relative_filename(), nums=analysis.numbers, lines=lines, ) return file_data class HtmlReporter: """HTML reporting.""" # These files will be copied from the htmlfiles directory to the output # directory. 
STATIC_FILES = [ "style.css", "coverage_html.js", "keybd_closed.png", "keybd_open.png", "favicon_32.png", ] def __init__(self, cov): self.coverage = cov self.config = self.coverage.config self.directory = self.config.html_dir self.skip_covered = self.config.html_skip_covered if self.skip_covered is None: self.skip_covered = self.config.skip_covered self.skip_empty = self.config.html_skip_empty if self.skip_empty is None: self.skip_empty = self.config.skip_empty self.skipped_covered_count = 0 self.skipped_empty_count = 0 title = self.config.html_title if self.config.extra_css: self.extra_css = os.path.basename(self.config.extra_css) else: self.extra_css = None self.data = self.coverage.get_data() self.has_arcs = self.data.has_arcs() self.file_summaries = [] self.all_files_nums = [] self.incr = IncrementalChecker(self.directory) self.datagen = HtmlDataGeneration(self.coverage) self.totals = Numbers(precision=self.config.precision) self.template_globals = { # Functions available in the templates. 'escape': escape, 'pair': pair, 'len': len, # Constants for this report. '__url__': coverage.__url__, '__version__': coverage.__version__, 'title': title, 'time_stamp': format_local_datetime(datetime.datetime.now()), 'extra_css': self.extra_css, 'has_arcs': self.has_arcs, 'show_contexts': self.config.show_contexts, # Constants for all reports. # These css classes determine which lines are highlighted by default. 'category': { 'exc': 'exc show_exc', 'mis': 'mis show_mis', 'par': 'par run show_par', 'run': 'run', } } self.pyfile_html_source = read_data("pyfile.html") self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals) def report(self, morfs): """Generate an HTML report for `morfs`. `morfs` is a list of modules or file names. """ # Read the status data and check that this run used the same # global data as the last run. self.incr.read() self.incr.check_global_data(self.config, self.pyfile_html_source) # Process all the files. for fr, analysis in get_analysis_to_report(self.coverage, morfs): self.html_file(fr, analysis) if not self.all_files_nums: raise CoverageException("No data to report.") self.totals = sum(self.all_files_nums) # Write the index file. self.index_file() self.make_local_static_report_files() return self.totals.n_statements and self.totals.pc_covered def make_local_static_report_files(self): """Make local instances of static files for HTML report.""" # The files we provide must always be copied. for static in self.STATIC_FILES: shutil.copyfile(data_filename(static), os.path.join(self.directory, static)) # .gitignore can't be copied from the source tree because it would # prevent the static files from being checked in. with open(os.path.join(self.directory, ".gitignore"), "w") as fgi: fgi.write("# Created by coverage.py\n*\n") # The user may have extra CSS they want copied. if self.extra_css: shutil.copyfile(self.config.extra_css, os.path.join(self.directory, self.extra_css)) def html_file(self, fr, analysis): """Generate an HTML file for one source file.""" rootname = flat_rootname(fr.relative_filename()) html_filename = rootname + ".html" ensure_dir(self.directory) html_path = os.path.join(self.directory, html_filename) # Get the numbers for this file. nums = analysis.numbers self.all_files_nums.append(nums) if self.skip_covered: # Don't report on 100% files. no_missing_lines = (nums.n_missing == 0) no_missing_branches = (nums.n_partial_branches == 0) if no_missing_lines and no_missing_branches: # If there's an existing file, remove it. 
file_be_gone(html_path) self.skipped_covered_count += 1 return if self.skip_empty: # Don't report on empty files. if nums.n_statements == 0: file_be_gone(html_path) self.skipped_empty_count += 1 return # Find out if the file on disk is already correct. if self.incr.can_skip_file(self.data, fr, rootname): self.file_summaries.append(self.incr.index_info(rootname)) return # Write the HTML page for this file. file_data = self.datagen.data_for_file(fr, analysis) for ldata in file_data.lines: # Build the HTML for the line. html = [] for tok_type, tok_text in ldata.tokens: if tok_type == "ws": html.append(escape(tok_text)) else: tok_html = escape(tok_text) or '&nbsp;' html.append( f'<span class="{tok_type}">{tok_html}</span>' ) ldata.html = ''.join(html) if ldata.short_annotations: # 202F is NARROW NO-BREAK SPACE. # 219B is RIGHTWARDS ARROW WITH STROKE. ldata.annotate = ",&nbsp;&nbsp; ".join( f"{ldata.number}&#x202F;&#x219B;&#x202F;{d}" for d in ldata.short_annotations ) else: ldata.annotate = None if ldata.long_annotations: longs = ldata.long_annotations if len(longs) == 1: ldata.annotate_long = longs[0] else: ldata.annotate_long = "{:d} missed branches: {}".format( len(longs), ", ".join( f"{num:d}) {ann_long}" for num, ann_long in enumerate(longs, start=1) ), ) else: ldata.annotate_long = None css_classes = [] if ldata.category: css_classes.append(self.template_globals['category'][ldata.category]) ldata.css_class = ' '.join(css_classes) or "pln" html = self.source_tmpl.render(file_data.__dict__) write_html(html_path, html) # Save this file's information for the index file. index_info = { 'nums': nums, 'html_filename': html_filename, 'relative_filename': fr.relative_filename(), } self.file_summaries.append(index_info) self.incr.set_index_info(rootname, index_info) def index_file(self): """Write the index.html file for this report.""" index_tmpl = Templite(read_data("index.html"), self.template_globals) skipped_covered_msg = skipped_empty_msg = "" if self.skipped_covered_count: msg = "{} {} skipped due to complete coverage." skipped_covered_msg = msg.format( self.skipped_covered_count, "file" if self.skipped_covered_count == 1 else "files", ) if self.skipped_empty_count: msg = "{} empty {} skipped." skipped_empty_msg = msg.format( self.skipped_empty_count, "file" if self.skipped_empty_count == 1 else "files", ) html = index_tmpl.render({ 'files': self.file_summaries, 'totals': self.totals, 'skipped_covered_msg': skipped_covered_msg, 'skipped_empty_msg': skipped_empty_msg, }) index_file = os.path.join(self.directory, "index.html") write_html(index_file, html) self.coverage._message(f"Wrote HTML report to {index_file}") # Write the latest hashes for next time. self.incr.write() class IncrementalChecker: """Logic and data to support incremental reporting.""" STATUS_FILE = "status.json" STATUS_FORMAT = 2 # pylint: disable=wrong-spelling-in-comment,useless-suppression # The data looks like: # # { # "format": 2, # "globals": "540ee119c15d52a68a53fe6f0897346d", # "version": "4.0a1", # "files": { # "cogapp___init__": { # "hash": "e45581a5b48f879f301c0f30bf77a50c", # "index": { # "html_filename": "cogapp___init__.html", # "relative_filename": "cogapp/__init__", # "nums": [ 1, 14, 0, 0, 0, 0, 0 ] # } # }, # ... 
# "cogapp_whiteutils": { # "hash": "8504bb427fc488c4176809ded0277d51", # "index": { # "html_filename": "cogapp_whiteutils.html", # "relative_filename": "cogapp/whiteutils", # "nums": [ 1, 59, 0, 1, 28, 2, 2 ] # } # } # } # } def __init__(self, directory): self.directory = directory self.reset() def reset(self): """Initialize to empty. Causes all files to be reported.""" self.globals = '' self.files = {} def read(self): """Read the information we stored last time.""" usable = False try: status_file = os.path.join(self.directory, self.STATUS_FILE) with open(status_file) as fstatus: status = json.load(fstatus) except (OSError, ValueError): usable = False else: usable = True if status['format'] != self.STATUS_FORMAT: usable = False elif status['version'] != coverage.__version__: usable = False if usable: self.files = {} for filename, fileinfo in status['files'].items(): fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums']) self.files[filename] = fileinfo self.globals = status['globals'] else: self.reset() def write(self): """Write the current status.""" status_file = os.path.join(self.directory, self.STATUS_FILE) files = {} for filename, fileinfo in self.files.items(): fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args() files[filename] = fileinfo status = { 'format': self.STATUS_FORMAT, 'version': coverage.__version__, 'globals': self.globals, 'files': files, } with open(status_file, "w") as fout: json.dump(status, fout, separators=(',', ':')) def check_global_data(self, *data): """Check the global data that can affect incremental reporting.""" m = Hasher() for d in data: m.update(d) these_globals = m.hexdigest() if self.globals != these_globals: self.reset() self.globals = these_globals def can_skip_file(self, data, fr, rootname): """Can we skip reporting this file? `data` is a CoverageData object, `fr` is a `FileReporter`, and `rootname` is the name being used for the file. """ m = Hasher() m.update(fr.source().encode('utf-8')) add_data_to_hash(data, fr.filename, m) this_hash = m.hexdigest() that_hash = self.file_hash(rootname) if this_hash == that_hash: # Nothing has changed to require the file to be reported again. return True else: self.set_file_hash(rootname, this_hash) return False def file_hash(self, fname): """Get the hash of `fname`'s contents.""" return self.files.get(fname, {}).get('hash', '') def set_file_hash(self, fname, val): """Set the hash of `fname`'s contents.""" self.files.setdefault(fname, {})['hash'] = val def index_info(self, fname): """Get the information for index.html for `fname`.""" return self.files.get(fname, {}).get('index', {}) def set_index_info(self, fname, info): """Set the information for index.html for `fname`.""" self.files.setdefault(fname, {})['index'] = info # Helpers for templates and generating HTML def escape(t): """HTML-escape the text in `t`. This is only suitable for HTML text, not attributes. """ # Convert HTML special chars into HTML entities. return t.replace("&", "&amp;").replace("<", "&lt;") def pair(ratio): """Format a pair of numbers so JavaScript can read them in an attribute.""" return "%s %s" % ratio
17,694
Python
33.970356
100
0.549452
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/numbits.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Functions to manipulate packed binary representations of number sets. To save space, coverage stores sets of line numbers in SQLite using a packed binary representation called a numbits. A numbits is a set of positive integers. A numbits is stored as a blob in the database. The exact meaning of the bytes in the blobs should be considered an implementation detail that might change in the future. Use these functions to work with those binary blobs of data. """ import json from itertools import zip_longest from coverage.misc import contract, new_contract def _to_blob(b): """Convert a bytestring into a type SQLite will accept for a blob.""" return b new_contract('blob', lambda v: isinstance(v, bytes)) @contract(nums='Iterable', returns='blob') def nums_to_numbits(nums): """Convert `nums` into a numbits. Arguments: nums: a reusable iterable of integers, the line numbers to store. Returns: A binary blob. """ try: nbytes = max(nums) // 8 + 1 except ValueError: # nums was empty. return _to_blob(b'') b = bytearray(nbytes) for num in nums: b[num//8] |= 1 << num % 8 return _to_blob(bytes(b)) @contract(numbits='blob', returns='list[int]') def numbits_to_nums(numbits): """Convert a numbits into a list of numbers. Arguments: numbits: a binary blob, the packed number set. Returns: A list of ints. When registered as a SQLite function by :func:`register_sqlite_functions`, this returns a string, a JSON-encoded list of ints. """ nums = [] for byte_i, byte in enumerate(numbits): for bit_i in range(8): if (byte & (1 << bit_i)): nums.append(byte_i * 8 + bit_i) return nums @contract(numbits1='blob', numbits2='blob', returns='blob') def numbits_union(numbits1, numbits2): """Compute the union of two numbits. Returns: A new numbits, the union of `numbits1` and `numbits2`. """ byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) return _to_blob(bytes(b1 | b2 for b1, b2 in byte_pairs)) @contract(numbits1='blob', numbits2='blob', returns='blob') def numbits_intersection(numbits1, numbits2): """Compute the intersection of two numbits. Returns: A new numbits, the intersection of `numbits1` and `numbits2`. """ byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) intersection_bytes = bytes(b1 & b2 for b1, b2 in byte_pairs) return _to_blob(intersection_bytes.rstrip(b'\0')) @contract(numbits1='blob', numbits2='blob', returns='bool') def numbits_any_intersection(numbits1, numbits2): """Is there any number that appears in both numbits? Determine whether two number sets have a non-empty intersection. This is faster than computing the intersection. Returns: A bool, True if there is any number in both `numbits1` and `numbits2`. """ byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) return any(b1 & b2 for b1, b2 in byte_pairs) @contract(num='int', numbits='blob', returns='bool') def num_in_numbits(num, numbits): """Does the integer `num` appear in `numbits`? Returns: A bool, True if `num` is a member of `numbits`. """ nbyte, nbit = divmod(num, 8) if nbyte >= len(numbits): return False return bool(numbits[nbyte] & (1 << nbit)) def register_sqlite_functions(connection): """ Define numbits functions in a SQLite connection. 
This defines these functions for use in SQLite statements: * :func:`numbits_union` * :func:`numbits_intersection` * :func:`numbits_any_intersection` * :func:`num_in_numbits` * :func:`numbits_to_nums` `connection` is a :class:`sqlite3.Connection <python:sqlite3.Connection>` object. After creating the connection, pass it to this function to register the numbits functions. Then you can use numbits functions in your queries:: import sqlite3 from coverage.numbits import register_sqlite_functions conn = sqlite3.connect('example.db') register_sqlite_functions(conn) c = conn.cursor() # Kind of a nonsense query: find all the files and contexts that # executed line 47 in any file: c.execute( "select file_id, context_id from line_bits where num_in_numbits(?, numbits)", (47,) ) """ connection.create_function("numbits_union", 2, numbits_union) connection.create_function("numbits_intersection", 2, numbits_intersection) connection.create_function("numbits_any_intersection", 2, numbits_any_intersection) connection.create_function("num_in_numbits", 2, num_in_numbits) connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b)))
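# A minimal round-trip sketch of the numbits helpers above (illustrative
# only, not part of upstream coverage.py): pack a set of line numbers,
# test membership, merge with another set, and unpack again.
if __name__ == "__main__":
    nb = nums_to_numbits([1, 3, 47])
    assert numbits_to_nums(nb) == [1, 3, 47]
    assert num_in_numbits(47, nb)
    assert not num_in_numbits(2, nb)
    merged = numbits_union(nb, nums_to_numbits([2]))
    assert numbits_to_nums(merged) == [1, 2, 3, 47]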
4,995
Python
30.821656
94
0.669469
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/execfile.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Execute files of Python code.""" import importlib.machinery import importlib.util import inspect import marshal import os import struct import sys import types from coverage import env from coverage.exceptions import CoverageException, ExceptionDuringRun, NoCode, NoSource from coverage.files import canonical_filename, python_reported_file from coverage.misc import isolate_module from coverage.phystokens import compile_unicode from coverage.python import get_python_source os = isolate_module(os) PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER class DummyLoader: """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader. Currently only implements the .fullname attribute """ def __init__(self, fullname, *_args): self.fullname = fullname def find_module(modulename): """Find the module named `modulename`. Returns the file path of the module, the name of the enclosing package, and the spec. """ try: spec = importlib.util.find_spec(modulename) except ImportError as err: raise NoSource(str(err)) from err if not spec: raise NoSource(f"No module named {modulename!r}") pathname = spec.origin packagename = spec.name if spec.submodule_search_locations: mod_main = modulename + ".__main__" spec = importlib.util.find_spec(mod_main) if not spec: raise NoSource( f"No module named {mod_main}; " + f"{modulename!r} is a package and cannot be directly executed" ) pathname = spec.origin packagename = spec.name packagename = packagename.rpartition(".")[0] return pathname, packagename, spec class PyRunner: """Multi-stage execution of Python code. This is meant to emulate real Python execution as closely as possible. """ def __init__(self, args, as_module=False): self.args = args self.as_module = as_module self.arg0 = args[0] self.package = self.modulename = self.pathname = self.loader = self.spec = None def prepare(self): """Set sys.path properly. This needs to happen before any importing, and without importing anything. """ if self.as_module: if env.PYBEHAVIOR.actual_syspath0_dash_m: path0 = os.getcwd() else: path0 = "" elif os.path.isdir(self.arg0): # Running a directory means running the __main__.py file in that # directory. path0 = self.arg0 else: path0 = os.path.abspath(os.path.dirname(self.arg0)) if os.path.isdir(sys.path[0]): # sys.path fakery. If we are being run as a command, then sys.path[0] # is the directory of the "coverage" script. If this is so, replace # sys.path[0] with the directory of the file we're running, or the # current directory when running modules. If it isn't so, then we # don't know what's going on, and just leave it alone. top_file = inspect.stack()[-1][0].f_code.co_filename sys_path_0_abs = os.path.abspath(sys.path[0]) top_file_dir_abs = os.path.abspath(os.path.dirname(top_file)) sys_path_0_abs = canonical_filename(sys_path_0_abs) top_file_dir_abs = canonical_filename(top_file_dir_abs) if sys_path_0_abs != top_file_dir_abs: path0 = None else: # sys.path[0] is a file. Is the next entry the directory containing # that file? if sys.path[1] == os.path.dirname(sys.path[0]): # Can it be right to always remove that? del sys.path[1] if path0 is not None: sys.path[0] = python_reported_file(path0) def _prepare2(self): """Do more preparation to run Python code. Includes finding the module to run and adjusting sys.argv[0]. This method is allowed to import code. 
""" if self.as_module: self.modulename = self.arg0 pathname, self.package, self.spec = find_module(self.modulename) if self.spec is not None: self.modulename = self.spec.name self.loader = DummyLoader(self.modulename) self.pathname = os.path.abspath(pathname) self.args[0] = self.arg0 = self.pathname elif os.path.isdir(self.arg0): # Running a directory means running the __main__.py file in that # directory. for ext in [".py", ".pyc", ".pyo"]: try_filename = os.path.join(self.arg0, "__main__" + ext) # 3.8.10 changed how files are reported when running a # directory. But I'm not sure how far this change is going to # spread, so I'll just hard-code it here for now. if env.PYVERSION >= (3, 8, 10): try_filename = os.path.abspath(try_filename) if os.path.exists(try_filename): self.arg0 = try_filename break else: raise NoSource("Can't find '__main__' module in '%s'" % self.arg0) # Make a spec. I don't know if this is the right way to do it. try_filename = python_reported_file(try_filename) self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename) self.spec.has_location = True self.package = "" self.loader = DummyLoader("__main__") else: self.loader = DummyLoader("__main__") self.arg0 = python_reported_file(self.arg0) def run(self): """Run the Python code!""" self._prepare2() # Create a module to serve as __main__ main_mod = types.ModuleType('__main__') from_pyc = self.arg0.endswith((".pyc", ".pyo")) main_mod.__file__ = self.arg0 if from_pyc: main_mod.__file__ = main_mod.__file__[:-1] if self.package is not None: main_mod.__package__ = self.package main_mod.__loader__ = self.loader if self.spec is not None: main_mod.__spec__ = self.spec main_mod.__builtins__ = sys.modules['builtins'] sys.modules['__main__'] = main_mod # Set sys.argv properly. sys.argv = self.args try: # Make a code object somehow. if from_pyc: code = make_code_from_pyc(self.arg0) else: code = make_code_from_py(self.arg0) except CoverageException: raise except Exception as exc: msg = f"Couldn't run '{self.arg0}' as Python code: {exc.__class__.__name__}: {exc}" raise CoverageException(msg) from exc # Execute the code object. # Return to the original directory in case the test code exits in # a non-existent directory. cwd = os.getcwd() try: exec(code, main_mod.__dict__) except SystemExit: # pylint: disable=try-except-raise # The user called sys.exit(). Just pass it along to the upper # layers, where it will be handled. raise except Exception: # Something went wrong while executing the user code. # Get the exc_info, and pack them into an exception that we can # throw up to the outer loop. We peel one layer off the traceback # so that the coverage.py code doesn't appear in the final printed # traceback. typ, err, tb = sys.exc_info() # PyPy3 weirdness. If I don't access __context__, then somehow it # is non-None when the exception is reported at the upper layer, # and a nested exception is shown to the user. This getattr fixes # it somehow? https://bitbucket.org/pypy/pypy/issue/1903 getattr(err, '__context__', None) # Call the excepthook. try: err.__traceback__ = err.__traceback__.tb_next sys.excepthook(typ, err, tb.tb_next) except SystemExit: # pylint: disable=try-except-raise raise except Exception as exc: # Getting the output right in the case of excepthook # shenanigans is kind of involved. 
sys.stderr.write("Error in sys.excepthook:\n") typ2, err2, tb2 = sys.exc_info() err2.__suppress_context__ = True err2.__traceback__ = err2.__traceback__.tb_next sys.__excepthook__(typ2, err2, tb2.tb_next) sys.stderr.write("\nOriginal exception was:\n") raise ExceptionDuringRun(typ, err, tb.tb_next) from exc else: sys.exit(1) finally: os.chdir(cwd) def run_python_module(args): """Run a Python module, as though with ``python -m name args...``. `args` is the argument array to present as sys.argv, including the first element naming the module being executed. This is a helper for tests, to encapsulate how to use PyRunner. """ runner = PyRunner(args, as_module=True) runner.prepare() runner.run() def run_python_file(args): """Run a Python file as if it were the main program on the command line. `args` is the argument array to present as sys.argv, including the first element naming the file being executed. `package` is the name of the enclosing package, if any. This is a helper for tests, to encapsulate how to use PyRunner. """ runner = PyRunner(args, as_module=False) runner.prepare() runner.run() def make_code_from_py(filename): """Get source from `filename` and make a code object of it.""" # Open the source file. try: source = get_python_source(filename) except (OSError, NoSource) as exc: raise NoSource(f"No file to run: '{filename}'") from exc code = compile_unicode(source, filename, "exec") return code def make_code_from_pyc(filename): """Get a code object from a .pyc file.""" try: fpyc = open(filename, "rb") except OSError as exc: raise NoCode(f"No file to run: '{filename}'") from exc with fpyc: # First four bytes are a version-specific magic number. It has to # match or we won't run the file. magic = fpyc.read(4) if magic != PYC_MAGIC_NUMBER: raise NoCode(f"Bad magic number in .pyc file: {magic} != {PYC_MAGIC_NUMBER}") date_based = True if env.PYBEHAVIOR.hashed_pyc_pep552: flags = struct.unpack('<L', fpyc.read(4))[0] hash_based = flags & 0x01 if hash_based: fpyc.read(8) # Skip the hash. date_based = False if date_based: # Skip the junk in the header that we don't need. fpyc.read(4) # Skip the moddate. # 3.3 added another long to the header (size), skip it. fpyc.read(4) # The rest of the file is the code object we want. code = marshal.load(fpyc) return code
11,487
Python
35.469841
95
0.581527
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/collector.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Raw data collector for coverage.py.""" import os import sys from coverage import env from coverage.debug import short_stack from coverage.disposition import FileDisposition from coverage.exceptions import CoverageException from coverage.misc import human_sorted, isolate_module from coverage.pytracer import PyTracer os = isolate_module(os) try: # Use the C extension code when we can, for speed. from coverage.tracer import CTracer, CFileDisposition except ImportError: # Couldn't import the C extension, maybe it isn't built. if os.getenv('COVERAGE_TEST_TRACER') == 'c': # pragma: part covered # During testing, we use the COVERAGE_TEST_TRACER environment variable # to indicate that we've fiddled with the environment to test this # fallback code. If we thought we had a C tracer, but couldn't import # it, then exit quickly and clearly instead of dribbling confusing # errors. I'm using sys.exit here instead of an exception because an # exception here causes all sorts of other noise in unittest. sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n") sys.exit(1) CTracer = None class Collector: """Collects trace data. Creates a Tracer object for each thread, since they track stack information. Each Tracer points to the same shared data, contributing traced data points. When the Collector is started, it creates a Tracer for the current thread, and installs a function to create Tracers for each new thread started. When the Collector is stopped, all active Tracers are stopped. Threads started while the Collector is stopped will never have Tracers associated with them. """ # The stack of active Collectors. Collectors are added here when started, # and popped when stopped. Collectors on the stack are paused when not # the top, and resumed when they become the top again. _collectors = [] # The concurrency settings we support here. SUPPORTED_CONCURRENCIES = {"greenlet", "eventlet", "gevent", "thread"} def __init__( self, should_trace, check_include, should_start_context, file_mapper, timid, branch, warn, concurrency, ): """Create a collector. `should_trace` is a function, taking a file name and a frame, and returning a `coverage.FileDisposition object`. `check_include` is a function taking a file name and a frame. It returns a boolean: True if the file should be traced, False if not. `should_start_context` is a function taking a frame, and returning a string. If the frame should be the start of a new context, the string is the new context. If the frame should not be the start of a new context, return None. `file_mapper` is a function taking a filename, and returning a Unicode filename. The result is the name that will be recorded in the data file. If `timid` is true, then a slower simpler trace function will be used. This is important for some environments where manipulation of tracing functions make the faster more sophisticated trace function not operate properly. If `branch` is true, then branches will be measured. This involves collecting data on which statements followed each other (arcs). Use `get_arc_data` to get the arc data. `warn` is a warning function, taking a single string message argument and an optional slug argument which will be a string or None, to be used if a warning needs to be issued. `concurrency` is a list of strings indicating the concurrency libraries in use. 
Valid values are "greenlet", "eventlet", "gevent", or "thread" (the default). Of these four values, only one can be supplied. Other values are ignored. """ self.should_trace = should_trace self.check_include = check_include self.should_start_context = should_start_context self.file_mapper = file_mapper self.warn = warn self.branch = branch self.threading = None self.covdata = None self.static_context = None self.origin = short_stack() self.concur_id_func = None self.mapped_file_cache = {} # We can handle a few concurrency options here, but only one at a time. these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency) if len(these_concurrencies) > 1: raise CoverageException(f"Conflicting concurrency settings: {concurrency}") self.concurrency = these_concurrencies.pop() if these_concurrencies else '' try: if self.concurrency == "greenlet": import greenlet self.concur_id_func = greenlet.getcurrent elif self.concurrency == "eventlet": import eventlet.greenthread # pylint: disable=import-error,useless-suppression self.concur_id_func = eventlet.greenthread.getcurrent elif self.concurrency == "gevent": import gevent # pylint: disable=import-error,useless-suppression self.concur_id_func = gevent.getcurrent elif self.concurrency == "thread" or not self.concurrency: # It's important to import threading only if we need it. If # it's imported early, and the program being measured uses # gevent, then gevent's monkey-patching won't work properly. import threading self.threading = threading else: raise CoverageException(f"Don't understand concurrency={concurrency}") except ImportError as ex: raise CoverageException( "Couldn't trace with concurrency={}, the module isn't installed.".format( self.concurrency, ) ) from ex self.reset() if timid: # Being timid: use the simple Python trace function. self._trace_class = PyTracer else: # Being fast: use the C Tracer if it is available, else the Python # trace function. self._trace_class = CTracer or PyTracer if self._trace_class is CTracer: self.file_disposition_class = CFileDisposition self.supports_plugins = True self.packed_arcs = True else: self.file_disposition_class = FileDisposition self.supports_plugins = False self.packed_arcs = False def __repr__(self): return f"<Collector at 0x{id(self):x}: {self.tracer_name()}>" def use_data(self, covdata, context): """Use `covdata` for recording data.""" self.covdata = covdata self.static_context = context self.covdata.set_context(self.static_context) def tracer_name(self): """Return the class name of the tracer we're using.""" return self._trace_class.__name__ def _clear_data(self): """Clear out existing data, but stay ready for more collection.""" # We used to use self.data.clear(), but that would remove filename # keys and data values that were still in use higher up the stack # when we are called as part of switch_context. for d in self.data.values(): d.clear() for tracer in self.tracers: tracer.reset_activity() def reset(self): """Clear collected data, and prepare to collect more.""" # A dictionary mapping file names to dicts with line number keys (if not # branch coverage), or mapping file names to dicts with line number # pairs as keys (if branch coverage). self.data = {} # A dictionary mapping file names to file tracer plugin names that will # handle them. self.file_tracers = {} self.disabled_plugins = set() # The .should_trace_cache attribute is a cache from file names to # coverage.FileDisposition objects, or None. 
When a file is first # considered for tracing, a FileDisposition is obtained from # Coverage.should_trace. Its .trace attribute indicates whether the # file should be traced or not. If it should be, a plugin with dynamic # file names can decide not to trace it based on the dynamic file name # being excluded by the inclusion rules, in which case the # FileDisposition will be replaced by None in the cache. if env.PYPY: import __pypy__ # pylint: disable=import-error # Alex Gaynor said: # should_trace_cache is a strictly growing key: once a key is in # it, it never changes. Further, the keys used to access it are # generally constant, given sufficient context. That is to say, at # any given point _trace() is called, pypy is able to know the key. # This is because the key is determined by the physical source code # line, and that's invariant with the call site. # # This property of a dict with immutable keys, combined with # call-site-constant keys is a match for PyPy's module dict, # which is optimized for such workloads. # # This gives a 20% benefit on the workload described at # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage self.should_trace_cache = __pypy__.newdict("module") else: self.should_trace_cache = {} # Our active Tracers. self.tracers = [] self._clear_data() def _start_tracer(self): """Start a new Tracer object, and store it in self.tracers.""" tracer = self._trace_class() tracer.data = self.data tracer.trace_arcs = self.branch tracer.should_trace = self.should_trace tracer.should_trace_cache = self.should_trace_cache tracer.warn = self.warn if hasattr(tracer, 'concur_id_func'): tracer.concur_id_func = self.concur_id_func elif self.concur_id_func: raise CoverageException( "Can't support concurrency={} with {}, only threads are supported".format( self.concurrency, self.tracer_name(), ) ) if hasattr(tracer, 'file_tracers'): tracer.file_tracers = self.file_tracers if hasattr(tracer, 'threading'): tracer.threading = self.threading if hasattr(tracer, 'check_include'): tracer.check_include = self.check_include if hasattr(tracer, 'should_start_context'): tracer.should_start_context = self.should_start_context tracer.switch_context = self.switch_context if hasattr(tracer, 'disable_plugin'): tracer.disable_plugin = self.disable_plugin fn = tracer.start() self.tracers.append(tracer) return fn # The trace function has to be set individually on each thread before # execution begins. Ironically, the only support the threading module has # for running code before the thread main is the tracing function. So we # install this as a trace function, and the first time it's called, it does # the real trace installation. def _installation_trace(self, frame, event, arg): """Called on new threads, installs the real tracer.""" # Remove ourselves as the trace function. sys.settrace(None) # Install the real tracer. fn = self._start_tracer() # Invoke the real trace function with the current event, to be sure # not to lose an event. if fn: fn = fn(frame, event, arg) # Return the new trace function to continue tracing in this scope. return fn def start(self): """Start collecting trace information.""" if self._collectors: self._collectors[-1].pause() self.tracers = [] # Check to see whether we had a fullcoverage tracer installed. If so, # get the stack frames it stashed away for us. traces0 = [] fn0 = sys.gettrace() if fn0: tracer0 = getattr(fn0, '__self__', None) if tracer0: traces0 = getattr(tracer0, 'traces', []) try: # Install the tracer on this thread. 
fn = self._start_tracer() except: if self._collectors: self._collectors[-1].resume() raise # If _start_tracer succeeded, then we add ourselves to the global # stack of collectors. self._collectors.append(self) # Replay all the events from fullcoverage into the new trace function. for args in traces0: (frame, event, arg), lineno = args try: fn(frame, event, arg, lineno=lineno) except TypeError as ex: raise Exception("fullcoverage must be run with the C trace function.") from ex # Install our installation tracer in threading, to jump-start other # threads. if self.threading: self.threading.settrace(self._installation_trace) def stop(self): """Stop collecting trace information.""" assert self._collectors if self._collectors[-1] is not self: print("self._collectors:") for c in self._collectors: print(f" {c!r}\n{c.origin}") assert self._collectors[-1] is self, ( f"Expected current collector to be {self!r}, but it's {self._collectors[-1]!r}" ) self.pause() # Remove this Collector from the stack, and resume the one underneath # (if any). self._collectors.pop() if self._collectors: self._collectors[-1].resume() def pause(self): """Pause tracing, but be prepared to `resume`.""" for tracer in self.tracers: tracer.stop() stats = tracer.get_stats() if stats: print("\nCoverage.py tracer stats:") for k in human_sorted(stats.keys()): print(f"{k:>20}: {stats[k]}") if self.threading: self.threading.settrace(None) def resume(self): """Resume tracing after a `pause`.""" for tracer in self.tracers: tracer.start() if self.threading: self.threading.settrace(self._installation_trace) else: self._start_tracer() def _activity(self): """Has any activity been traced? Returns a boolean, True if any trace function was invoked. """ return any(tracer.activity() for tracer in self.tracers) def switch_context(self, new_context): """Switch to a new dynamic context.""" self.flush_data() if self.static_context: context = self.static_context if new_context: context += "|" + new_context else: context = new_context self.covdata.set_context(context) def disable_plugin(self, disposition): """Disable the plugin mentioned in `disposition`.""" file_tracer = disposition.file_tracer plugin = file_tracer._coverage_plugin plugin_name = plugin._coverage_plugin_name self.warn(f"Disabling plug-in {plugin_name!r} due to previous exception") plugin._coverage_enabled = False disposition.trace = False def cached_mapped_file(self, filename): """A locally cached version of file names mapped through file_mapper.""" key = (type(filename), filename) try: return self.mapped_file_cache[key] except KeyError: return self.mapped_file_cache.setdefault(key, self.file_mapper(filename)) def mapped_file_dict(self, d): """Return a dict like d, but with keys modified by file_mapper.""" # The call to list(items()) ensures that the GIL protects the dictionary # iterator against concurrent modifications by tracers running # in other threads. We try three times in case of concurrent # access, hoping to get a clean copy. runtime_err = None for _ in range(3): # pragma: part covered try: items = list(d.items()) except RuntimeError as ex: # pragma: cant happen runtime_err = ex else: break else: raise runtime_err # pragma: cant happen return {self.cached_mapped_file(k): v for k, v in items if v} def plugin_was_disabled(self, plugin): """Record that `plugin` was disabled during the run.""" self.disabled_plugins.add(plugin._coverage_plugin_name) def flush_data(self): """Save the collected data to our associated `CoverageData`. Data may have also been saved along the way. 
This forces the last of the data to be saved. Returns True if there was data to save, False if not. """ if not self._activity(): return False if self.branch: if self.packed_arcs: # Unpack the line number pairs packed into integers. See # tracer.c:CTracer_record_pair for the C code that creates # these packed ints. data = {} for fname, packeds in self.data.items(): tuples = [] for packed in packeds: l1 = packed & 0xFFFFF l2 = (packed & (0xFFFFF << 20)) >> 20 if packed & (1 << 40): l1 *= -1 if packed & (1 << 41): l2 *= -1 tuples.append((l1, l2)) data[fname] = tuples else: data = self.data self.covdata.add_arcs(self.mapped_file_dict(data)) else: self.covdata.add_lines(self.mapped_file_dict(self.data)) file_tracers = { k: v for k, v in self.file_tracers.items() if v not in self.disabled_plugins } self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers)) self._clear_data() return True
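# An illustrative sketch (not part of upstream coverage.py) of the packed-arc
# layout implied by the unpacking in flush_data above: 20 bits per line-number
# magnitude, with bits 40 and 41 as sign flags for l1 and l2.
if __name__ == "__main__":
    def pack_arc(l1, l2):
        """Pack a (from, to) line pair the way the decode above expects."""
        packed = (abs(l1) & 0xFFFFF) | ((abs(l2) & 0xFFFFF) << 20)
        if l1 < 0:
            packed |= 1 << 40
        if l2 < 0:
            packed |= 1 << 41
        return packed

    packed = pack_arc(-1, 7)  # e.g. an entry arc into a function body at line 7
    l1 = packed & 0xFFFFF
    l2 = (packed & (0xFFFFF << 20)) >> 20
    if packed & (1 << 40):
        l1 *= -1
    if packed & (1 << 41):
        l2 *= -1
    assert (l1, l2) == (-1, 7)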
18,826
Python
38.887712
98
0.60358
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/context.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Determine contexts for coverage.py""" def combine_context_switchers(context_switchers): """Create a single context switcher from multiple switchers. `context_switchers` is a list of functions that take a frame as an argument and return a string to use as the new context label. Returns a function that composites `context_switchers` functions, or None if `context_switchers` is an empty list. When invoked, the combined switcher calls `context_switchers` one-by-one until a string is returned. The combined switcher returns None if all `context_switchers` return None. """ if not context_switchers: return None if len(context_switchers) == 1: return context_switchers[0] def should_start_context(frame): """The combiner for multiple context switchers.""" for switcher in context_switchers: new_context = switcher(frame) if new_context is not None: return new_context return None return should_start_context def should_start_context_test_function(frame): """Is this frame calling a test_* function?""" co_name = frame.f_code.co_name if co_name.startswith("test") or co_name == "runTest": return qualname_from_frame(frame) return None def qualname_from_frame(frame): """Get a qualified name for the code running in `frame`.""" co = frame.f_code fname = co.co_name method = None if co.co_argcount and co.co_varnames[0] == "self": self = frame.f_locals.get("self", None) method = getattr(self, fname, None) if method is None: func = frame.f_globals.get(fname) if func is None: return None return func.__module__ + "." + fname func = getattr(method, "__func__", None) if func is None: cls = self.__class__ return cls.__module__ + "." + cls.__name__ + "." + fname return func.__module__ + "." + func.__qualname__
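# A minimal usage sketch of combine_context_switchers (illustrative only,
# not part of upstream coverage.py). Each switcher is tried in order; the
# first non-None result names the new context.
if __name__ == "__main__":
    import sys

    def switch_on_demo(frame):
        # Hypothetical rule: functions named "demo" start a "demo" context.
        return "demo" if frame.f_code.co_name == "demo" else None

    switcher = combine_context_switchers(
        [switch_on_demo, should_start_context_test_function]
    )

    def demo():
        assert switcher(sys._getframe()) == "demo"

    demo()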
2,142
Python
31.469696
79
0.644258
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/results.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Results of coverage measurement.""" import collections from coverage.debug import SimpleReprMixin from coverage.exceptions import CoverageException from coverage.misc import contract, nice_pair class Analysis: """The results of analyzing a FileReporter.""" def __init__(self, data, precision, file_reporter, file_mapper): self.data = data self.file_reporter = file_reporter self.filename = file_mapper(self.file_reporter.filename) self.statements = self.file_reporter.lines() self.excluded = self.file_reporter.excluded_lines() # Identify missing statements. executed = self.data.lines(self.filename) or [] executed = self.file_reporter.translate_lines(executed) self.executed = executed self.missing = self.statements - self.executed if self.data.has_arcs(): self._arc_possibilities = sorted(self.file_reporter.arcs()) self.exit_counts = self.file_reporter.exit_counts() self.no_branch = self.file_reporter.no_branch_lines() n_branches = self._total_branches() mba = self.missing_branch_arcs() n_partial_branches = sum(len(v) for k,v in mba.items() if k not in self.missing) n_missing_branches = sum(len(v) for k,v in mba.items()) else: self._arc_possibilities = [] self.exit_counts = {} self.no_branch = set() n_branches = n_partial_branches = n_missing_branches = 0 self.numbers = Numbers( precision=precision, n_files=1, n_statements=len(self.statements), n_excluded=len(self.excluded), n_missing=len(self.missing), n_branches=n_branches, n_partial_branches=n_partial_branches, n_missing_branches=n_missing_branches, ) def missing_formatted(self, branches=False): """The missing line numbers, formatted nicely. Returns a string like "1-2, 5-11, 13-14". If `branches` is true, includes the missing branch arcs also. """ if branches and self.has_arcs(): arcs = self.missing_branch_arcs().items() else: arcs = None return format_lines(self.statements, self.missing, arcs=arcs) def has_arcs(self): """Were arcs measured in this result?""" return self.data.has_arcs() @contract(returns='list(tuple(int, int))') def arc_possibilities(self): """Returns a sorted list of the arcs in the code.""" return self._arc_possibilities @contract(returns='list(tuple(int, int))') def arcs_executed(self): """Returns a sorted list of the arcs actually executed in the code.""" executed = self.data.arcs(self.filename) or [] executed = self.file_reporter.translate_arcs(executed) return sorted(executed) @contract(returns='list(tuple(int, int))') def arcs_missing(self): """Returns a sorted list of the unexecuted arcs in the code.""" possible = self.arc_possibilities() executed = self.arcs_executed() missing = ( p for p in possible if p not in executed and p[0] not in self.no_branch and p[1] not in self.excluded ) return sorted(missing) @contract(returns='list(tuple(int, int))') def arcs_unpredicted(self): """Returns a sorted list of the executed arcs missing from the code.""" possible = self.arc_possibilities() executed = self.arcs_executed() # Exclude arcs here which connect a line to itself. They can occur # in executed data in some cases. This is where they can cause # trouble, and here is where it's the least burden to remove them. # Also, generators can somehow cause arcs from "enter" to "exit", so # make sure we have at least one positive value. 
unpredicted = ( e for e in executed if e not in possible and e[0] != e[1] and (e[0] > 0 or e[1] > 0) ) return sorted(unpredicted) def _branch_lines(self): """Returns a list of line numbers that have more than one exit.""" return [l1 for l1,count in self.exit_counts.items() if count > 1] def _total_branches(self): """How many total branches are there?""" return sum(count for count in self.exit_counts.values() if count > 1) @contract(returns='dict(int: list(int))') def missing_branch_arcs(self): """Return arcs that weren't executed from branch lines. Returns {l1:[l2a,l2b,...], ...} """ missing = self.arcs_missing() branch_lines = set(self._branch_lines()) mba = collections.defaultdict(list) for l1, l2 in missing: if l1 in branch_lines: mba[l1].append(l2) return mba @contract(returns='dict(int: tuple(int, int))') def branch_stats(self): """Get stats about branches. Returns a dict mapping line numbers to a tuple: (total_exits, taken_exits). """ missing_arcs = self.missing_branch_arcs() stats = {} for lnum in self._branch_lines(): exits = self.exit_counts[lnum] missing = len(missing_arcs[lnum]) stats[lnum] = (exits, exits - missing) return stats class Numbers(SimpleReprMixin): """The numerical results of measuring coverage. This holds the basic statistics from `Analysis`, and is used to roll up statistics across files. """ def __init__(self, precision=0, n_files=0, n_statements=0, n_excluded=0, n_missing=0, n_branches=0, n_partial_branches=0, n_missing_branches=0 ): assert 0 <= precision < 10 self._precision = precision self._near0 = 1.0 / 10**precision self._near100 = 100.0 - self._near0 self.n_files = n_files self.n_statements = n_statements self.n_excluded = n_excluded self.n_missing = n_missing self.n_branches = n_branches self.n_partial_branches = n_partial_branches self.n_missing_branches = n_missing_branches def init_args(self): """Return a list for __init__(*args) to recreate this object.""" return [ self._precision, self.n_files, self.n_statements, self.n_excluded, self.n_missing, self.n_branches, self.n_partial_branches, self.n_missing_branches, ] @property def n_executed(self): """Returns the number of executed statements.""" return self.n_statements - self.n_missing @property def n_executed_branches(self): """Returns the number of executed branches.""" return self.n_branches - self.n_missing_branches @property def pc_covered(self): """Returns a single percentage value for coverage.""" if self.n_statements > 0: numerator, denominator = self.ratio_covered pc_cov = (100.0 * numerator) / denominator else: pc_cov = 100.0 return pc_cov @property def pc_covered_str(self): """Returns the percent covered, as a string, without a percent sign. Note that "0" is only returned when the value is truly zero, and "100" is only returned when the value is truly 100. Rounding can never result in either "0" or "100". """ return self.display_covered(self.pc_covered) def display_covered(self, pc): """Return a displayable total percentage, as a string. Note that "0" is only returned when the value is truly zero, and "100" is only returned when the value is truly 100. Rounding can never result in either "0" or "100". 
""" if 0 < pc < self._near0: pc = self._near0 elif self._near100 < pc < 100: pc = self._near100 else: pc = round(pc, self._precision) return "%.*f" % (self._precision, pc) def pc_str_width(self): """How many characters wide can pc_covered_str be?""" width = 3 # "100" if self._precision > 0: width += 1 + self._precision return width @property def ratio_covered(self): """Return a numerator and denominator for the coverage ratio.""" numerator = self.n_executed + self.n_executed_branches denominator = self.n_statements + self.n_branches return numerator, denominator def __add__(self, other): nums = Numbers(precision=self._precision) nums.n_files = self.n_files + other.n_files nums.n_statements = self.n_statements + other.n_statements nums.n_excluded = self.n_excluded + other.n_excluded nums.n_missing = self.n_missing + other.n_missing nums.n_branches = self.n_branches + other.n_branches nums.n_partial_branches = ( self.n_partial_branches + other.n_partial_branches ) nums.n_missing_branches = ( self.n_missing_branches + other.n_missing_branches ) return nums def __radd__(self, other): # Implementing 0+Numbers allows us to sum() a list of Numbers. assert other == 0 # we only ever call it this way. return self def _line_ranges(statements, lines): """Produce a list of ranges for `format_lines`.""" statements = sorted(statements) lines = sorted(lines) pairs = [] start = None lidx = 0 for stmt in statements: if lidx >= len(lines): break if stmt == lines[lidx]: lidx += 1 if not start: start = stmt end = stmt elif start: pairs.append((start, end)) start = None if start: pairs.append((start, end)) return pairs def format_lines(statements, lines, arcs=None): """Nicely format a list of line numbers. Format a list of line numbers for printing by coalescing groups of lines as long as the lines represent consecutive statements. This will coalesce even if there are gaps between statements. For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14". Both `lines` and `statements` can be any iterable. All of the elements of `lines` must be in `statements`, and all of the values must be positive integers. If `arcs` is provided, they are (start,[end,end,end]) pairs that will be included in the output as long as start isn't in `lines`. """ line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)] if arcs: line_exits = sorted(arcs) for line, exits in line_exits: for ex in sorted(exits): if line not in lines and ex not in lines: dest = (ex if ex > 0 else "exit") line_items.append((line, "%d->%s" % (line, dest))) ret = ', '.join(t[-1] for t in sorted(line_items)) return ret @contract(total='number', fail_under='number', precision=int, returns=bool) def should_fail_under(total, fail_under, precision): """Determine if a total should fail due to fail-under. `total` is a float, the coverage measurement total. `fail_under` is the fail_under setting to compare with. `precision` is the number of digits to consider after the decimal point. Returns True if the total should fail. """ # We can never achieve higher than 100% coverage, or less than zero. if not (0 <= fail_under <= 100.0): msg = f"fail_under={fail_under} is invalid. Must be between 0 and 100." raise CoverageException(msg) # Special case for fail_under=100, it must really be 100. if fail_under == 100.0 and total != 100.0: return True return round(total, precision) < fail_under
12,285
Python
34.40634
92
0.60057
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/sqldata.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Sqlite coverage data.""" # TODO: factor out dataop debugging to a wrapper class? # TODO: make sure all dataop debugging is in place somehow import collections import datetime import functools import glob import itertools import os import re import sqlite3 import sys import threading import zlib from coverage.debug import NoDebugging, SimpleReprMixin, clipped_repr from coverage.exceptions import CoverageException from coverage.files import PathAliases from coverage.misc import contract, file_be_gone, filename_suffix, isolate_module from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits from coverage.version import __version__ os = isolate_module(os) # If you change the schema, increment the SCHEMA_VERSION, and update the # docs in docs/dbschema.rst also. SCHEMA_VERSION = 7 # Schema versions: # 1: Released in 5.0a2 # 2: Added contexts in 5.0a3. # 3: Replaced line table with line_map table. # 4: Changed line_map.bitmap to line_map.numbits. # 5: Added foreign key declarations. # 6: Key-value in meta. # 7: line_map -> line_bits SCHEMA = """\ CREATE TABLE coverage_schema ( -- One row, to record the version of the schema in this db. version integer ); CREATE TABLE meta ( -- Key-value pairs, to record metadata about the data key text, value text, unique (key) -- Keys: -- 'has_arcs' boolean -- Is this data recording branches? -- 'sys_argv' text -- The coverage command line that recorded the data. -- 'version' text -- The version of coverage.py that made the file. -- 'when' text -- Datetime when the file was created. ); CREATE TABLE file ( -- A row per file measured. id integer primary key, path text, unique (path) ); CREATE TABLE context ( -- A row per context measured. id integer primary key, context text, unique (context) ); CREATE TABLE line_bits ( -- If recording lines, a row per context per file executed. -- All of the line numbers for that file/context are in one numbits. file_id integer, -- foreign key to `file`. context_id integer, -- foreign key to `context`. numbits blob, -- see the numbits functions in coverage.numbits foreign key (file_id) references file (id), foreign key (context_id) references context (id), unique (file_id, context_id) ); CREATE TABLE arc ( -- If recording branches, a row per context per from/to line transition executed. file_id integer, -- foreign key to `file`. context_id integer, -- foreign key to `context`. fromno integer, -- line number jumped from. tono integer, -- line number jumped to. foreign key (file_id) references file (id), foreign key (context_id) references context (id), unique (file_id, context_id, fromno, tono) ); CREATE TABLE tracer ( -- A row per file indicating the tracer used for that file. file_id integer primary key, tracer text, foreign key (file_id) references file (id) ); """ class CoverageData(SimpleReprMixin): """Manages collected coverage data, including file storage. This class is the public supported API to the data that coverage.py collects during program execution. It includes information about what code was executed. It does not include information from the analysis phase, to determine what lines could have been executed, or what lines were not executed. .. note:: The data file is currently a SQLite database file, with a :ref:`documented schema <dbschema>`. The schema is subject to change though, so be careful about querying it directly. 
Use this API if you can to isolate yourself from changes. There are a number of kinds of data that can be collected: * **lines**: the line numbers of source lines that were executed. These are always available. * **arcs**: pairs of source and destination line numbers for transitions between source lines. These are only available if branch coverage was used. * **file tracer names**: the module names of the file tracer plugins that handled each file in the data. Lines, arcs, and file tracer names are stored for each source file. File names in this API are case-sensitive, even on platforms with case-insensitive file systems. A data file either stores lines, or arcs, but not both. A data file is associated with the data when the :class:`CoverageData` is created, using the parameters `basename`, `suffix`, and `no_disk`. The base name can be queried with :meth:`base_filename`, and the actual file name being used is available from :meth:`data_filename`. To read an existing coverage.py data file, use :meth:`read`. You can then access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`, or :meth:`file_tracer`. The :meth:`has_arcs` method indicates whether arc data is available. You can get a set of the files in the data with :meth:`measured_files`. As with most Python containers, you can determine if there is any data at all by using this object as a boolean value. The contexts for each line in a file can be read with :meth:`contexts_by_lineno`. To limit querying to certain contexts, use :meth:`set_query_context` or :meth:`set_query_contexts`. These will narrow the focus of subsequent :meth:`lines`, :meth:`arcs`, and :meth:`contexts_by_lineno` calls. The set of all measured context names can be retrieved with :meth:`measured_contexts`. Most data files will be created by coverage.py itself, but you can use methods here to create data files if you like. The :meth:`add_lines`, :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways that are convenient for coverage.py. To record data for contexts, use :meth:`set_context` to set a context to be used for subsequent :meth:`add_lines` and :meth:`add_arcs` calls. To add a source file without any measured data, use :meth:`touch_file`, or :meth:`touch_files` for a list of such files. Write the data to its file with :meth:`write`. You can clear the data in memory with :meth:`erase`. Two data collections can be combined by using :meth:`update` on one :class:`CoverageData`, passing it the other. Data in a :class:`CoverageData` can be serialized and deserialized with :meth:`dumps` and :meth:`loads`. The methods used during the coverage.py collection phase (:meth:`add_lines`, :meth:`add_arcs`, :meth:`set_context`, and :meth:`add_file_tracers`) are thread-safe. Other methods may not be. """ def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=None): """Create a :class:`CoverageData` object to hold coverage-measured data. Arguments: basename (str): the base name of the data file, defaulting to ".coverage". suffix (str or bool): has the same meaning as the `data_suffix` argument to :class:`coverage.Coverage`. no_disk (bool): if True, keep all data in memory, and don't write any disk file. warn: a warning callback function, accepting a warning message argument. 
debug: a `DebugControl` object (optional) """ self._no_disk = no_disk self._basename = os.path.abspath(basename or ".coverage") self._suffix = suffix self._warn = warn self._debug = debug or NoDebugging() self._choose_filename() self._file_map = {} # Maps thread ids to SqliteDb objects. self._dbs = {} self._pid = os.getpid() # Synchronize the operations used during collection. self._lock = threading.Lock() # Are we in sync with the data file? self._have_used = False self._has_lines = False self._has_arcs = False self._current_context = None self._current_context_id = None self._query_context_ids = None def _locked(method): # pylint: disable=no-self-argument """A decorator for methods that should hold self._lock.""" @functools.wraps(method) def _wrapped(self, *args, **kwargs): with self._lock: # pylint: disable=not-callable return method(self, *args, **kwargs) return _wrapped def _choose_filename(self): """Set self._filename based on inited attributes.""" if self._no_disk: self._filename = ":memory:" else: self._filename = self._basename suffix = filename_suffix(self._suffix) if suffix: self._filename += "." + suffix def _reset(self): """Reset our attributes.""" if self._dbs: for db in self._dbs.values(): db.close() self._dbs = {} self._file_map = {} self._have_used = False self._current_context_id = None def _create_db(self): """Create a db file that doesn't exist yet. Initializes the schema and certain metadata. """ if self._debug.should("dataio"): self._debug.write(f"Creating data file {self._filename!r}") self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug) with db: db.executescript(SCHEMA) db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,)) db.executemany( "insert into meta (key, value) values (?, ?)", [ ("sys_argv", str(getattr(sys, "argv", None))), ("version", __version__), ("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), ] ) def _open_db(self): """Open an existing db file, and read its metadata.""" if self._debug.should("dataio"): self._debug.write(f"Opening data file {self._filename!r}") self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug) self._read_db() def _read_db(self): """Read the metadata from a database so that we are ready to use it.""" with self._dbs[threading.get_ident()] as db: try: schema_version, = db.execute_one("select version from coverage_schema") except Exception as exc: raise CoverageException( "Data file {!r} doesn't seem to be a coverage data file: {}".format( self._filename, exc ) ) from exc else: if schema_version != SCHEMA_VERSION: raise CoverageException( "Couldn't use data file {!r}: wrong schema: {} instead of {}".format( self._filename, schema_version, SCHEMA_VERSION ) ) for row in db.execute("select value from meta where key = 'has_arcs'"): self._has_arcs = bool(int(row[0])) self._has_lines = not self._has_arcs for path, file_id in db.execute("select path, id from file"): self._file_map[path] = file_id def _connect(self): """Get the SqliteDb object to use.""" if threading.get_ident() not in self._dbs: if os.path.exists(self._filename): self._open_db() else: self._create_db() return self._dbs[threading.get_ident()] def __nonzero__(self): if (threading.get_ident() not in self._dbs and not os.path.exists(self._filename)): return False try: with self._connect() as con: rows = con.execute("select * from file limit 1") return bool(list(rows)) except CoverageException: return False __bool__ = __nonzero__ @contract(returns="bytes") def dumps(self): """Serialize the 
current data to a byte string. The format of the serialized data is not documented. It is only suitable for use with :meth:`loads` in the same version of coverage.py. Note that this serialization is not what gets stored in coverage data files. This method is meant to produce bytes that can be transmitted elsewhere and then deserialized with :meth:`loads`. Returns: A byte string of serialized data. .. versionadded:: 5.0 """ if self._debug.should("dataio"): self._debug.write(f"Dumping data from data file {self._filename!r}") with self._connect() as con: return b"z" + zlib.compress(con.dump().encode("utf-8")) @contract(data="bytes") def loads(self, data): """Deserialize data from :meth:`dumps`. Use with a newly-created empty :class:`CoverageData` object. It's undefined what happens if the object already has data in it. Note that this is not for reading data from a coverage data file. It is only for use on data you produced with :meth:`dumps`. Arguments: data: A byte string of serialized data produced by :meth:`dumps`. .. versionadded:: 5.0 """ if self._debug.should("dataio"): self._debug.write(f"Loading data into data file {self._filename!r}") if data[:1] != b"z": raise CoverageException( f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)" ) script = zlib.decompress(data[1:]).decode("utf-8") self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug) with db: db.executescript(script) self._read_db() self._have_used = True def _file_id(self, filename, add=False): """Get the file id for `filename`. If filename is not in the database yet, add it if `add` is True. If `add` is not True, return None. """ if filename not in self._file_map: if add: with self._connect() as con: cur = con.execute("insert or replace into file (path) values (?)", (filename,)) self._file_map[filename] = cur.lastrowid return self._file_map.get(filename) def _context_id(self, context): """Get the id for a context.""" assert context is not None self._start_using() with self._connect() as con: row = con.execute_one("select id from context where context = ?", (context,)) if row is not None: return row[0] else: return None @_locked def set_context(self, context): """Set the current context for future :meth:`add_lines` etc. `context` is a str, the name of the context to use for the next data additions. The context persists until the next :meth:`set_context`. .. versionadded:: 5.0 """ if self._debug.should("dataop"): self._debug.write(f"Setting context: {context!r}") self._current_context = context self._current_context_id = None def _set_context_id(self): """Use the _current_context to set _current_context_id.""" context = self._current_context or "" context_id = self._context_id(context) if context_id is not None: self._current_context_id = context_id else: with self._connect() as con: cur = con.execute("insert into context (context) values (?)", (context,)) self._current_context_id = cur.lastrowid def base_filename(self): """The base filename for storing data. .. versionadded:: 5.0 """ return self._basename def data_filename(self): """Where is the data stored? .. versionadded:: 5.0 """ return self._filename @_locked def add_lines(self, line_data): """Add measured line data. `line_data` is a dictionary mapping file names to iterables of ints:: { filename: { line1, line2, ... 
}, ...} """ if self._debug.should("dataop"): self._debug.write("Adding lines: %d files, %d lines total" % ( len(line_data), sum(len(lines) for lines in line_data.values()) )) self._start_using() self._choose_lines_or_arcs(lines=True) if not line_data: return with self._connect() as con: self._set_context_id() for filename, linenos in line_data.items(): linemap = nums_to_numbits(linenos) file_id = self._file_id(filename, add=True) query = "select numbits from line_bits where file_id = ? and context_id = ?" existing = list(con.execute(query, (file_id, self._current_context_id))) if existing: linemap = numbits_union(linemap, existing[0][0]) con.execute( "insert or replace into line_bits " + " (file_id, context_id, numbits) values (?, ?, ?)", (file_id, self._current_context_id, linemap), ) @_locked def add_arcs(self, arc_data): """Add measured arc data. `arc_data` is a dictionary mapping file names to iterables of pairs of ints:: { filename: { (l1,l2), (l1,l2), ... }, ...} """ if self._debug.should("dataop"): self._debug.write("Adding arcs: %d files, %d arcs total" % ( len(arc_data), sum(len(arcs) for arcs in arc_data.values()) )) self._start_using() self._choose_lines_or_arcs(arcs=True) if not arc_data: return with self._connect() as con: self._set_context_id() for filename, arcs in arc_data.items(): file_id = self._file_id(filename, add=True) data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs] con.executemany( "insert or ignore into arc " + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", data, ) def _choose_lines_or_arcs(self, lines=False, arcs=False): """Force the data file to choose between lines and arcs.""" assert lines or arcs assert not (lines and arcs) if lines and self._has_arcs: raise CoverageException("Can't add line measurements to existing branch data") if arcs and self._has_lines: raise CoverageException("Can't add branch measurements to existing line data") if not self._has_arcs and not self._has_lines: self._has_lines = lines self._has_arcs = arcs with self._connect() as con: con.execute( "insert into meta (key, value) values (?, ?)", ("has_arcs", str(int(arcs))) ) @_locked def add_file_tracers(self, file_tracers): """Add per-file plugin information. `file_tracers` is { filename: plugin_name, ... } """ if self._debug.should("dataop"): self._debug.write("Adding file tracers: %d files" % (len(file_tracers),)) if not file_tracers: return self._start_using() with self._connect() as con: for filename, plugin_name in file_tracers.items(): file_id = self._file_id(filename) if file_id is None: raise CoverageException( f"Can't add file tracer data for unmeasured file '{filename}'" ) existing_plugin = self.file_tracer(filename) if existing_plugin: if existing_plugin != plugin_name: raise CoverageException( "Conflicting file tracer name for '{}': {!r} vs {!r}".format( filename, existing_plugin, plugin_name, ) ) elif plugin_name: con.execute( "insert into tracer (file_id, tracer) values (?, ?)", (file_id, plugin_name) ) def touch_file(self, filename, plugin_name=""): """Ensure that `filename` appears in the data, empty if needed. `plugin_name` is the name of the plugin responsible for this file. It is used to associate the right filereporter, etc. """ self.touch_files([filename], plugin_name) def touch_files(self, filenames, plugin_name=""): """Ensure that `filenames` appear in the data, empty if needed. `plugin_name` is the name of the plugin responsible for these files. It is used to associate the right filereporter, etc. 
""" if self._debug.should("dataop"): self._debug.write(f"Touching {filenames!r}") self._start_using() with self._connect(): # Use this to get one transaction. if not self._has_arcs and not self._has_lines: raise CoverageException("Can't touch files in an empty CoverageData") for filename in filenames: self._file_id(filename, add=True) if plugin_name: # Set the tracer for this file self.add_file_tracers({filename: plugin_name}) def update(self, other_data, aliases=None): """Update this data with data from several other :class:`CoverageData` instances. If `aliases` is provided, it's a `PathAliases` object that is used to re-map paths to match the local machine's. """ if self._debug.should("dataop"): self._debug.write("Updating with data from {!r}".format( getattr(other_data, "_filename", "???"), )) if self._has_lines and other_data._has_arcs: raise CoverageException("Can't combine arc data with line data") if self._has_arcs and other_data._has_lines: raise CoverageException("Can't combine line data with arc data") aliases = aliases or PathAliases() # Force the database we're writing to to exist before we start nesting # contexts. self._start_using() # Collector for all arcs, lines and tracers other_data.read() with other_data._connect() as conn: # Get files data. cur = conn.execute("select path from file") files = {path: aliases.map(path) for (path,) in cur} cur.close() # Get contexts data. cur = conn.execute("select context from context") contexts = [context for (context,) in cur] cur.close() # Get arc data. cur = conn.execute( "select file.path, context.context, arc.fromno, arc.tono " + "from arc " + "inner join file on file.id = arc.file_id " + "inner join context on context.id = arc.context_id" ) arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur] cur.close() # Get line data. cur = conn.execute( "select file.path, context.context, line_bits.numbits " + "from line_bits " + "inner join file on file.id = line_bits.file_id " + "inner join context on context.id = line_bits.context_id" ) lines = {(files[path], context): numbits for (path, context, numbits) in cur} cur.close() # Get tracer data. cur = conn.execute( "select file.path, tracer " + "from tracer " + "inner join file on file.id = tracer.file_id" ) tracers = {files[path]: tracer for (path, tracer) in cur} cur.close() with self._connect() as conn: conn.con.isolation_level = "IMMEDIATE" # Get all tracers in the DB. Files not in the tracers are assumed # to have an empty string tracer. Since Sqlite does not support # full outer joins, we have to make two queries to fill the # dictionary. this_tracers = {path: "" for path, in conn.execute("select path from file")} this_tracers.update({ aliases.map(path): tracer for path, tracer in conn.execute( "select file.path, tracer from tracer " + "inner join file on file.id = tracer.file_id" ) }) # Create all file and context rows in the DB. conn.executemany( "insert or ignore into file (path) values (?)", ((file,) for file in files.values()) ) file_ids = { path: id for id, path in conn.execute("select id, path from file") } conn.executemany( "insert or ignore into context (context) values (?)", ((context,) for context in contexts) ) context_ids = { context: id for id, context in conn.execute("select id, context from context") } # Prepare tracers and fail, if a conflict is found. # tracer_paths is used to ensure consistency over the tracer data # and tracer_map tracks the tracers to be inserted. 
tracer_map = {} for path in files.values(): this_tracer = this_tracers.get(path) other_tracer = tracers.get(path, "") # If there is no tracer, there is always the None tracer. if this_tracer is not None and this_tracer != other_tracer: raise CoverageException( "Conflicting file tracer name for '{}': {!r} vs {!r}".format( path, this_tracer, other_tracer ) ) tracer_map[path] = other_tracer # Prepare arc and line rows to be inserted by converting the file # and context strings with integer ids. Then use the efficient # `executemany()` to insert all rows at once. arc_rows = ( (file_ids[file], context_ids[context], fromno, tono) for file, context, fromno, tono in arcs ) # Get line data. cur = conn.execute( "select file.path, context.context, line_bits.numbits " + "from line_bits " + "inner join file on file.id = line_bits.file_id " + "inner join context on context.id = line_bits.context_id" ) for path, context, numbits in cur: key = (aliases.map(path), context) if key in lines: numbits = numbits_union(lines[key], numbits) lines[key] = numbits cur.close() if arcs: self._choose_lines_or_arcs(arcs=True) # Write the combined data. conn.executemany( "insert or ignore into arc " + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", arc_rows ) if lines: self._choose_lines_or_arcs(lines=True) conn.execute("delete from line_bits") conn.executemany( "insert into line_bits " + "(file_id, context_id, numbits) values (?, ?, ?)", [ (file_ids[file], context_ids[context], numbits) for (file, context), numbits in lines.items() ] ) conn.executemany( "insert or ignore into tracer (file_id, tracer) values (?, ?)", ((file_ids[filename], tracer) for filename, tracer in tracer_map.items()) ) # Update all internal cache data. self._reset() self.read() def erase(self, parallel=False): """Erase the data in this object. If `parallel` is true, then also deletes data files created from the basename by parallel-mode. """ self._reset() if self._no_disk: return if self._debug.should("dataio"): self._debug.write(f"Erasing data file {self._filename!r}") file_be_gone(self._filename) if parallel: data_dir, local = os.path.split(self._filename) localdot = local + ".*" pattern = os.path.join(os.path.abspath(data_dir), localdot) for filename in glob.glob(pattern): if self._debug.should("dataio"): self._debug.write(f"Erasing parallel data file {filename!r}") file_be_gone(filename) def read(self): """Start using an existing data file.""" with self._connect(): # TODO: doesn't look right self._have_used = True def write(self): """Ensure the data is written to the data file.""" pass def _start_using(self): """Call this before using the database at all.""" if self._pid != os.getpid(): # Looks like we forked! Have to start a new data file. self._reset() self._choose_filename() self._pid = os.getpid() if not self._have_used: self.erase() self._have_used = True def has_arcs(self): """Does the database have arcs (True) or lines (False).""" return bool(self._has_arcs) def measured_files(self): """A set of all files that had been measured.""" return set(self._file_map) def measured_contexts(self): """A set of all contexts that have been measured. .. versionadded:: 5.0 """ self._start_using() with self._connect() as con: contexts = {row[0] for row in con.execute("select distinct(context) from context")} return contexts def file_tracer(self, filename): """Get the plugin name of the file tracer for a file. Returns the name of the plugin that handles this file. If the file was measured, but didn't use a plugin, then "" is returned. 
        If the file was not measured, then None is returned.

        """
        self._start_using()
        with self._connect() as con:
            file_id = self._file_id(filename)
            if file_id is None:
                return None
            row = con.execute_one("select tracer from tracer where file_id = ?", (file_id,))
            if row is not None:
                return row[0] or ""
            return ""   # File was measured, but no tracer associated.

    def set_query_context(self, context):
        """Set a context for subsequent querying.

        The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
        calls will be limited to only one context.  `context` is a string
        which must match a context exactly.  If it does not, no exception is
        raised, but queries will return no data.

        .. versionadded:: 5.0

        """
        self._start_using()
        with self._connect() as con:
            cur = con.execute("select id from context where context = ?", (context,))
            self._query_context_ids = [row[0] for row in cur.fetchall()]

    def set_query_contexts(self, contexts):
        """Set a number of contexts for subsequent querying.

        The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
        calls will be limited to the specified contexts.  `contexts` is a list
        of Python regular expressions.  Contexts will be matched using
        :func:`re.search <python:re.search>`.  Data will be included in query
        results if they are part of any of the contexts matched.

        .. versionadded:: 5.0

        """
        self._start_using()
        if contexts:
            with self._connect() as con:
                context_clause = " or ".join(["context regexp ?"] * len(contexts))
                cur = con.execute("select id from context where " + context_clause, contexts)
                self._query_context_ids = [row[0] for row in cur.fetchall()]
        else:
            self._query_context_ids = None

    def lines(self, filename):
        """Get the list of lines executed for a source file.

        If the file was not measured, returns None.  A file might be measured,
        and have no lines executed, in which case an empty list is returned.

        If the file was executed, returns a list of integers, the line numbers
        executed in the file.  The list is in no particular order.

        """
        self._start_using()
        if self.has_arcs():
            arcs = self.arcs(filename)
            if arcs is not None:
                all_lines = itertools.chain.from_iterable(arcs)
                return list({l for l in all_lines if l > 0})

        with self._connect() as con:
            file_id = self._file_id(filename)
            if file_id is None:
                return None
            else:
                query = "select numbits from line_bits where file_id = ?"
                data = [file_id]
                if self._query_context_ids is not None:
                    ids_array = ", ".join("?" * len(self._query_context_ids))
                    query += " and context_id in (" + ids_array + ")"
                    data += self._query_context_ids
                bitmaps = list(con.execute(query, data))
                nums = set()
                for row in bitmaps:
                    nums.update(numbits_to_nums(row[0]))
                return list(nums)

    def arcs(self, filename):
        """Get the list of arcs executed for a file.

        If the file was not measured, returns None.  A file might be measured,
        and have no arcs executed, in which case an empty list is returned.

        If the file was executed, returns a list of 2-tuples of integers.
        Each pair is a starting line number and an ending line number for a
        transition from one line to another.  The list is in no particular
        order.

        Negative numbers have special meaning.  If the starting line number is
        -N, it represents an entry to the code object that starts at line N.
        If the ending line number is -N, it's an exit from the code object
        that starts at line N.

        """
        self._start_using()
        with self._connect() as con:
            file_id = self._file_id(filename)
            if file_id is None:
                return None
            else:
                query = "select distinct fromno, tono from arc where file_id = ?"
data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" * len(self._query_context_ids)) query += " and context_id in (" + ids_array + ")" data += self._query_context_ids arcs = con.execute(query, data) return list(arcs) def contexts_by_lineno(self, filename): """Get the contexts for each line in a file. Returns: A dict mapping line numbers to a list of context names. .. versionadded:: 5.0 """ self._start_using() with self._connect() as con: file_id = self._file_id(filename) if file_id is None: return {} lineno_contexts_map = collections.defaultdict(set) if self.has_arcs(): query = ( "select arc.fromno, arc.tono, context.context " + "from arc, context " + "where arc.file_id = ? and arc.context_id = context.id" ) data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" * len(self._query_context_ids)) query += " and arc.context_id in (" + ids_array + ")" data += self._query_context_ids for fromno, tono, context in con.execute(query, data): if fromno > 0: lineno_contexts_map[fromno].add(context) if tono > 0: lineno_contexts_map[tono].add(context) else: query = ( "select l.numbits, c.context from line_bits l, context c " + "where l.context_id = c.id " + "and file_id = ?" ) data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" * len(self._query_context_ids)) query += " and l.context_id in (" + ids_array + ")" data += self._query_context_ids for numbits, context in con.execute(query, data): for lineno in numbits_to_nums(numbits): lineno_contexts_map[lineno].add(context) return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()} @classmethod def sys_info(cls): """Our information for `Coverage.sys_info`. Returns a list of (key, value) pairs. """ with SqliteDb(":memory:", debug=NoDebugging()) as db: temp_store = [row[0] for row in db.execute("pragma temp_store")] copts = [row[0] for row in db.execute("pragma compile_options")] # Yes, this is overkill. I don't like the long list of options # at the end of "debug sys", but I don't want to omit information. copts = ["; ".join(copts[i:i + 3]) for i in range(0, len(copts), 3)] return [ ("sqlite3_version", sqlite3.version), ("sqlite3_sqlite_version", sqlite3.sqlite_version), ("sqlite3_temp_store", temp_store), ("sqlite3_compile_options", copts), ] class SqliteDb(SimpleReprMixin): """A simple abstraction over a SQLite database. Use as a context manager, then you can use it like a :class:`python:sqlite3.Connection` object:: with SqliteDb(filename, debug_control) as db: db.execute("insert into schema (version) values (?)", (SCHEMA_VERSION,)) """ def __init__(self, filename, debug): self.debug = debug if debug.should("sql") else None self.filename = filename self.nest = 0 self.con = None def _connect(self): """Connect to the db and do universal initialization.""" if self.con is not None: return # It can happen that Python switches threads while the tracer writes # data. The second thread will also try to write to the data, # effectively causing a nested context. However, given the idempotent # nature of the tracer operations, sharing a connection among threads # is not a problem. if self.debug: self.debug.write(f"Connecting to {self.filename!r}") try: self.con = sqlite3.connect(self.filename, check_same_thread=False) except sqlite3.Error as exc: raise CoverageException(f"Couldn't use data file {self.filename!r}: {exc}") from exc self.con.create_function("REGEXP", 2, _regexp) # This pragma makes writing faster. 
It disables rollbacks, but we never need them. # PyPy needs the .close() calls here, or sqlite gets twisted up: # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on self.execute("pragma journal_mode=off").close() # This pragma makes writing faster. self.execute("pragma synchronous=off").close() def close(self): """If needed, close the connection.""" if self.con is not None and self.filename != ":memory:": self.con.close() self.con = None def __enter__(self): if self.nest == 0: self._connect() self.con.__enter__() self.nest += 1 return self def __exit__(self, exc_type, exc_value, traceback): self.nest -= 1 if self.nest == 0: try: self.con.__exit__(exc_type, exc_value, traceback) self.close() except Exception as exc: if self.debug: self.debug.write(f"EXCEPTION from __exit__: {exc}") raise CoverageException(f"Couldn't end data file {self.filename!r}: {exc}") from exc def execute(self, sql, parameters=()): """Same as :meth:`python:sqlite3.Connection.execute`.""" if self.debug: tail = f" with {parameters!r}" if parameters else "" self.debug.write(f"Executing {sql!r}{tail}") try: try: return self.con.execute(sql, parameters) except Exception: # In some cases, an error might happen that isn't really an # error. Try again immediately. # https://github.com/nedbat/coveragepy/issues/1010 return self.con.execute(sql, parameters) except sqlite3.Error as exc: msg = str(exc) try: # `execute` is the first thing we do with the database, so try # hard to provide useful hints if something goes wrong now. with open(self.filename, "rb") as bad_file: cov4_sig = b"!coverage.py: This is a private format" if bad_file.read(len(cov4_sig)) == cov4_sig: msg = ( "Looks like a coverage 4.x data file. " + "Are you mixing versions of coverage?" ) except Exception: # pragma: cant happen pass if self.debug: self.debug.write(f"EXCEPTION from execute: {msg}") raise CoverageException(f"Couldn't use data file {self.filename!r}: {msg}") from exc def execute_one(self, sql, parameters=()): """Execute a statement and return the one row that results. This is like execute(sql, parameters).fetchone(), except it is correct in reading the entire result set. This will raise an exception if more than one row results. Returns a row, or None if there were no rows. """ rows = list(self.execute(sql, parameters)) if len(rows) == 0: return None elif len(rows) == 1: return rows[0] else: raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows") def executemany(self, sql, data): """Same as :meth:`python:sqlite3.Connection.executemany`.""" if self.debug: data = list(data) self.debug.write(f"Executing many {sql!r} with {len(data)} rows") try: return self.con.executemany(sql, data) except Exception: # pragma: cant happen # In some cases, an error might happen that isn't really an # error. Try again immediately. # https://github.com/nedbat/coveragepy/issues/1010 return self.con.executemany(sql, data) def executescript(self, script): """Same as :meth:`python:sqlite3.Connection.executescript`.""" if self.debug: self.debug.write("Executing script with {} chars: {}".format( len(script), clipped_repr(script, 100), )) self.con.executescript(script) def dump(self): """Return a multi-line string, the SQL dump of the database.""" return "\n".join(self.con.iterdump()) def _regexp(text, pattern): """A regexp function for SQLite.""" return re.search(text, pattern) is not None
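

# Note: SQLite invokes the REGEXP user function as regexp(pattern, string),
# so in _regexp above the first parameter receives the pattern despite its
# name, and the re.search() call relies on that calling convention.

# Editor's sketch (not part of coverage.py): the dumps()/loads() round trip
# described in the docstrings above, using in-memory CoverageData objects.
# The measured file name "demo.py" is a hypothetical placeholder.
if __name__ == "__main__":
    demo = CoverageData(no_disk=True)
    demo.add_lines({"demo.py": {1, 2, 3}})
    blob = demo.dumps()                     # b"z" + zlib-compressed SQL dump

    clone = CoverageData(no_disk=True)
    clone.loads(blob)                       # valid only in the same version
    assert sorted(clone.lines("demo.py")) == [1, 2, 3]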
44,899
Python
38.043478
100
0.559946
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/annotate.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Source file annotation for coverage.py.""" import os import re from coverage.files import flat_rootname from coverage.misc import ensure_dir, isolate_module from coverage.report import get_analysis_to_report os = isolate_module(os) class AnnotateReporter: """Generate annotated source files showing line coverage. This reporter creates annotated copies of the measured source files. Each .py file is copied as a .py,cover file, with a left-hand margin annotating each line:: > def h(x): - if 0: #pragma: no cover - pass > if x == 1: ! a = 1 > else: > a = 2 > h(2) Executed lines use '>', lines not executed use '!', lines excluded from consideration use '-'. """ def __init__(self, coverage): self.coverage = coverage self.config = self.coverage.config self.directory = None blank_re = re.compile(r"\s*(#|$)") else_re = re.compile(r"\s*else\s*:\s*(#|$)") def report(self, morfs, directory=None): """Run the report. See `coverage.report()` for arguments. """ self.directory = directory self.coverage.get_data() for fr, analysis in get_analysis_to_report(self.coverage, morfs): self.annotate_file(fr, analysis) def annotate_file(self, fr, analysis): """Annotate a single file. `fr` is the FileReporter for the file to annotate. """ statements = sorted(analysis.statements) missing = sorted(analysis.missing) excluded = sorted(analysis.excluded) if self.directory: ensure_dir(self.directory) dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename())) if dest_file.endswith("_py"): dest_file = dest_file[:-3] + ".py" dest_file += ",cover" else: dest_file = fr.filename + ",cover" with open(dest_file, 'w', encoding='utf-8') as dest: i = j = 0 covered = True source = fr.source() for lineno, line in enumerate(source.splitlines(True), start=1): while i < len(statements) and statements[i] < lineno: i += 1 while j < len(missing) and missing[j] < lineno: j += 1 if i < len(statements) and statements[i] == lineno: covered = j >= len(missing) or missing[j] > lineno if self.blank_re.match(line): dest.write(' ') elif self.else_re.match(line): # Special logic for lines containing only 'else:'. if j >= len(missing): dest.write('> ') elif statements[i] == missing[j]: dest.write('! ') else: dest.write('> ') elif lineno in excluded: dest.write('- ') elif covered: dest.write('> ') else: dest.write('! ') dest.write(line)
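

# Editor's sketch (not part of coverage.py): driving this reporter through
# the public API.  It writes a throwaway "annotate_demo.py" in the current
# directory and produces "annotate_demo.py,cover" alongside it.
if __name__ == "__main__":
    import runpy
    import coverage

    with open("annotate_demo.py", "w", encoding="utf-8") as f:
        f.write("def h(x):\n    if x == 1:\n        a = 1\n    else:\n        a = 2\n\nh(2)\n")

    cov = coverage.Coverage()
    cov.start()
    runpy.run_path("annotate_demo.py")      # execute the sample under measurement
    cov.stop()
    cov.annotate(morfs=["annotate_demo.py"])    # writes annotate_demo.py,cover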
3,381
Python
31.209524
91
0.524993
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/plugin.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ .. versionadded:: 4.0 Plug-in interfaces for coverage.py. Coverage.py supports a few different kinds of plug-ins that change its behavior: * File tracers implement tracing of non-Python file types. * Configurers add custom configuration, using Python code to change the configuration. * Dynamic context switchers decide when the dynamic context has changed, for example, to record what test function produced the coverage. To write a coverage.py plug-in, create a module with a subclass of :class:`~coverage.CoveragePlugin`. You will override methods in your class to participate in various aspects of coverage.py's processing. Different types of plug-ins have to override different methods. Any plug-in can optionally implement :meth:`~coverage.CoveragePlugin.sys_info` to provide debugging information about their operation. Your module must also contain a ``coverage_init`` function that registers an instance of your plug-in class:: import coverage class MyPlugin(coverage.CoveragePlugin): ... def coverage_init(reg, options): reg.add_file_tracer(MyPlugin()) You use the `reg` parameter passed to your ``coverage_init`` function to register your plug-in object. The registration method you call depends on what kind of plug-in it is. If your plug-in takes options, the `options` parameter is a dictionary of your plug-in's options from the coverage.py configuration file. Use them however you want to configure your object before registering it. Coverage.py will store its own information on your plug-in object, using attributes whose names start with ``_coverage_``. Don't be startled. .. warning:: Plug-ins are imported by coverage.py before it begins measuring code. If you write a plugin in your own project, it might import your product code before coverage.py can start measuring. This can result in your own code being reported as missing. One solution is to put your plugins in your project tree, but not in your importable Python package. .. _file_tracer_plugins: File Tracers ============ File tracers implement measurement support for non-Python files. File tracers implement the :meth:`~coverage.CoveragePlugin.file_tracer` method to claim files and the :meth:`~coverage.CoveragePlugin.file_reporter` method to report on those files. In your ``coverage_init`` function, use the ``add_file_tracer`` method to register your file tracer. .. _configurer_plugins: Configurers =========== .. versionadded:: 4.5 Configurers modify the configuration of coverage.py during start-up. Configurers implement the :meth:`~coverage.CoveragePlugin.configure` method to change the configuration. In your ``coverage_init`` function, use the ``add_configurer`` method to register your configurer. .. _dynamic_context_plugins: Dynamic Context Switchers ========================= .. versionadded:: 5.0 Dynamic context switcher plugins implement the :meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute the context label for each measured frame. Computed context labels are useful when you want to group measured data without modifying the source code. For example, you could write a plugin that checks `frame.f_code` to inspect the currently executed method, and set the context label to a fully qualified method name if it's an instance method of `unittest.TestCase` and the method name starts with 'test'. 
Such a plugin would provide basic coverage grouping by test and could be used with test runners that have no built-in coveragepy support. In your ``coverage_init`` function, use the ``add_dynamic_context`` method to register your dynamic context switcher. """ from coverage import files from coverage.misc import contract, _needs_to_implement class CoveragePlugin: """Base class for coverage.py plug-ins.""" def file_tracer(self, filename): # pylint: disable=unused-argument """Get a :class:`FileTracer` object for a file. Plug-in type: file tracer. Every Python source file is offered to your plug-in to give it a chance to take responsibility for tracing the file. If your plug-in can handle the file, it should return a :class:`FileTracer` object. Otherwise return None. There is no way to register your plug-in for particular files. Instead, this method is invoked for all files as they are executed, and the plug-in decides whether it can trace the file or not. Be prepared for `filename` to refer to all kinds of files that have nothing to do with your plug-in. The file name will be a Python file being executed. There are two broad categories of behavior for a plug-in, depending on the kind of files your plug-in supports: * Static file names: each of your original source files has been converted into a distinct Python file. Your plug-in is invoked with the Python file name, and it maps it back to its original source file. * Dynamic file names: all of your source files are executed by the same Python file. In this case, your plug-in implements :meth:`FileTracer.dynamic_source_filename` to provide the actual source file for each execution frame. `filename` is a string, the path to the file being considered. This is the absolute real path to the file. If you are comparing to other paths, be sure to take this into account. Returns a :class:`FileTracer` object to use to trace `filename`, or None if this plug-in cannot trace this file. """ return None def file_reporter(self, filename): # pylint: disable=unused-argument """Get the :class:`FileReporter` class to use for a file. Plug-in type: file tracer. This will only be invoked if `filename` returns non-None from :meth:`file_tracer`. It's an error to return None from this method. Returns a :class:`FileReporter` object to use to report on `filename`, or the string `"python"` to have coverage.py treat the file as Python. """ _needs_to_implement(self, "file_reporter") def dynamic_context(self, frame): # pylint: disable=unused-argument """Get the dynamically computed context label for `frame`. Plug-in type: dynamic context. This method is invoked for each frame when outside of a dynamic context, to see if a new dynamic context should be started. If it returns a string, a new context label is set for this and deeper frames. The dynamic context ends when this frame returns. Returns a string to start a new dynamic context, or None if no new context should be started. """ return None def find_executable_files(self, src_dir): # pylint: disable=unused-argument """Yield all of the executable files in `src_dir`, recursively. Plug-in type: file tracer. Executability is a plug-in-specific property, but generally means files which would have been considered for coverage analysis, had they been included automatically. Returns or yields a sequence of strings, the paths to files that could have been executed, including files that had been executed. """ return [] def configure(self, config): """Modify the configuration of coverage.py. Plug-in type: configurer. 
        This method is called during coverage.py start-up, to give your
        plug-in a chance to change the configuration.

        The `config` parameter is an object with
        :meth:`~coverage.Coverage.get_option` and
        :meth:`~coverage.Coverage.set_option` methods.  Do not call any other
        methods on the `config` object.

        """
        pass

    def sys_info(self):
        """Get a list of information useful for debugging.

        Plug-in type: any.

        This method will be invoked for ``--debug=sys``.  Your
        plug-in can return any information it wants to be displayed.

        Returns a list of pairs: `[(name, value), ...]`.

        """
        return []


class FileTracer:
    """Support needed for files during the execution phase.

    File tracer plug-ins implement subclasses of FileTracer to return from
    their :meth:`~CoveragePlugin.file_tracer` method.

    You may construct this object from :meth:`CoveragePlugin.file_tracer` any
    way you like.  A natural choice would be to pass the file name given to
    `file_tracer`.

    `FileTracer` objects should only be created in the
    :meth:`CoveragePlugin.file_tracer` method.

    See :ref:`howitworks` for details of the different coverage.py phases.

    """

    def source_filename(self):
        """The source file name for this file.

        This may be any file name you like.  A key responsibility of a plug-in
        is to own the mapping from Python execution back to whatever source
        file name was originally the source of the code.

        See :meth:`CoveragePlugin.file_tracer` for details about static and
        dynamic file names.

        Returns the file name to credit with this execution.

        """
        _needs_to_implement(self, "source_filename")

    def has_dynamic_source_filename(self):
        """Does this FileTracer have dynamic source file names?

        FileTracers can provide dynamically determined file names by
        implementing :meth:`dynamic_source_filename`.  Invoking that function
        is expensive.  To determine whether to invoke it, coverage.py uses the
        result of this function to know if it needs to bother invoking
        :meth:`dynamic_source_filename`.

        See :meth:`CoveragePlugin.file_tracer` for details about static and
        dynamic file names.

        Returns True if :meth:`dynamic_source_filename` should be called to
        get dynamic source file names.

        """
        return False

    def dynamic_source_filename(self, filename, frame):     # pylint: disable=unused-argument
        """Get a dynamically computed source file name.

        Some plug-ins need to compute the source file name dynamically for
        each frame.

        This function will not be invoked if
        :meth:`has_dynamic_source_filename` returns False.

        Returns the source file name for this frame, or None if this frame
        shouldn't be measured.

        """
        return None

    def line_number_range(self, frame):
        """Get the range of source line numbers for a given call frame.

        The call frame is examined, and the source line number in the original
        file is returned.  The return value is a pair of numbers, the starting
        line number and the ending line number, both inclusive.  For example,
        returning (5, 7) means that lines 5, 6, and 7 should be considered
        executed.

        This function might decide that the frame doesn't indicate any lines
        from the source file were executed.  Return (-1, -1) in this case to
        tell coverage.py that no lines should be recorded for this frame.

        """
        lineno = frame.f_lineno
        return lineno, lineno


class FileReporter:
    """Support needed for files during the analysis and reporting phases.

    File tracer plug-ins implement a subclass of `FileReporter`, and return
    instances from their :meth:`CoveragePlugin.file_reporter` method.

    There are many methods here, but only :meth:`lines` is required, to
    provide the set of executable lines in the file.
    See :ref:`howitworks` for details of the different coverage.py phases.

    """

    def __init__(self, filename):
        """Simple initialization of a `FileReporter`.

        The `filename` argument is the path to the file being reported.  This
        will be available as the `.filename` attribute on the object.  Other
        method implementations on this base class rely on this attribute.

        """
        self.filename = filename

    def __repr__(self):
        return "<{0.__class__.__name__} filename={0.filename!r}>".format(self)

    def relative_filename(self):
        """Get the relative file name for this file.

        This file path will be displayed in reports.  The default
        implementation will supply the actual project-relative file path.  You
        only need to supply this method if you have an unusual syntax for file
        paths.

        """
        return files.relative_filename(self.filename)

    @contract(returns='unicode')
    def source(self):
        """Get the source for the file.

        Returns a Unicode string.

        The base implementation simply reads the `self.filename` file and
        decodes it as UTF-8.  Override this method if your file isn't readable
        as a text file, or if you need other encoding support.

        """
        with open(self.filename, "rb") as f:
            return f.read().decode("utf-8")

    def lines(self):
        """Get the executable lines in this file.

        Your plug-in must determine which lines in the file were possibly
        executable.  This method returns a set of those line numbers.

        Returns a set of line numbers.

        """
        _needs_to_implement(self, "lines")

    def excluded_lines(self):
        """Get the excluded executable lines in this file.

        Your plug-in can use any method it likes to allow the user to exclude
        executable lines from consideration.

        Returns a set of line numbers.

        The base implementation returns the empty set.

        """
        return set()

    def translate_lines(self, lines):
        """Translate recorded lines into reported lines.

        Some file formats will want to report lines slightly differently than
        they are recorded.  For example, Python records the last line of a
        multi-line statement, but reports are nicer if they mention the first
        line.

        Your plug-in can optionally define this method to perform these kinds
        of adjustment.

        `lines` is a sequence of integers, the recorded line numbers.

        Returns a set of integers, the adjusted line numbers.

        The base implementation returns the numbers unchanged.

        """
        return set(lines)

    def arcs(self):
        """Get the executable arcs in this file.

        To support branch coverage, your plug-in needs to be able to indicate
        possible execution paths, as a set of line number pairs.  Each pair is
        a `(prev, next)` pair indicating that execution can transition from
        the `prev` line number to the `next` line number.

        Returns a set of pairs of line numbers.  The default implementation
        returns an empty set.

        """
        return set()

    def no_branch_lines(self):
        """Get the lines excused from branch coverage in this file.

        Your plug-in can use any method it likes to allow the user to exclude
        lines from consideration of branch coverage.

        Returns a set of line numbers.

        The base implementation returns the empty set.

        """
        return set()

    def translate_arcs(self, arcs):
        """Translate recorded arcs into reported arcs.

        Similar to :meth:`translate_lines`, but for arcs.  `arcs` is a set of
        line number pairs.

        Returns a set of line number pairs.

        The default implementation returns `arcs` unchanged.

        """
        return arcs

    def exit_counts(self):
        """Get a count of exits from each line.

        To determine which lines are branches, coverage.py looks for lines
        that have more than one exit.  This function creates a dict mapping
        each executable line number to a count of how many exits it has.
To be honest, this feels wrong, and should be refactored. Let me know if you attempt to implement this method in your plug-in... """ return {} def missing_arc_description(self, start, end, executed_arcs=None): # pylint: disable=unused-argument """Provide an English sentence describing a missing arc. The `start` and `end` arguments are the line numbers of the missing arc. Negative numbers indicate entering or exiting code objects. The `executed_arcs` argument is a set of line number pairs, the arcs that were executed in this file. By default, this simply returns the string "Line {start} didn't jump to {end}". """ return f"Line {start} didn't jump to line {end}" def source_token_lines(self): """Generate a series of tokenized lines, one for each line in `source`. These tokens are used for syntax-colored reports. Each line is a list of pairs, each pair is a token:: [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ] Each pair has a token class, and the token text. The token classes are: * ``'com'``: a comment * ``'key'``: a keyword * ``'nam'``: a name, or identifier * ``'num'``: a number * ``'op'``: an operator * ``'str'``: a string literal * ``'ws'``: some white space * ``'txt'``: some other kind of text If you concatenate all the token texts, and then join them with newlines, you should have your original source back. The default implementation simply returns each line tagged as ``'txt'``. """ for line in self.source().splitlines(): yield [('txt', line)] # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all # of them defined. def __eq__(self, other): return isinstance(other, FileReporter) and self.filename == other.filename def __ne__(self, other): return not (self == other) def __lt__(self, other): return self.filename < other.filename def __le__(self, other): return self.filename <= other.filename def __gt__(self, other): return self.filename > other.filename def __ge__(self, other): return self.filename >= other.filename __hash__ = None # This object doesn't need to be hashed.
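

# Editor's sketch (not part of coverage.py): a complete, minimal configurer
# plug-in built from the interfaces above.  In a real project these
# definitions live in your own module, which is then named in the [run]
# plugins setting; the option forced here ("run:timid") is just an
# illustration.
class _ExampleConfigurer(CoveragePlugin):
    """Force the pure-Python tracer by changing one option."""

    def configure(self, config):
        config.set_option("run:timid", True)


def coverage_init(reg, options):    # pylint: disable=unused-argument
    """The entry point coverage.py calls to register the plug-in."""
    reg.add_configurer(_ExampleConfigurer())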
18,499
Python
33.644195
108
0.670307
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/__main__.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Coverage.py's main entry point.""" import sys from coverage.cmdline import main sys.exit(main())
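
# Editor's note: the sys.exit() call above runs at import time, so nothing
# placed after it can execute.  A hedged equivalent of `python -m coverage`
# from code (the argv values are illustrative):
#
#     from coverage.cmdline import main
#     exit_code = main(["run", "my_script.py"])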
257
Python
27.666664
79
0.758755
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/xmlreport.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """XML reporting for coverage.py""" import os import os.path import sys import time import xml.dom.minidom from coverage import __url__, __version__, files from coverage.misc import isolate_module, human_sorted, human_sorted_items from coverage.report import get_analysis_to_report os = isolate_module(os) DTD_URL = 'https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd' def rate(hit, num): """Return the fraction of `hit`/`num`, as a string.""" if num == 0: return "1" else: return "%.4g" % (float(hit) / num) class XmlReporter: """A reporter for writing Cobertura-style XML coverage results.""" report_type = "XML report" def __init__(self, coverage): self.coverage = coverage self.config = self.coverage.config self.source_paths = set() if self.config.source: for src in self.config.source: if os.path.exists(src): if not self.config.relative_files: src = files.canonical_filename(src) self.source_paths.add(src) self.packages = {} self.xml_out = None def report(self, morfs, outfile=None): """Generate a Cobertura-compatible XML report for `morfs`. `morfs` is a list of modules or file names. `outfile` is a file object to write the XML to. """ # Initial setup. outfile = outfile or sys.stdout has_arcs = self.coverage.get_data().has_arcs() # Create the DOM that will store the data. impl = xml.dom.minidom.getDOMImplementation() self.xml_out = impl.createDocument(None, "coverage", None) # Write header stuff. xcoverage = self.xml_out.documentElement xcoverage.setAttribute("version", __version__) xcoverage.setAttribute("timestamp", str(int(time.time()*1000))) xcoverage.appendChild(self.xml_out.createComment( " Generated by coverage.py: %s " % __url__ )) xcoverage.appendChild(self.xml_out.createComment(" Based on %s " % DTD_URL)) # Call xml_file for each file in the data. for fr, analysis in get_analysis_to_report(self.coverage, morfs): self.xml_file(fr, analysis, has_arcs) xsources = self.xml_out.createElement("sources") xcoverage.appendChild(xsources) # Populate the XML DOM with the source info. for path in human_sorted(self.source_paths): xsource = self.xml_out.createElement("source") xsources.appendChild(xsource) txt = self.xml_out.createTextNode(path) xsource.appendChild(txt) lnum_tot, lhits_tot = 0, 0 bnum_tot, bhits_tot = 0, 0 xpackages = self.xml_out.createElement("packages") xcoverage.appendChild(xpackages) # Populate the XML DOM with the package info. 
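        # Each value in self.packages is a list [class_elts, lhits, lnum,
        # bhits, bnum], accumulated one file at a time by xml_file() below.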
for pkg_name, pkg_data in human_sorted_items(self.packages.items()): class_elts, lhits, lnum, bhits, bnum = pkg_data xpackage = self.xml_out.createElement("package") xpackages.appendChild(xpackage) xclasses = self.xml_out.createElement("classes") xpackage.appendChild(xclasses) for _, class_elt in human_sorted_items(class_elts.items()): xclasses.appendChild(class_elt) xpackage.setAttribute("name", pkg_name.replace(os.sep, '.')) xpackage.setAttribute("line-rate", rate(lhits, lnum)) if has_arcs: branch_rate = rate(bhits, bnum) else: branch_rate = "0" xpackage.setAttribute("branch-rate", branch_rate) xpackage.setAttribute("complexity", "0") lnum_tot += lnum lhits_tot += lhits bnum_tot += bnum bhits_tot += bhits xcoverage.setAttribute("lines-valid", str(lnum_tot)) xcoverage.setAttribute("lines-covered", str(lhits_tot)) xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot)) if has_arcs: xcoverage.setAttribute("branches-valid", str(bnum_tot)) xcoverage.setAttribute("branches-covered", str(bhits_tot)) xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot)) else: xcoverage.setAttribute("branches-covered", "0") xcoverage.setAttribute("branches-valid", "0") xcoverage.setAttribute("branch-rate", "0") xcoverage.setAttribute("complexity", "0") # Write the output file. outfile.write(serialize_xml(self.xml_out)) # Return the total percentage. denom = lnum_tot + bnum_tot if denom == 0: pct = 0.0 else: pct = 100.0 * (lhits_tot + bhits_tot) / denom return pct def xml_file(self, fr, analysis, has_arcs): """Add to the XML report for a single file.""" if self.config.skip_empty: if analysis.numbers.n_statements == 0: return # Create the 'lines' and 'package' XML elements, which # are populated later. Note that a package == a directory. filename = fr.filename.replace("\\", "/") for source_path in self.source_paths: source_path = files.canonical_filename(source_path) if filename.startswith(source_path.replace("\\", "/") + "/"): rel_name = filename[len(source_path)+1:] break else: rel_name = fr.relative_filename() self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/")) dirname = os.path.dirname(rel_name) or "." dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth]) package_name = dirname.replace("/", ".") package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0]) xclass = self.xml_out.createElement("class") xclass.appendChild(self.xml_out.createElement("methods")) xlines = self.xml_out.createElement("lines") xclass.appendChild(xlines) xclass.setAttribute("name", os.path.relpath(rel_name, dirname)) xclass.setAttribute("filename", rel_name.replace("\\", "/")) xclass.setAttribute("complexity", "0") branch_stats = analysis.branch_stats() missing_branch_arcs = analysis.missing_branch_arcs() # For each statement, create an XML 'line' element. for line in sorted(analysis.statements): xline = self.xml_out.createElement("line") xline.setAttribute("number", str(line)) # Q: can we get info about the number of times a statement is # executed? If so, that should be recorded here. 
xline.setAttribute("hits", str(int(line not in analysis.missing))) if has_arcs: if line in branch_stats: total, taken = branch_stats[line] xline.setAttribute("branch", "true") xline.setAttribute( "condition-coverage", "%d%% (%d/%d)" % (100*taken//total, taken, total) ) if line in missing_branch_arcs: annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]] xline.setAttribute("missing-branches", ",".join(annlines)) xlines.appendChild(xline) class_lines = len(analysis.statements) class_hits = class_lines - len(analysis.missing) if has_arcs: class_branches = sum(t for t, k in branch_stats.values()) missing_branches = sum(t - k for t, k in branch_stats.values()) class_br_hits = class_branches - missing_branches else: class_branches = 0.0 class_br_hits = 0.0 # Finalize the statistics that are collected in the XML DOM. xclass.setAttribute("line-rate", rate(class_hits, class_lines)) if has_arcs: branch_rate = rate(class_br_hits, class_branches) else: branch_rate = "0" xclass.setAttribute("branch-rate", branch_rate) package[0][rel_name] = xclass package[1] += class_hits package[2] += class_lines package[3] += class_br_hits package[4] += class_branches def serialize_xml(dom): """Serialize a minidom node to XML.""" return dom.toprettyxml()
8,646
Python
36.4329
95
0.588943
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/tomlconfig.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """TOML configuration support for coverage.py""" import configparser import os import re from coverage.exceptions import CoverageException from coverage.misc import import_third_party, substitute_variables # TOML support is an install-time extra option. (Import typing is here because # import_third_party will unload any module that wasn't already imported. # tomli imports typing, and if we unload it, later it's imported again, and on # Python 3.6, this causes infinite recursion.) import typing # pylint: disable=unused-import, wrong-import-order tomli = import_third_party("tomli") class TomlDecodeError(Exception): """An exception class that exists even when toml isn't installed.""" pass class TomlConfigParser: """TOML file reading with the interface of HandyConfigParser.""" # This class has the same interface as config.HandyConfigParser, no # need for docstrings. # pylint: disable=missing-function-docstring def __init__(self, our_file): self.our_file = our_file self.data = None def read(self, filenames): # RawConfigParser takes a filename or list of filenames, but we only # ever call this with a single filename. assert isinstance(filenames, (bytes, str, os.PathLike)) filename = os.fspath(filenames) try: with open(filename, encoding='utf-8') as fp: toml_text = fp.read() except OSError: return [] if tomli is not None: toml_text = substitute_variables(toml_text, os.environ) try: self.data = tomli.loads(toml_text) except tomli.TOMLDecodeError as err: raise TomlDecodeError(str(err)) from err return [filename] else: has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE) if self.our_file or has_toml: # Looks like they meant to read TOML, but we can't read it. msg = "Can't read {!r} without TOML support. Install with [toml] extra" raise CoverageException(msg.format(filename)) return [] def _get_section(self, section): """Get a section from the data. Arguments: section (str): A section name, which can be dotted. Returns: name (str): the actual name of the section that was found, if any, or None. data (str): the dict of data in the section, or None if not found. 
""" prefixes = ["tool.coverage."] if self.our_file: prefixes.append("") for prefix in prefixes: real_section = prefix + section parts = real_section.split(".") try: data = self.data[parts[0]] for part in parts[1:]: data = data[part] except KeyError: continue break else: return None, None return real_section, data def _get(self, section, option): """Like .get, but returns the real section name and the value.""" name, data = self._get_section(section) if data is None: raise configparser.NoSectionError(section) try: return name, data[option] except KeyError as exc: raise configparser.NoOptionError(option, name) from exc def has_option(self, section, option): _, data = self._get_section(section) if data is None: return False return option in data def has_section(self, section): name, _ = self._get_section(section) return name def options(self, section): _, data = self._get_section(section) if data is None: raise configparser.NoSectionError(section) return list(data.keys()) def get_section(self, section): _, data = self._get_section(section) return data def get(self, section, option): _, value = self._get(section, option) return value def _check_type(self, section, option, value, type_, type_desc): if not isinstance(value, type_): raise ValueError( 'Option {!r} in section {!r} is not {}: {!r}' .format(option, section, type_desc, value) ) def getboolean(self, section, option): name, value = self._get(section, option) self._check_type(name, option, value, bool, "a boolean") return value def getlist(self, section, option): name, values = self._get(section, option) self._check_type(name, option, values, list, "a list") return values def getregexlist(self, section, option): name, values = self._get(section, option) self._check_type(name, option, values, list, "a list") for value in values: value = value.strip() try: re.compile(value) except re.error as e: raise CoverageException(f"Invalid [{name}].{option} value {value!r}: {e}") from e return values def getint(self, section, option): name, value = self._get(section, option) self._check_type(name, option, value, int, "an integer") return value def getfloat(self, section, option): name, value = self._get(section, option) if isinstance(value, int): value = float(value) self._check_type(name, option, value, float, "a float") return value
5,733
Python
33.751515
97
0.5955
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/fullcoverage/encodings.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Imposter encodings module that installs a coverage-style tracer.

This is NOT the encodings module; it is an imposter that sets up tracing
instrumentation and then replaces itself with the real encodings module.

If the directory that holds this file is placed first in the PYTHONPATH when
using "coverage" to run Python's tests, then this file will become the very
first module imported by the internals of Python 3.  It installs a
coverage.py-compatible trace function that can watch Standard Library modules
execute from the very earliest stages of Python's own boot process.  This
fixes a problem with coverage.py - that it starts too late to trace the
coverage of many of the most fundamental modules in the Standard Library.

"""

import sys

class FullCoverageTracer:
    def __init__(self):
        # `traces` is a list of trace events.  Frames are tricky: the same
        # frame object is used for a whole scope, with new line numbers
        # written into it.  So in one scope, all the frame objects are the
        # same object, and will eventually all point to the last line
        # executed.  So we keep the line numbers alongside the frames.
        # The list looks like:
        #
        #   traces = [
        #       ((frame, event, arg), lineno), ...
        #   ]
        #
        self.traces = []

    def fullcoverage_trace(self, *args):
        frame, event, arg = args
        self.traces.append((args, frame.f_lineno))
        return self.fullcoverage_trace

sys.settrace(FullCoverageTracer().fullcoverage_trace)

# In coverage/files.py is actual_filename(), which uses glob.glob.  I don't
# understand why, but that use of glob borks everything if fullcoverage is in
# effect.  So here we make an ugly hail-mary pass to switch off glob.glob over
# there.  This means when using fullcoverage, Windows path names will not be
# their actual case.

#sys.fullcoverage = True

# Finally, remove our own directory from sys.path; remove ourselves from
# sys.modules; and re-import "encodings", which will be the real package
# this time.  Note that the delete from sys.modules dictionary has to
# happen last, since all of the symbols in this module will become None
# at that exact moment, including "sys".

parentdir = max(filter(__file__.startswith, sys.path), key=len)
sys.path.remove(parentdir)
del sys.modules['encodings']
import encodings
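
# Editor's note: a sketch of inspecting what the tracer collects, assuming
# you construct it yourself instead of relying on the settrace call above:
#
#     tracer = FullCoverageTracer()
#     sys.settrace(tracer.fullcoverage_trace)
#     ...                                   # run the code to be watched
#     sys.settrace(None)
#     for (frame, event, arg), lineno in tracer.traces:
#         print(event, frame.f_code.co_filename, lineno)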
2,540
Python
40.655737
79
0.720472
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click-8.1.3.dist-info/LICENSE.rst
Copyright 2014 Pallets Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1,475
reStructuredText
49.89655
72
0.80678
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/winterm.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. try: from msvcrt import get_osfhandle except ImportError: def get_osfhandle(_): raise OSError("This isn't windows!") from . import win32 # from wincon.h class WinColor(object): BLACK = 0 BLUE = 1 GREEN = 2 CYAN = 3 RED = 4 MAGENTA = 5 YELLOW = 6 GREY = 7 # from wincon.h class WinStyle(object): NORMAL = 0x00 # dim text, dim background BRIGHT = 0x08 # bright text, dim background BRIGHT_BACKGROUND = 0x80 # dim text, bright background class WinTerm(object): def __init__(self): self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes self.set_attrs(self._default) self._default_fore = self._fore self._default_back = self._back self._default_style = self._style # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. # So that LIGHT_EX colors and BRIGHT style do not clobber each other, # we track them separately, since LIGHT_EX is overwritten by Fore/Back # and BRIGHT is overwritten by Style codes. self._light = 0 def get_attrs(self): return self._fore + self._back * 16 + (self._style | self._light) def set_attrs(self, value): self._fore = value & 7 self._back = (value >> 4) & 7 self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) def reset_all(self, on_stderr=None): self.set_attrs(self._default) self.set_console(attrs=self._default) self._light = 0 def fore(self, fore=None, light=False, on_stderr=False): if fore is None: fore = self._default_fore self._fore = fore # Emulate LIGHT_EX with BRIGHT Style if light: self._light |= WinStyle.BRIGHT else: self._light &= ~WinStyle.BRIGHT self.set_console(on_stderr=on_stderr) def back(self, back=None, light=False, on_stderr=False): if back is None: back = self._default_back self._back = back # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style if light: self._light |= WinStyle.BRIGHT_BACKGROUND else: self._light &= ~WinStyle.BRIGHT_BACKGROUND self.set_console(on_stderr=on_stderr) def style(self, style=None, on_stderr=False): if style is None: style = self._default_style self._style = style self.set_console(on_stderr=on_stderr) def set_console(self, attrs=None, on_stderr=False): if attrs is None: attrs = self.get_attrs() handle = win32.STDOUT if on_stderr: handle = win32.STDERR win32.SetConsoleTextAttribute(handle, attrs) def get_position(self, handle): position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition # Because Windows coordinates are 0-based, # and win32.SetConsoleCursorPosition expects 1-based. position.X += 1 position.Y += 1 return position def set_cursor_position(self, position=None, on_stderr=False): if position is None: # I'm not currently tracking the position, so there is no default. # position = self.get_position() return handle = win32.STDOUT if on_stderr: handle = win32.STDERR win32.SetConsoleCursorPosition(handle, position) def cursor_adjust(self, x, y, on_stderr=False): handle = win32.STDOUT if on_stderr: handle = win32.STDERR position = self.get_position(handle) adjusted_position = (position.Y + y, position.X + x) win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) def erase_screen(self, mode=0, on_stderr=False): # 0 should clear from the cursor to the end of the screen. # 1 should clear from the cursor to the beginning of the screen. 
# 2 should clear the entire screen, and move cursor to (1,1) handle = win32.STDOUT if on_stderr: handle = win32.STDERR csbi = win32.GetConsoleScreenBufferInfo(handle) # get the number of character cells in the current buffer cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y # get number of character cells before current cursor position cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X if mode == 0: from_coord = csbi.dwCursorPosition cells_to_erase = cells_in_screen - cells_before_cursor elif mode == 1: from_coord = win32.COORD(0, 0) cells_to_erase = cells_before_cursor elif mode == 2: from_coord = win32.COORD(0, 0) cells_to_erase = cells_in_screen else: # invalid mode return # fill the entire screen with blanks win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) # now set the buffer's attributes accordingly win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) if mode == 2: # put the cursor where needed win32.SetConsoleCursorPosition(handle, (1, 1)) def erase_line(self, mode=0, on_stderr=False): # 0 should clear from the cursor to the end of the line. # 1 should clear from the cursor to the beginning of the line. # 2 should clear the entire line. handle = win32.STDOUT if on_stderr: handle = win32.STDERR csbi = win32.GetConsoleScreenBufferInfo(handle) if mode == 0: from_coord = csbi.dwCursorPosition cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X elif mode == 1: from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) cells_to_erase = csbi.dwCursorPosition.X elif mode == 2: from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) cells_to_erase = csbi.dwSize.X else: # invalid mode return # fill the entire screen with blanks win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) # now set the buffer's attributes accordingly win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) def set_title(self, title): win32.SetConsoleTitle(title) def enable_vt_processing(fd): if win32.windll is None or not win32.winapi_test(): return False try: handle = get_osfhandle(fd) mode = win32.GetConsoleMode(handle) win32.SetConsoleMode( handle, mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING, ) mode = win32.GetConsoleMode(handle) if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING: return True # Can get TypeError in testsuite where 'fd' is a Mock() except (OSError, TypeError): return False
7,134
Python
35.403061
95
0.611158
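WinTerm above packs foreground, background, and style into the single attribute word that the Win32 console APIs consume. A minimal sketch of that bit layout, runnable outside any Windows API; pack_attrs and unpack_attrs are hypothetical names introduced for this illustration, and the real class additionally folds in a separately tracked _light flag.

# Minimal sketch of the attribute packing in WinTerm.get_attrs()/set_attrs() above;
# pack_attrs/unpack_attrs are hypothetical names for illustration only.
BRIGHT = 0x08             # WinStyle.BRIGHT: bright text
BRIGHT_BACKGROUND = 0x80  # WinStyle.BRIGHT_BACKGROUND: bright background

def pack_attrs(fore, back, style):
    # low 3 bits: foreground color; bits 4-6: background color; style flags on top
    return fore + back * 16 + style

def unpack_attrs(value):
    fore = value & 7
    back = (value >> 4) & 7
    style = value & (BRIGHT | BRIGHT_BACKGROUND)
    return fore, back, style

# YELLOW (6) on RED (4), bright text: 6 + 4*16 + 8 == 78
assert pack_attrs(6, 4, BRIGHT) == 78
assert unpack_attrs(78) == (6, 4, BRIGHT)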
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/win32.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. # from winbase.h STDOUT = -11 STDERR = -12 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 try: import ctypes from ctypes import LibraryLoader windll = LibraryLoader(ctypes.WinDLL) from ctypes import wintypes except (AttributeError, ImportError): windll = None SetConsoleTextAttribute = lambda *_: None winapi_test = lambda *_: None else: from ctypes import byref, Structure, c_char, POINTER COORD = wintypes._COORD class CONSOLE_SCREEN_BUFFER_INFO(Structure): """struct in wincon.h.""" _fields_ = [ ("dwSize", COORD), ("dwCursorPosition", COORD), ("wAttributes", wintypes.WORD), ("srWindow", wintypes.SMALL_RECT), ("dwMaximumWindowSize", COORD), ] def __str__(self): return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( self.dwSize.Y, self.dwSize.X , self.dwCursorPosition.Y, self.dwCursorPosition.X , self.wAttributes , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X ) _GetStdHandle = windll.kernel32.GetStdHandle _GetStdHandle.argtypes = [ wintypes.DWORD, ] _GetStdHandle.restype = wintypes.HANDLE _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo _GetConsoleScreenBufferInfo.argtypes = [ wintypes.HANDLE, POINTER(CONSOLE_SCREEN_BUFFER_INFO), ] _GetConsoleScreenBufferInfo.restype = wintypes.BOOL _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute _SetConsoleTextAttribute.argtypes = [ wintypes.HANDLE, wintypes.WORD, ] _SetConsoleTextAttribute.restype = wintypes.BOOL _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition _SetConsoleCursorPosition.argtypes = [ wintypes.HANDLE, COORD, ] _SetConsoleCursorPosition.restype = wintypes.BOOL _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA _FillConsoleOutputCharacterA.argtypes = [ wintypes.HANDLE, c_char, wintypes.DWORD, COORD, POINTER(wintypes.DWORD), ] _FillConsoleOutputCharacterA.restype = wintypes.BOOL _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute _FillConsoleOutputAttribute.argtypes = [ wintypes.HANDLE, wintypes.WORD, wintypes.DWORD, COORD, POINTER(wintypes.DWORD), ] _FillConsoleOutputAttribute.restype = wintypes.BOOL _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW _SetConsoleTitleW.argtypes = [ wintypes.LPCWSTR ] _SetConsoleTitleW.restype = wintypes.BOOL _GetConsoleMode = windll.kernel32.GetConsoleMode _GetConsoleMode.argtypes = [ wintypes.HANDLE, POINTER(wintypes.DWORD) ] _GetConsoleMode.restype = wintypes.BOOL _SetConsoleMode = windll.kernel32.SetConsoleMode _SetConsoleMode.argtypes = [ wintypes.HANDLE, wintypes.DWORD ] _SetConsoleMode.restype = wintypes.BOOL def _winapi_test(handle): csbi = CONSOLE_SCREEN_BUFFER_INFO() success = _GetConsoleScreenBufferInfo( handle, byref(csbi)) return bool(success) def winapi_test(): return any(_winapi_test(h) for h in (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) def GetConsoleScreenBufferInfo(stream_id=STDOUT): handle = _GetStdHandle(stream_id) csbi = CONSOLE_SCREEN_BUFFER_INFO() success = _GetConsoleScreenBufferInfo( handle, byref(csbi)) return csbi def SetConsoleTextAttribute(stream_id, attrs): handle = _GetStdHandle(stream_id) return _SetConsoleTextAttribute(handle, attrs) def SetConsoleCursorPosition(stream_id, position, adjust=True): position = COORD(*position) # If the position is out of range, do nothing. if position.Y <= 0 or position.X <= 0: return # Adjust for Windows' SetConsoleCursorPosition: # 1. being 0-based, while ANSI is 1-based. # 2. 
expecting (x,y), while ANSI uses (y,x). adjusted_position = COORD(position.Y - 1, position.X - 1) if adjust: # Adjust for viewport's scroll position sr = GetConsoleScreenBufferInfo(STDOUT).srWindow adjusted_position.Y += sr.Top adjusted_position.X += sr.Left # Resume normal processing handle = _GetStdHandle(stream_id) return _SetConsoleCursorPosition(handle, adjusted_position) def FillConsoleOutputCharacter(stream_id, char, length, start): handle = _GetStdHandle(stream_id) char = c_char(char.encode()) length = wintypes.DWORD(length) num_written = wintypes.DWORD(0) # Note that this is hard-coded for ANSI (vs wide) bytes. success = _FillConsoleOutputCharacterA( handle, char, length, start, byref(num_written)) return num_written.value def FillConsoleOutputAttribute(stream_id, attr, length, start): ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' handle = _GetStdHandle(stream_id) attribute = wintypes.WORD(attr) length = wintypes.DWORD(length) num_written = wintypes.DWORD(0) # Note that this is hard-coded for ANSI (vs wide) bytes. return _FillConsoleOutputAttribute( handle, attribute, length, start, byref(num_written)) def SetConsoleTitle(title): return _SetConsoleTitleW(title) def GetConsoleMode(handle): mode = wintypes.DWORD() success = _GetConsoleMode(handle, byref(mode)) if not success: raise ctypes.WinError() return mode.value def SetConsoleMode(handle, mode): success = _SetConsoleMode(handle, mode) if not success: raise ctypes.WinError()
6,181
Python
33.154696
111
0.646336
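SetConsoleCursorPosition above has to bridge two conventions: ANSI positions are 1-based and ordered (row, column), while Windows COORDs are 0-based and ordered (column, row). A pure-Python sketch of just that conversion; ansi_to_coord is a hypothetical helper, and the real function additionally offsets by the console window's scroll position when adjust=True.

def ansi_to_coord(position):
    # position is an ANSI-style pair: (row, column), both 1-based
    row, col = position
    if row <= 0 or col <= 0:
        return None               # out of range: the real code silently returns
    return (col - 1, row - 1)     # Windows COORD order: (x, y), 0-based

assert ansi_to_coord((1, 1)) == (0, 0)    # top-left corner
assert ansi_to_coord((5, 10)) == (9, 4)
assert ansi_to_coord((0, 3)) is None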
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/initialise.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. import atexit import contextlib import sys from .ansitowin32 import AnsiToWin32 def _wipe_internal_state_for_tests(): global orig_stdout, orig_stderr orig_stdout = None orig_stderr = None global wrapped_stdout, wrapped_stderr wrapped_stdout = None wrapped_stderr = None global atexit_done atexit_done = False global fixed_windows_console fixed_windows_console = False try: # no-op if it wasn't registered atexit.unregister(reset_all) except AttributeError: # python 2: no atexit.unregister. Oh well, we did our best. pass def reset_all(): if AnsiToWin32 is not None: # Issue #74: objects might become None at exit AnsiToWin32(orig_stdout).reset_all() def init(autoreset=False, convert=None, strip=None, wrap=True): if not wrap and any([autoreset, convert, strip]): raise ValueError('wrap=False conflicts with any other arg=True') global wrapped_stdout, wrapped_stderr global orig_stdout, orig_stderr orig_stdout = sys.stdout orig_stderr = sys.stderr if sys.stdout is None: wrapped_stdout = None else: sys.stdout = wrapped_stdout = \ wrap_stream(orig_stdout, convert, strip, autoreset, wrap) if sys.stderr is None: wrapped_stderr = None else: sys.stderr = wrapped_stderr = \ wrap_stream(orig_stderr, convert, strip, autoreset, wrap) global atexit_done if not atexit_done: atexit.register(reset_all) atexit_done = True def deinit(): if orig_stdout is not None: sys.stdout = orig_stdout if orig_stderr is not None: sys.stderr = orig_stderr def just_fix_windows_console(): global fixed_windows_console if sys.platform != "win32": return if fixed_windows_console: return if wrapped_stdout is not None or wrapped_stderr is not None: # Someone already ran init() and it did stuff, so we won't second-guess them return # On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the # native ANSI support in the console as a side-effect. We only need to actually # replace sys.stdout/stderr if we're in the old-style conversion mode. new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False) if new_stdout.convert: sys.stdout = new_stdout new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False) if new_stderr.convert: sys.stderr = new_stderr fixed_windows_console = True @contextlib.contextmanager def colorama_text(*args, **kwargs): init(*args, **kwargs) try: yield finally: deinit() def reinit(): if wrapped_stdout is not None: sys.stdout = wrapped_stdout if wrapped_stderr is not None: sys.stderr = wrapped_stderr def wrap_stream(stream, convert, strip, autoreset, wrap): if wrap: wrapper = AnsiToWin32(stream, convert=convert, strip=strip, autoreset=autoreset) if wrapper.should_wrap(): stream = wrapper.stream return stream # Use this for initial setup as well, to reduce code duplication _wipe_internal_state_for_tests()
3,325
Python
26.262295
84
0.663759
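A short usage sketch of the entry points defined above: init() wraps sys.stdout/sys.stderr, deinit() restores them, and colorama_text() pairs the two as a context manager.

import colorama
from colorama import Fore, Style

colorama.init(autoreset=True)         # wrap the streams; reset color after each write
print(Fore.GREEN + "wrapped output")
colorama.deinit()                     # restore the original streams

with colorama.colorama_text():        # equivalent init()/deinit() pairing
    print(Fore.RED + "inside the context" + Style.RESET_ALL)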
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/__init__.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. from .initialise import init, deinit, reinit, colorama_text, just_fix_windows_console from .ansi import Fore, Back, Style, Cursor from .ansitowin32 import AnsiToWin32 __version__ = '0.4.6'
266
Python
32.374996
85
0.763158
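The package also exports just_fix_windows_console(), a lighter alternative to init() that only touches the legacy Windows console; a usage sketch:

from colorama import Fore, Style, just_fix_windows_console

just_fix_windows_console()   # no-op on non-Windows platforms and on repeat calls
print(Fore.CYAN + "ANSI colors now work in the legacy Windows console" + Style.RESET_ALL)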
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/ansi.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. ''' This module generates ANSI character codes for printing colors to terminals. See: http://en.wikipedia.org/wiki/ANSI_escape_code ''' CSI = '\033[' OSC = '\033]' BEL = '\a' def code_to_chars(code): return CSI + str(code) + 'm' def set_title(title): return OSC + '2;' + title + BEL def clear_screen(mode=2): return CSI + str(mode) + 'J' def clear_line(mode=2): return CSI + str(mode) + 'K' class AnsiCodes(object): def __init__(self): # the subclasses declare class attributes which are numbers. # Upon instantiation we define instance attributes, which are the same # as the class attributes but wrapped with the ANSI escape sequence for name in dir(self): if not name.startswith('_'): value = getattr(self, name) setattr(self, name, code_to_chars(value)) class AnsiCursor(object): def UP(self, n=1): return CSI + str(n) + 'A' def DOWN(self, n=1): return CSI + str(n) + 'B' def FORWARD(self, n=1): return CSI + str(n) + 'C' def BACK(self, n=1): return CSI + str(n) + 'D' def POS(self, x=1, y=1): return CSI + str(y) + ';' + str(x) + 'H' class AnsiFore(AnsiCodes): BLACK = 30 RED = 31 GREEN = 32 YELLOW = 33 BLUE = 34 MAGENTA = 35 CYAN = 36 WHITE = 37 RESET = 39 # These are fairly well supported, but not part of the standard. LIGHTBLACK_EX = 90 LIGHTRED_EX = 91 LIGHTGREEN_EX = 92 LIGHTYELLOW_EX = 93 LIGHTBLUE_EX = 94 LIGHTMAGENTA_EX = 95 LIGHTCYAN_EX = 96 LIGHTWHITE_EX = 97 class AnsiBack(AnsiCodes): BLACK = 40 RED = 41 GREEN = 42 YELLOW = 43 BLUE = 44 MAGENTA = 45 CYAN = 46 WHITE = 47 RESET = 49 # These are fairly well supported, but not part of the standard. LIGHTBLACK_EX = 100 LIGHTRED_EX = 101 LIGHTGREEN_EX = 102 LIGHTYELLOW_EX = 103 LIGHTBLUE_EX = 104 LIGHTMAGENTA_EX = 105 LIGHTCYAN_EX = 106 LIGHTWHITE_EX = 107 class AnsiStyle(AnsiCodes): BRIGHT = 1 DIM = 2 NORMAL = 22 RESET_ALL = 0 Fore = AnsiFore() Back = AnsiBack() Style = AnsiStyle() Cursor = AnsiCursor()
2,522
Python
23.495145
78
0.540048
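Because AnsiCodes.__init__ above rewrites each numeric class attribute into a ready-to-print escape string, the public constants are plain str values built by code_to_chars(); a few spot checks (the same values are asserted by ansi_test.py later in this dump):

from colorama import Back, Cursor, Fore, Style

assert Fore.RED == '\033[31m'            # code_to_chars(31)
assert Back.GREEN == '\033[42m'          # code_to_chars(42)
assert Style.BRIGHT == '\033[1m'         # code_to_chars(1)
assert Cursor.POS(3, 7) == '\033[7;3H'   # note POS(x, y) renders as CSI y;x H

print(Fore.RED + Back.GREEN + 'red on green' + Style.RESET_ALL)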
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/ansitowin32.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. import re import sys import os from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL from .winterm import enable_vt_processing, WinTerm, WinColor, WinStyle from .win32 import windll, winapi_test winterm = None if windll is not None: winterm = WinTerm() class StreamWrapper(object): ''' Wraps a stream (such as stdout), acting as a transparent proxy for all attribute access apart from method 'write()', which is delegated to our Converter instance. ''' def __init__(self, wrapped, converter): # double-underscore everything to prevent clashes with names of # attributes on the wrapped stream object. self.__wrapped = wrapped self.__convertor = converter def __getattr__(self, name): return getattr(self.__wrapped, name) def __enter__(self, *args, **kwargs): # special method lookup bypasses __getattr__/__getattribute__, see # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit # thus, contextlib magic methods are not proxied via __getattr__ return self.__wrapped.__enter__(*args, **kwargs) def __exit__(self, *args, **kwargs): return self.__wrapped.__exit__(*args, **kwargs) def __setstate__(self, state): self.__dict__ = state def __getstate__(self): return self.__dict__ def write(self, text): self.__convertor.write(text) def isatty(self): stream = self.__wrapped if 'PYCHARM_HOSTED' in os.environ: if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__): return True try: stream_isatty = stream.isatty except AttributeError: return False else: return stream_isatty() @property def closed(self): stream = self.__wrapped try: return stream.closed # AttributeError in the case that the stream doesn't support being closed # ValueError for the case that the stream has already been detached when atexit runs except (AttributeError, ValueError): return True class AnsiToWin32(object): ''' Implements a 'write()' method which, on Windows, will strip ANSI character sequences from the text, and if outputting to a tty, will convert them into win32 function calls. ''' ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?') # Operating System Command def __init__(self, wrapped, convert=None, strip=None, autoreset=False): # The wrapped stream (normally sys.stdout or sys.stderr) self.wrapped = wrapped # should we reset colors to defaults after every .write() self.autoreset = autoreset # create the proxy wrapping our output stream self.stream = StreamWrapper(wrapped, self) on_windows = os.name == 'nt' # We test if the WinAPI works, because even if we are on Windows # we may be using a terminal that doesn't support the WinAPI # (e.g. Cygwin Terminal). In this case it's up to the terminal # to support the ANSI codes. conversion_supported = on_windows and winapi_test() try: fd = wrapped.fileno() except Exception: fd = -1 system_has_native_ansi = not on_windows or enable_vt_processing(fd) have_tty = not self.stream.closed and self.stream.isatty() need_conversion = conversion_supported and not system_has_native_ansi # should we strip ANSI sequences from our output? if strip is None: strip = need_conversion or not have_tty self.strip = strip # should we should convert ANSI sequences into win32 calls? if convert is None: convert = need_conversion and have_tty self.convert = convert # dict of ansi codes to win32 functions and parameters self.win32_calls = self.get_win32_calls() # are we wrapping stderr? 
self.on_stderr = self.wrapped is sys.stderr def should_wrap(self): ''' True if this class is actually needed. If false, then the output stream will not be affected, nor will win32 calls be issued, so wrapping stdout is not actually required. This will generally be False on non-Windows platforms, unless optional functionality like autoreset has been requested using kwargs to init() ''' return self.convert or self.strip or self.autoreset def get_win32_calls(self): if self.convert and winterm: return { AnsiStyle.RESET_ALL: (winterm.reset_all, ), AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), AnsiFore.RED: (winterm.fore, WinColor.RED), AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), AnsiFore.WHITE: (winterm.fore, WinColor.GREY), AnsiFore.RESET: (winterm.fore, ), AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), AnsiBack.BLACK: (winterm.back, WinColor.BLACK), AnsiBack.RED: (winterm.back, WinColor.RED), AnsiBack.GREEN: (winterm.back, WinColor.GREEN), AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), AnsiBack.BLUE: (winterm.back, WinColor.BLUE), AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), AnsiBack.CYAN: (winterm.back, WinColor.CYAN), AnsiBack.WHITE: (winterm.back, WinColor.GREY), AnsiBack.RESET: (winterm.back, ), AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), } return dict() def write(self, text): if self.strip or self.convert: self.write_and_convert(text) else: self.wrapped.write(text) self.wrapped.flush() if self.autoreset: self.reset_all() def reset_all(self): if self.convert: self.call_win32('m', (0,)) elif not self.strip and not self.stream.closed: self.wrapped.write(Style.RESET_ALL) def write_and_convert(self, text): ''' Write the given text to our wrapped stream, stripping any ANSI sequences from the text, and optionally converting them into win32 calls. 
''' cursor = 0 text = self.convert_osc(text) for match in self.ANSI_CSI_RE.finditer(text): start, end = match.span() self.write_plain_text(text, cursor, start) self.convert_ansi(*match.groups()) cursor = end self.write_plain_text(text, cursor, len(text)) def write_plain_text(self, text, start, end): if start < end: self.wrapped.write(text[start:end]) self.wrapped.flush() def convert_ansi(self, paramstring, command): if self.convert: params = self.extract_params(command, paramstring) self.call_win32(command, params) def extract_params(self, command, paramstring): if command in 'Hf': params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) while len(params) < 2: # defaults: params = params + (1,) else: params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) if len(params) == 0: # defaults: if command in 'JKm': params = (0,) elif command in 'ABCD': params = (1,) return params def call_win32(self, command, params): if command == 'm': for param in params: if param in self.win32_calls: func_args = self.win32_calls[param] func = func_args[0] args = func_args[1:] kwargs = dict(on_stderr=self.on_stderr) func(*args, **kwargs) elif command in 'J': winterm.erase_screen(params[0], on_stderr=self.on_stderr) elif command in 'K': winterm.erase_line(params[0], on_stderr=self.on_stderr) elif command in 'Hf': # cursor position - absolute winterm.set_cursor_position(params, on_stderr=self.on_stderr) elif command in 'ABCD': # cursor position - relative n = params[0] # A - up, B - down, C - forward, D - back x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) def convert_osc(self, text): for match in self.ANSI_OSC_RE.finditer(text): start, end = match.span() text = text[:start] + text[end:] paramstring, command = match.groups() if command == BEL: if paramstring.count(";") == 1: params = paramstring.split(";") # 0 - change title and icon (we will only change title) # 1 - change icon (we don't support this) # 2 - change title if params[0] in '02': winterm.set_title(params[1]) return text def flush(self): self.wrapped.flush()
11,128
Python
39.032374
103
0.580697
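write_and_convert() above walks ANSI_CSI_RE matches, emitting the plain segments and converting each escape sequence. A standalone sketch of that splitting step using the same regex; split_csi is a hypothetical name, and the real method also strips OSC sequences first and dispatches the (params, command) pairs to Win32 calls.

import re

# Same pattern as AnsiToWin32.ANSI_CSI_RE above.
ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?')

def split_csi(text):
    cursor, parts = 0, []
    for match in ANSI_CSI_RE.finditer(text):
        start, end = match.span()
        if start > cursor:
            parts.append(('text', text[cursor:start]))
        parts.append(('csi', match.group(1), match.group(2)))  # (params, command)
        cursor = end
    if cursor < len(text):
        parts.append(('text', text[cursor:]))
    return parts

assert split_csi('abc\033[31mdef') == [
    ('text', 'abc'), ('csi', '31', 'm'), ('text', 'def')]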
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/tests/winterm_test.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. import sys from unittest import TestCase, main, skipUnless try: from unittest.mock import Mock, patch except ImportError: from mock import Mock, patch from ..winterm import WinColor, WinStyle, WinTerm class WinTermTest(TestCase): @patch('colorama.winterm.win32') def testInit(self, mockWin32): mockAttr = Mock() mockAttr.wAttributes = 7 + 6 * 16 + 8 mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr term = WinTerm() self.assertEqual(term._fore, 7) self.assertEqual(term._back, 6) self.assertEqual(term._style, 8) @skipUnless(sys.platform.startswith("win"), "requires Windows") def testGetAttrs(self): term = WinTerm() term._fore = 0 term._back = 0 term._style = 0 self.assertEqual(term.get_attrs(), 0) term._fore = WinColor.YELLOW self.assertEqual(term.get_attrs(), WinColor.YELLOW) term._back = WinColor.MAGENTA self.assertEqual( term.get_attrs(), WinColor.YELLOW + WinColor.MAGENTA * 16) term._style = WinStyle.BRIGHT self.assertEqual( term.get_attrs(), WinColor.YELLOW + WinColor.MAGENTA * 16 + WinStyle.BRIGHT) @patch('colorama.winterm.win32') def testResetAll(self, mockWin32): mockAttr = Mock() mockAttr.wAttributes = 1 + 2 * 16 + 8 mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr term = WinTerm() term.set_console = Mock() term._fore = -1 term._back = -1 term._style = -1 term.reset_all() self.assertEqual(term._fore, 1) self.assertEqual(term._back, 2) self.assertEqual(term._style, 8) self.assertEqual(term.set_console.called, True) @skipUnless(sys.platform.startswith("win"), "requires Windows") def testFore(self): term = WinTerm() term.set_console = Mock() term._fore = 0 term.fore(5) self.assertEqual(term._fore, 5) self.assertEqual(term.set_console.called, True) @skipUnless(sys.platform.startswith("win"), "requires Windows") def testBack(self): term = WinTerm() term.set_console = Mock() term._back = 0 term.back(5) self.assertEqual(term._back, 5) self.assertEqual(term.set_console.called, True) @skipUnless(sys.platform.startswith("win"), "requires Windows") def testStyle(self): term = WinTerm() term.set_console = Mock() term._style = 0 term.style(22) self.assertEqual(term._style, 22) self.assertEqual(term.set_console.called, True) @patch('colorama.winterm.win32') def testSetConsole(self, mockWin32): mockAttr = Mock() mockAttr.wAttributes = 0 mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr term = WinTerm() term.windll = Mock() term.set_console() self.assertEqual( mockWin32.SetConsoleTextAttribute.call_args, ((mockWin32.STDOUT, term.get_attrs()), {}) ) @patch('colorama.winterm.win32') def testSetConsoleOnStderr(self, mockWin32): mockAttr = Mock() mockAttr.wAttributes = 0 mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr term = WinTerm() term.windll = Mock() term.set_console(on_stderr=True) self.assertEqual( mockWin32.SetConsoleTextAttribute.call_args, ((mockWin32.STDERR, term.get_attrs()), {}) ) if __name__ == '__main__': main()
3,709
Python
27.10606
74
0.607441
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/tests/initialise_test.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. import sys from unittest import TestCase, main, skipUnless try: from unittest.mock import patch, Mock except ImportError: from mock import patch, Mock from ..ansitowin32 import StreamWrapper from ..initialise import init, just_fix_windows_console, _wipe_internal_state_for_tests from .utils import osname, replace_by orig_stdout = sys.stdout orig_stderr = sys.stderr class InitTest(TestCase): @skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty") def setUp(self): # sanity check self.assertNotWrapped() def tearDown(self): _wipe_internal_state_for_tests() sys.stdout = orig_stdout sys.stderr = orig_stderr def assertWrapped(self): self.assertIsNot(sys.stdout, orig_stdout, 'stdout should be wrapped') self.assertIsNot(sys.stderr, orig_stderr, 'stderr should be wrapped') self.assertTrue(isinstance(sys.stdout, StreamWrapper), 'bad stdout wrapper') self.assertTrue(isinstance(sys.stderr, StreamWrapper), 'bad stderr wrapper') def assertNotWrapped(self): self.assertIs(sys.stdout, orig_stdout, 'stdout should not be wrapped') self.assertIs(sys.stderr, orig_stderr, 'stderr should not be wrapped') @patch('colorama.initialise.reset_all') @patch('colorama.ansitowin32.winapi_test', lambda *_: True) @patch('colorama.ansitowin32.enable_vt_processing', lambda *_: False) def testInitWrapsOnWindows(self, _): with osname("nt"): init() self.assertWrapped() @patch('colorama.initialise.reset_all') @patch('colorama.ansitowin32.winapi_test', lambda *_: False) def testInitDoesntWrapOnEmulatedWindows(self, _): with osname("nt"): init() self.assertNotWrapped() def testInitDoesntWrapOnNonWindows(self): with osname("posix"): init() self.assertNotWrapped() def testInitDoesntWrapIfNone(self): with replace_by(None): init() # We can't use assertNotWrapped here because replace_by(None) # changes stdout/stderr already. 
self.assertIsNone(sys.stdout) self.assertIsNone(sys.stderr) def testInitAutoresetOnWrapsOnAllPlatforms(self): with osname("posix"): init(autoreset=True) self.assertWrapped() def testInitWrapOffDoesntWrapOnWindows(self): with osname("nt"): init(wrap=False) self.assertNotWrapped() def testInitWrapOffIncompatibleWithAutoresetOn(self): self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False)) @patch('colorama.win32.SetConsoleTextAttribute') @patch('colorama.initialise.AnsiToWin32') def testAutoResetPassedOn(self, mockATW32, _): with osname("nt"): init(autoreset=True) self.assertEqual(len(mockATW32.call_args_list), 2) self.assertEqual(mockATW32.call_args_list[1][1]['autoreset'], True) self.assertEqual(mockATW32.call_args_list[0][1]['autoreset'], True) @patch('colorama.initialise.AnsiToWin32') def testAutoResetChangeable(self, mockATW32): with osname("nt"): init() init(autoreset=True) self.assertEqual(len(mockATW32.call_args_list), 4) self.assertEqual(mockATW32.call_args_list[2][1]['autoreset'], True) self.assertEqual(mockATW32.call_args_list[3][1]['autoreset'], True) init() self.assertEqual(len(mockATW32.call_args_list), 6) self.assertEqual( mockATW32.call_args_list[4][1]['autoreset'], False) self.assertEqual( mockATW32.call_args_list[5][1]['autoreset'], False) @patch('colorama.initialise.atexit.register') def testAtexitRegisteredOnlyOnce(self, mockRegister): init() self.assertTrue(mockRegister.called) mockRegister.reset_mock() init() self.assertFalse(mockRegister.called) class JustFixWindowsConsoleTest(TestCase): def _reset(self): _wipe_internal_state_for_tests() sys.stdout = orig_stdout sys.stderr = orig_stderr def tearDown(self): self._reset() @patch("colorama.ansitowin32.winapi_test", lambda: True) def testJustFixWindowsConsole(self): if sys.platform != "win32": # just_fix_windows_console should be a no-op just_fix_windows_console() self.assertIs(sys.stdout, orig_stdout) self.assertIs(sys.stderr, orig_stderr) else: def fake_std(): # Emulate stdout=not a tty, stderr=tty # to check that we handle both cases correctly stdout = Mock() stdout.closed = False stdout.isatty.return_value = False stdout.fileno.return_value = 1 sys.stdout = stdout stderr = Mock() stderr.closed = False stderr.isatty.return_value = True stderr.fileno.return_value = 2 sys.stderr = stderr for native_ansi in [False, True]: with patch( 'colorama.ansitowin32.enable_vt_processing', lambda *_: native_ansi ): self._reset() fake_std() # Regular single-call test prev_stdout = sys.stdout prev_stderr = sys.stderr just_fix_windows_console() self.assertIs(sys.stdout, prev_stdout) if native_ansi: self.assertIs(sys.stderr, prev_stderr) else: self.assertIsNot(sys.stderr, prev_stderr) # second call without resetting is always a no-op prev_stdout = sys.stdout prev_stderr = sys.stderr just_fix_windows_console() self.assertIs(sys.stdout, prev_stdout) self.assertIs(sys.stderr, prev_stderr) self._reset() fake_std() # If init() runs first, just_fix_windows_console should be a no-op init() prev_stdout = sys.stdout prev_stderr = sys.stderr just_fix_windows_console() self.assertIs(prev_stdout, sys.stdout) self.assertIs(prev_stderr, sys.stderr) if __name__ == '__main__': main()
6,741
Python
34.48421
87
0.584483
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/tests/ansi_test.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. import sys from unittest import TestCase, main from ..ansi import Back, Fore, Style from ..ansitowin32 import AnsiToWin32 stdout_orig = sys.stdout stderr_orig = sys.stderr class AnsiTest(TestCase): def setUp(self): # sanity check: stdout should be a file or StringIO object. # It will only be AnsiToWin32 if init() has previously wrapped it self.assertNotEqual(type(sys.stdout), AnsiToWin32) self.assertNotEqual(type(sys.stderr), AnsiToWin32) def tearDown(self): sys.stdout = stdout_orig sys.stderr = stderr_orig def testForeAttributes(self): self.assertEqual(Fore.BLACK, '\033[30m') self.assertEqual(Fore.RED, '\033[31m') self.assertEqual(Fore.GREEN, '\033[32m') self.assertEqual(Fore.YELLOW, '\033[33m') self.assertEqual(Fore.BLUE, '\033[34m') self.assertEqual(Fore.MAGENTA, '\033[35m') self.assertEqual(Fore.CYAN, '\033[36m') self.assertEqual(Fore.WHITE, '\033[37m') self.assertEqual(Fore.RESET, '\033[39m') # Check the light, extended versions. self.assertEqual(Fore.LIGHTBLACK_EX, '\033[90m') self.assertEqual(Fore.LIGHTRED_EX, '\033[91m') self.assertEqual(Fore.LIGHTGREEN_EX, '\033[92m') self.assertEqual(Fore.LIGHTYELLOW_EX, '\033[93m') self.assertEqual(Fore.LIGHTBLUE_EX, '\033[94m') self.assertEqual(Fore.LIGHTMAGENTA_EX, '\033[95m') self.assertEqual(Fore.LIGHTCYAN_EX, '\033[96m') self.assertEqual(Fore.LIGHTWHITE_EX, '\033[97m') def testBackAttributes(self): self.assertEqual(Back.BLACK, '\033[40m') self.assertEqual(Back.RED, '\033[41m') self.assertEqual(Back.GREEN, '\033[42m') self.assertEqual(Back.YELLOW, '\033[43m') self.assertEqual(Back.BLUE, '\033[44m') self.assertEqual(Back.MAGENTA, '\033[45m') self.assertEqual(Back.CYAN, '\033[46m') self.assertEqual(Back.WHITE, '\033[47m') self.assertEqual(Back.RESET, '\033[49m') # Check the light, extended versions. self.assertEqual(Back.LIGHTBLACK_EX, '\033[100m') self.assertEqual(Back.LIGHTRED_EX, '\033[101m') self.assertEqual(Back.LIGHTGREEN_EX, '\033[102m') self.assertEqual(Back.LIGHTYELLOW_EX, '\033[103m') self.assertEqual(Back.LIGHTBLUE_EX, '\033[104m') self.assertEqual(Back.LIGHTMAGENTA_EX, '\033[105m') self.assertEqual(Back.LIGHTCYAN_EX, '\033[106m') self.assertEqual(Back.LIGHTWHITE_EX, '\033[107m') def testStyleAttributes(self): self.assertEqual(Style.DIM, '\033[2m') self.assertEqual(Style.NORMAL, '\033[22m') self.assertEqual(Style.BRIGHT, '\033[1m') if __name__ == '__main__': main()
2,839
Python
35.883116
74
0.647059
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/tests/__init__.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
75
Python
36.999982
74
0.786667
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/tests/utils.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. from contextlib import contextmanager from io import StringIO import sys import os class StreamTTY(StringIO): def isatty(self): return True class StreamNonTTY(StringIO): def isatty(self): return False @contextmanager def osname(name): orig = os.name os.name = name yield os.name = orig @contextmanager def replace_by(stream): orig_stdout = sys.stdout orig_stderr = sys.stderr sys.stdout = stream sys.stderr = stream yield sys.stdout = orig_stdout sys.stderr = orig_stderr @contextmanager def replace_original_by(stream): orig_stdout = sys.__stdout__ orig_stderr = sys.__stderr__ sys.__stdout__ = stream sys.__stderr__ = stream yield sys.__stdout__ = orig_stdout sys.__stderr__ = orig_stderr @contextmanager def pycharm(): os.environ["PYCHARM_HOSTED"] = "1" non_tty = StreamNonTTY() with replace_by(non_tty), replace_original_by(non_tty): yield del os.environ["PYCHARM_HOSTED"]
1,079
Python
20.6
74
0.664504
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/tests/isatty_test.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. import sys from unittest import TestCase, main from ..ansitowin32 import StreamWrapper, AnsiToWin32 from .utils import pycharm, replace_by, replace_original_by, StreamTTY, StreamNonTTY def is_a_tty(stream): return StreamWrapper(stream, None).isatty() class IsattyTest(TestCase): def test_TTY(self): tty = StreamTTY() self.assertTrue(is_a_tty(tty)) with pycharm(): self.assertTrue(is_a_tty(tty)) def test_nonTTY(self): non_tty = StreamNonTTY() self.assertFalse(is_a_tty(non_tty)) with pycharm(): self.assertFalse(is_a_tty(non_tty)) def test_withPycharm(self): with pycharm(): self.assertTrue(is_a_tty(sys.stderr)) self.assertTrue(is_a_tty(sys.stdout)) def test_withPycharmTTYOverride(self): tty = StreamTTY() with pycharm(), replace_by(tty): self.assertTrue(is_a_tty(tty)) def test_withPycharmNonTTYOverride(self): non_tty = StreamNonTTY() with pycharm(), replace_by(non_tty): self.assertFalse(is_a_tty(non_tty)) def test_withPycharmNoneOverride(self): with pycharm(): with replace_by(None), replace_original_by(None): self.assertFalse(is_a_tty(None)) self.assertFalse(is_a_tty(StreamNonTTY())) self.assertTrue(is_a_tty(StreamTTY())) def test_withPycharmStreamWrapped(self): with pycharm(): self.assertTrue(AnsiToWin32(StreamTTY()).stream.isatty()) self.assertFalse(AnsiToWin32(StreamNonTTY()).stream.isatty()) self.assertTrue(AnsiToWin32(sys.stdout).stream.isatty()) self.assertTrue(AnsiToWin32(sys.stderr).stream.isatty()) if __name__ == '__main__': main()
1,866
Python
31.189655
84
0.629689
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama/tests/ansitowin32_test.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. from io import StringIO, TextIOWrapper from unittest import TestCase, main try: from contextlib import ExitStack except ImportError: # python 2 from contextlib2 import ExitStack try: from unittest.mock import MagicMock, Mock, patch except ImportError: from mock import MagicMock, Mock, patch from ..ansitowin32 import AnsiToWin32, StreamWrapper from ..win32 import ENABLE_VIRTUAL_TERMINAL_PROCESSING from .utils import osname class StreamWrapperTest(TestCase): def testIsAProxy(self): mockStream = Mock() wrapper = StreamWrapper(mockStream, None) self.assertTrue( wrapper.random_attr is mockStream.random_attr ) def testDelegatesWrite(self): mockStream = Mock() mockConverter = Mock() wrapper = StreamWrapper(mockStream, mockConverter) wrapper.write('hello') self.assertTrue(mockConverter.write.call_args, (('hello',), {})) def testDelegatesContext(self): mockConverter = Mock() s = StringIO() with StreamWrapper(s, mockConverter) as fp: fp.write(u'hello') self.assertTrue(s.closed) def testProxyNoContextManager(self): mockStream = MagicMock() mockStream.__enter__.side_effect = AttributeError() mockConverter = Mock() with self.assertRaises(AttributeError) as excinfo: with StreamWrapper(mockStream, mockConverter) as wrapper: wrapper.write('hello') def test_closed_shouldnt_raise_on_closed_stream(self): stream = StringIO() stream.close() wrapper = StreamWrapper(stream, None) self.assertEqual(wrapper.closed, True) def test_closed_shouldnt_raise_on_detached_stream(self): stream = TextIOWrapper(StringIO()) stream.detach() wrapper = StreamWrapper(stream, None) self.assertEqual(wrapper.closed, True) class AnsiToWin32Test(TestCase): def testInit(self): mockStdout = Mock() auto = Mock() stream = AnsiToWin32(mockStdout, autoreset=auto) self.assertEqual(stream.wrapped, mockStdout) self.assertEqual(stream.autoreset, auto) @patch('colorama.ansitowin32.winterm', None) @patch('colorama.ansitowin32.winapi_test', lambda *_: True) def testStripIsTrueOnWindows(self): with osname('nt'): mockStdout = Mock() stream = AnsiToWin32(mockStdout) self.assertTrue(stream.strip) def testStripIsFalseOffWindows(self): with osname('posix'): mockStdout = Mock(closed=False) stream = AnsiToWin32(mockStdout) self.assertFalse(stream.strip) def testWriteStripsAnsi(self): mockStdout = Mock() stream = AnsiToWin32(mockStdout) stream.wrapped = Mock() stream.write_and_convert = Mock() stream.strip = True stream.write('abc') self.assertFalse(stream.wrapped.write.called) self.assertEqual(stream.write_and_convert.call_args, (('abc',), {})) def testWriteDoesNotStripAnsi(self): mockStdout = Mock() stream = AnsiToWin32(mockStdout) stream.wrapped = Mock() stream.write_and_convert = Mock() stream.strip = False stream.convert = False stream.write('abc') self.assertFalse(stream.write_and_convert.called) self.assertEqual(stream.wrapped.write.call_args, (('abc',), {})) def assert_autoresets(self, convert, autoreset=True): stream = AnsiToWin32(Mock()) stream.convert = convert stream.reset_all = Mock() stream.autoreset = autoreset stream.winterm = Mock() stream.write('abc') self.assertEqual(stream.reset_all.called, autoreset) def testWriteAutoresets(self): self.assert_autoresets(convert=True) self.assert_autoresets(convert=False) self.assert_autoresets(convert=True, autoreset=False) self.assert_autoresets(convert=False, autoreset=False) def testWriteAndConvertWritesPlainText(self): stream = AnsiToWin32(Mock()) stream.write_and_convert( 'abc' ) self.assertEqual( 
stream.wrapped.write.call_args, (('abc',), {}) ) def testWriteAndConvertStripsAllValidAnsi(self): stream = AnsiToWin32(Mock()) stream.call_win32 = Mock() data = [ 'abc\033[mdef', 'abc\033[0mdef', 'abc\033[2mdef', 'abc\033[02mdef', 'abc\033[002mdef', 'abc\033[40mdef', 'abc\033[040mdef', 'abc\033[0;1mdef', 'abc\033[40;50mdef', 'abc\033[50;30;40mdef', 'abc\033[Adef', 'abc\033[0Gdef', 'abc\033[1;20;128Hdef', ] for datum in data: stream.wrapped.write.reset_mock() stream.write_and_convert( datum ) self.assertEqual( [args[0] for args in stream.wrapped.write.call_args_list], [ ('abc',), ('def',) ] ) def testWriteAndConvertSkipsEmptySnippets(self): stream = AnsiToWin32(Mock()) stream.call_win32 = Mock() stream.write_and_convert( '\033[40m\033[41m' ) self.assertFalse( stream.wrapped.write.called ) def testWriteAndConvertCallsWin32WithParamsAndCommand(self): stream = AnsiToWin32(Mock()) stream.convert = True stream.call_win32 = Mock() stream.extract_params = Mock(return_value='params') data = { 'abc\033[adef': ('a', 'params'), 'abc\033[;;bdef': ('b', 'params'), 'abc\033[0cdef': ('c', 'params'), 'abc\033[;;0;;Gdef': ('G', 'params'), 'abc\033[1;20;128Hdef': ('H', 'params'), } for datum, expected in data.items(): stream.call_win32.reset_mock() stream.write_and_convert( datum ) self.assertEqual( stream.call_win32.call_args[0], expected ) def test_reset_all_shouldnt_raise_on_closed_orig_stdout(self): stream = StringIO() converter = AnsiToWin32(stream) stream.close() converter.reset_all() def test_wrap_shouldnt_raise_on_closed_orig_stdout(self): stream = StringIO() stream.close() with \ patch("colorama.ansitowin32.os.name", "nt"), \ patch("colorama.ansitowin32.winapi_test", lambda: True): converter = AnsiToWin32(stream) self.assertTrue(converter.strip) self.assertFalse(converter.convert) def test_wrap_shouldnt_raise_on_missing_closed_attr(self): with \ patch("colorama.ansitowin32.os.name", "nt"), \ patch("colorama.ansitowin32.winapi_test", lambda: True): converter = AnsiToWin32(object()) self.assertTrue(converter.strip) self.assertFalse(converter.convert) def testExtractParams(self): stream = AnsiToWin32(Mock()) data = { '': (0,), ';;': (0,), '2': (2,), ';;002;;': (2,), '0;1': (0, 1), ';;003;;456;;': (3, 456), '11;22;33;44;55': (11, 22, 33, 44, 55), } for datum, expected in data.items(): self.assertEqual(stream.extract_params('m', datum), expected) def testCallWin32UsesLookup(self): listener = Mock() stream = AnsiToWin32(listener) stream.win32_calls = { 1: (lambda *_, **__: listener(11),), 2: (lambda *_, **__: listener(22),), 3: (lambda *_, **__: listener(33),), } stream.call_win32('m', (3, 1, 99, 2)) self.assertEqual( [a[0][0] for a in listener.call_args_list], [33, 11, 22] ) def test_osc_codes(self): mockStdout = Mock() stream = AnsiToWin32(mockStdout, convert=True) with patch('colorama.ansitowin32.winterm') as winterm: data = [ '\033]0\x07', # missing arguments '\033]0;foo\x08', # wrong OSC command '\033]0;colorama_test_title\x07', # should work '\033]1;colorama_test_title\x07', # wrong set command '\033]2;colorama_test_title\x07', # should work '\033]' + ';' * 64 + '\x08', # see issue #247 ] for code in data: stream.write(code) self.assertEqual(winterm.set_title.call_count, 2) def test_native_windows_ansi(self): with ExitStack() as stack: def p(a, b): stack.enter_context(patch(a, b, create=True)) # Pretend to be on Windows p("colorama.ansitowin32.os.name", "nt") p("colorama.ansitowin32.winapi_test", lambda: True) p("colorama.win32.winapi_test", lambda: True) p("colorama.winterm.win32.windll", 
"non-None") p("colorama.winterm.get_osfhandle", lambda _: 1234) # Pretend that our mock stream has native ANSI support p( "colorama.winterm.win32.GetConsoleMode", lambda _: ENABLE_VIRTUAL_TERMINAL_PROCESSING, ) SetConsoleMode = Mock() p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) stdout = Mock() stdout.closed = False stdout.isatty.return_value = True stdout.fileno.return_value = 1 # Our fake console says it has native vt support, so AnsiToWin32 should # enable that support and do nothing else. stream = AnsiToWin32(stdout) SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) self.assertFalse(stream.strip) self.assertFalse(stream.convert) self.assertFalse(stream.should_wrap()) # Now let's pretend we're on an old Windows console, that doesn't have # native ANSI support. p("colorama.winterm.win32.GetConsoleMode", lambda _: 0) SetConsoleMode = Mock() p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) stream = AnsiToWin32(stdout) SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) self.assertTrue(stream.strip) self.assertTrue(stream.convert) self.assertTrue(stream.should_wrap()) if __name__ == '__main__': main()
10,678
Python
35.2
87
0.579884
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/importer.py
import importlib from typing import Any class ImportFromStringError(Exception): pass def import_from_string(import_str: Any) -> Any: if not isinstance(import_str, str): return import_str module_str, _, attrs_str = import_str.partition(":") if not module_str or not attrs_str: message = ( 'Import string "{import_str}" must be in format "<module>:<attribute>".' ) raise ImportFromStringError(message.format(import_str=import_str)) try: module = importlib.import_module(module_str) except ImportError as exc: if exc.name != module_str: raise exc from None message = 'Could not import module "{module_str}".' raise ImportFromStringError(message.format(module_str=module_str)) instance = module try: for attr_str in attrs_str.split("."): instance = getattr(instance, attr_str) except AttributeError: message = 'Attribute "{attrs_str}" not found in module "{module_str}".' raise ImportFromStringError( message.format(attrs_str=attrs_str, module_str=module_str) ) return instance
1,166
Python
28.923076
84
0.635506
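A usage sketch for import_from_string() above, resolving stdlib targets so the example carries no extra dependencies; note that dotted attribute paths after the colon are walked with getattr().

from uvicorn.importer import ImportFromStringError, import_from_string

assert import_from_string("math:pi") == 3.141592653589793
join = import_from_string("os:path.join")        # dotted attributes are resolved
assert join("a", "b").count("a") == 1

try:
    import_from_string("math")                   # missing the ":<attribute>" part
except ImportFromStringError as exc:
    print(exc)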
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/main.py
import asyncio import logging import os import platform import ssl import sys import typing import click import uvicorn from uvicorn.config import ( HTTP_PROTOCOLS, INTERFACES, LIFESPAN, LOG_LEVELS, LOGGING_CONFIG, LOOP_SETUPS, SSL_PROTOCOL_VERSION, WS_PROTOCOLS, Config, HTTPProtocolType, InterfaceType, LifespanType, LoopSetupType, WSProtocolType, ) from uvicorn.server import Server, ServerState # noqa: F401 # Used to be defined here. from uvicorn.supervisors import ChangeReload, Multiprocess if typing.TYPE_CHECKING: from asgiref.typing import ASGIApplication LEVEL_CHOICES = click.Choice(list(LOG_LEVELS.keys())) HTTP_CHOICES = click.Choice(list(HTTP_PROTOCOLS.keys())) WS_CHOICES = click.Choice(list(WS_PROTOCOLS.keys())) LIFESPAN_CHOICES = click.Choice(list(LIFESPAN.keys())) LOOP_CHOICES = click.Choice([key for key in LOOP_SETUPS.keys() if key != "none"]) INTERFACE_CHOICES = click.Choice(INTERFACES) STARTUP_FAILURE = 3 logger = logging.getLogger("uvicorn.error") def print_version(ctx: click.Context, param: click.Parameter, value: bool) -> None: if not value or ctx.resilient_parsing: return click.echo( "Running uvicorn %s with %s %s on %s" % ( uvicorn.__version__, platform.python_implementation(), platform.python_version(), platform.system(), ) ) ctx.exit() @click.command(context_settings={"auto_envvar_prefix": "UVICORN"}) @click.argument("app") @click.option( "--host", type=str, default="127.0.0.1", help="Bind socket to this host.", show_default=True, ) @click.option( "--port", type=int, default=8000, help="Bind socket to this port.", show_default=True, ) @click.option("--uds", type=str, default=None, help="Bind to a UNIX domain socket.") @click.option( "--fd", type=int, default=None, help="Bind to socket from this file descriptor." ) @click.option("--reload", is_flag=True, default=False, help="Enable auto-reload.") @click.option( "--reload-dir", "reload_dirs", multiple=True, help="Set reload directories explicitly, instead of using the current working" " directory.", type=click.Path(exists=True), ) @click.option( "--reload-include", "reload_includes", multiple=True, help="Set glob patterns to include while watching for files. Includes '*.py' " "by default; these defaults can be overridden with `--reload-exclude`. " "This option has no effect unless watchfiles is installed.", ) @click.option( "--reload-exclude", "reload_excludes", multiple=True, help="Set glob patterns to exclude while watching for files. Includes " "'.*, .py[cod], .sw.*, ~*' by default; these defaults can be overridden " "with `--reload-include`. This option has no effect unless watchfiles is " "installed.", ) @click.option( "--reload-delay", type=float, default=0.25, show_default=True, help="Delay between previous and next check if application needs to be." " Defaults to 0.25s.", ) @click.option( "--workers", default=None, type=int, help="Number of worker processes. Defaults to the $WEB_CONCURRENCY environment" " variable if available, or 1. 
Not valid with --reload.", ) @click.option( "--loop", type=LOOP_CHOICES, default="auto", help="Event loop implementation.", show_default=True, ) @click.option( "--http", type=HTTP_CHOICES, default="auto", help="HTTP protocol implementation.", show_default=True, ) @click.option( "--ws", type=WS_CHOICES, default="auto", help="WebSocket protocol implementation.", show_default=True, ) @click.option( "--ws-max-size", type=int, default=16777216, help="WebSocket max size message in bytes", show_default=True, ) @click.option( "--ws-ping-interval", type=float, default=20.0, help="WebSocket ping interval", show_default=True, ) @click.option( "--ws-ping-timeout", type=float, default=20.0, help="WebSocket ping timeout", show_default=True, ) @click.option( "--ws-per-message-deflate", type=bool, default=True, help="WebSocket per-message-deflate compression", show_default=True, ) @click.option( "--lifespan", type=LIFESPAN_CHOICES, default="auto", help="Lifespan implementation.", show_default=True, ) @click.option( "--interface", type=INTERFACE_CHOICES, default="auto", help="Select ASGI3, ASGI2, or WSGI as the application interface.", show_default=True, ) @click.option( "--env-file", type=click.Path(exists=True), default=None, help="Environment configuration file.", show_default=True, ) @click.option( "--log-config", type=click.Path(exists=True), default=None, help="Logging configuration file. Supported formats: .ini, .json, .yaml.", show_default=True, ) @click.option( "--log-level", type=LEVEL_CHOICES, default=None, help="Log level. [default: info]", show_default=True, ) @click.option( "--access-log/--no-access-log", is_flag=True, default=True, help="Enable/Disable access log.", ) @click.option( "--use-colors/--no-use-colors", is_flag=True, default=None, help="Enable/Disable colorized logging.", ) @click.option( "--proxy-headers/--no-proxy-headers", is_flag=True, default=True, help="Enable/Disable X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to " "populate remote address info.", ) @click.option( "--server-header/--no-server-header", is_flag=True, default=True, help="Enable/Disable default Server header.", ) @click.option( "--date-header/--no-date-header", is_flag=True, default=True, help="Enable/Disable default Date header.", ) @click.option( "--forwarded-allow-ips", type=str, default=None, help="Comma separated list of IPs to trust with proxy headers. 
Defaults to" " the $FORWARDED_ALLOW_IPS environment variable if available, or '127.0.0.1'.", ) @click.option( "--root-path", type=str, default="", help="Set the ASGI 'root_path' for applications submounted below a given URL path.", ) @click.option( "--limit-concurrency", type=int, default=None, help="Maximum number of concurrent connections or tasks to allow, before issuing" " HTTP 503 responses.", ) @click.option( "--backlog", type=int, default=2048, help="Maximum number of connections to hold in backlog", ) @click.option( "--limit-max-requests", type=int, default=None, help="Maximum number of requests to service before terminating the process.", ) @click.option( "--timeout-keep-alive", type=int, default=5, help="Close Keep-Alive connections if no new data is received within this timeout.", show_default=True, ) @click.option( "--ssl-keyfile", type=str, default=None, help="SSL key file", show_default=True ) @click.option( "--ssl-certfile", type=str, default=None, help="SSL certificate file", show_default=True, ) @click.option( "--ssl-keyfile-password", type=str, default=None, help="SSL keyfile password", show_default=True, ) @click.option( "--ssl-version", type=int, default=int(SSL_PROTOCOL_VERSION), help="SSL version to use (see stdlib ssl module's)", show_default=True, ) @click.option( "--ssl-cert-reqs", type=int, default=int(ssl.CERT_NONE), help="Whether client certificate is required (see stdlib ssl module's)", show_default=True, ) @click.option( "--ssl-ca-certs", type=str, default=None, help="CA certificates file", show_default=True, ) @click.option( "--ssl-ciphers", type=str, default="TLSv1", help="Ciphers to use (see stdlib ssl module's)", show_default=True, ) @click.option( "--header", "headers", multiple=True, help="Specify custom default HTTP response headers as a Name:Value pair", ) @click.option( "--version", is_flag=True, callback=print_version, expose_value=False, is_eager=True, help="Display the uvicorn version and exit.", ) @click.option( "--app-dir", default="", show_default=True, help="Look for APP in the specified directory, by adding this to the PYTHONPATH." " Defaults to the current working directory.", ) @click.option( "--h11-max-incomplete-event-size", "h11_max_incomplete_event_size", type=int, default=None, help="For h11, the maximum number of bytes to buffer of an incomplete event.", ) @click.option( "--factory", is_flag=True, default=False, help="Treat APP as an application factory, i.e. 
a () -> <ASGI app> callable.", show_default=True, ) def main( app: str, host: str, port: int, uds: str, fd: int, loop: LoopSetupType, http: HTTPProtocolType, ws: WSProtocolType, ws_max_size: int, ws_ping_interval: float, ws_ping_timeout: float, ws_per_message_deflate: bool, lifespan: LifespanType, interface: InterfaceType, reload: bool, reload_dirs: typing.List[str], reload_includes: typing.List[str], reload_excludes: typing.List[str], reload_delay: float, workers: int, env_file: str, log_config: str, log_level: str, access_log: bool, proxy_headers: bool, server_header: bool, date_header: bool, forwarded_allow_ips: str, root_path: str, limit_concurrency: int, backlog: int, limit_max_requests: int, timeout_keep_alive: int, ssl_keyfile: str, ssl_certfile: str, ssl_keyfile_password: str, ssl_version: int, ssl_cert_reqs: int, ssl_ca_certs: str, ssl_ciphers: str, headers: typing.List[str], use_colors: bool, app_dir: str, h11_max_incomplete_event_size: typing.Optional[int], factory: bool, ) -> None: run( app, host=host, port=port, uds=uds, fd=fd, loop=loop, http=http, ws=ws, ws_max_size=ws_max_size, ws_ping_interval=ws_ping_interval, ws_ping_timeout=ws_ping_timeout, ws_per_message_deflate=ws_per_message_deflate, lifespan=lifespan, env_file=env_file, log_config=LOGGING_CONFIG if log_config is None else log_config, log_level=log_level, access_log=access_log, interface=interface, reload=reload, reload_dirs=reload_dirs or None, reload_includes=reload_includes or None, reload_excludes=reload_excludes or None, reload_delay=reload_delay, workers=workers, proxy_headers=proxy_headers, server_header=server_header, date_header=date_header, forwarded_allow_ips=forwarded_allow_ips, root_path=root_path, limit_concurrency=limit_concurrency, backlog=backlog, limit_max_requests=limit_max_requests, timeout_keep_alive=timeout_keep_alive, ssl_keyfile=ssl_keyfile, ssl_certfile=ssl_certfile, ssl_keyfile_password=ssl_keyfile_password, ssl_version=ssl_version, ssl_cert_reqs=ssl_cert_reqs, ssl_ca_certs=ssl_ca_certs, ssl_ciphers=ssl_ciphers, headers=[header.split(":", 1) for header in headers], # type: ignore[misc] use_colors=use_colors, factory=factory, app_dir=app_dir, h11_max_incomplete_event_size=h11_max_incomplete_event_size, ) def run( app: typing.Union["ASGIApplication", typing.Callable, str], *, host: str = "127.0.0.1", port: int = 8000, uds: typing.Optional[str] = None, fd: typing.Optional[int] = None, loop: LoopSetupType = "auto", http: typing.Union[typing.Type[asyncio.Protocol], HTTPProtocolType] = "auto", ws: typing.Union[typing.Type[asyncio.Protocol], WSProtocolType] = "auto", ws_max_size: int = 16777216, ws_ping_interval: typing.Optional[float] = 20.0, ws_ping_timeout: typing.Optional[float] = 20.0, ws_per_message_deflate: bool = True, lifespan: LifespanType = "auto", interface: InterfaceType = "auto", reload: bool = False, reload_dirs: typing.Optional[typing.Union[typing.List[str], str]] = None, reload_includes: typing.Optional[typing.Union[typing.List[str], str]] = None, reload_excludes: typing.Optional[typing.Union[typing.List[str], str]] = None, reload_delay: float = 0.25, workers: typing.Optional[int] = None, env_file: typing.Optional[typing.Union[str, os.PathLike]] = None, log_config: typing.Optional[ typing.Union[typing.Dict[str, typing.Any], str] ] = LOGGING_CONFIG, log_level: typing.Optional[typing.Union[str, int]] = None, access_log: bool = True, proxy_headers: bool = True, server_header: bool = True, date_header: bool = True, forwarded_allow_ips: typing.Optional[typing.Union[typing.List[str], 
str]] = None, root_path: str = "", limit_concurrency: typing.Optional[int] = None, backlog: int = 2048, limit_max_requests: typing.Optional[int] = None, timeout_keep_alive: int = 5, ssl_keyfile: typing.Optional[str] = None, ssl_certfile: typing.Optional[typing.Union[str, os.PathLike]] = None, ssl_keyfile_password: typing.Optional[str] = None, ssl_version: int = SSL_PROTOCOL_VERSION, ssl_cert_reqs: int = ssl.CERT_NONE, ssl_ca_certs: typing.Optional[str] = None, ssl_ciphers: str = "TLSv1", headers: typing.Optional[typing.List[typing.Tuple[str, str]]] = None, use_colors: typing.Optional[bool] = None, app_dir: typing.Optional[str] = None, factory: bool = False, h11_max_incomplete_event_size: typing.Optional[int] = None, ) -> None: if app_dir is not None: sys.path.insert(0, app_dir) config = Config( app, host=host, port=port, uds=uds, fd=fd, loop=loop, http=http, ws=ws, ws_max_size=ws_max_size, ws_ping_interval=ws_ping_interval, ws_ping_timeout=ws_ping_timeout, ws_per_message_deflate=ws_per_message_deflate, lifespan=lifespan, interface=interface, reload=reload, reload_dirs=reload_dirs, reload_includes=reload_includes, reload_excludes=reload_excludes, reload_delay=reload_delay, workers=workers, env_file=env_file, log_config=log_config, log_level=log_level, access_log=access_log, proxy_headers=proxy_headers, server_header=server_header, date_header=date_header, forwarded_allow_ips=forwarded_allow_ips, root_path=root_path, limit_concurrency=limit_concurrency, backlog=backlog, limit_max_requests=limit_max_requests, timeout_keep_alive=timeout_keep_alive, ssl_keyfile=ssl_keyfile, ssl_certfile=ssl_certfile, ssl_keyfile_password=ssl_keyfile_password, ssl_version=ssl_version, ssl_cert_reqs=ssl_cert_reqs, ssl_ca_certs=ssl_ca_certs, ssl_ciphers=ssl_ciphers, headers=headers, use_colors=use_colors, factory=factory, h11_max_incomplete_event_size=h11_max_incomplete_event_size, ) server = Server(config=config) if (config.reload or config.workers > 1) and not isinstance(app, str): logger = logging.getLogger("uvicorn.error") logger.warning( "You must pass the application as an import string to enable 'reload' or " "'workers'." ) sys.exit(1) if config.should_reload: sock = config.bind_socket() ChangeReload(config, target=server.run, sockets=[sock]).run() elif config.workers > 1: sock = config.bind_socket() Multiprocess(config, target=server.run, sockets=[sock]).run() else: server.run() if config.uds and os.path.exists(config.uds): os.remove(config.uds) # pragma: py-win32 if not server.started and not config.should_reload and config.workers == 1: sys.exit(STARTUP_FAILURE) if __name__ == "__main__": main() # pragma: no cover
16,265
Python
27.141868
88
0.639287
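A minimal sketch of driving the `run()` helper from uvicorn/main.py above programmatically; the inline ASGI app and the host/port values are illustrative assumptions, not part of the bundled source:

# Sketch: start the bundled uvicorn programmatically.
import uvicorn

async def app(scope, receive, send):
    # Tiny ASGI 3 app used only for illustration.
    assert scope["type"] == "http"
    await send({"type": "http.response.start", "status": 200,
                "headers": [(b"content-type", b"text/plain")]})
    await send({"type": "http.response.body", "body": b"hello"})

if __name__ == "__main__":
    # Passing the app object directly; note that run() above exits with an
    # error unless you pass an import string ("module:app") when reload is
    # enabled or workers > 1.
    uvicorn.run(app, host="127.0.0.1", port=8000, log_level="info")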
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/config.py
import asyncio import inspect import json import logging import logging.config import os import socket import ssl import sys from pathlib import Path from typing import ( TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union, ) from uvicorn.logging import TRACE_LOG_LEVEL if sys.version_info < (3, 8): # pragma: py-gte-38 from typing_extensions import Literal else: # pragma: py-lt-38 from typing import Literal import click from uvicorn.importer import ImportFromStringError, import_from_string from uvicorn.middleware.asgi2 import ASGI2Middleware from uvicorn.middleware.message_logger import MessageLoggerMiddleware from uvicorn.middleware.proxy_headers import ProxyHeadersMiddleware from uvicorn.middleware.wsgi import WSGIMiddleware if TYPE_CHECKING: from asgiref.typing import ASGIApplication HTTPProtocolType = Literal["auto", "h11", "httptools"] WSProtocolType = Literal["auto", "none", "websockets", "wsproto"] LifespanType = Literal["auto", "on", "off"] LoopSetupType = Literal["none", "auto", "asyncio", "uvloop"] InterfaceType = Literal["auto", "asgi3", "asgi2", "wsgi"] LOG_LEVELS: Dict[str, int] = { "critical": logging.CRITICAL, "error": logging.ERROR, "warning": logging.WARNING, "info": logging.INFO, "debug": logging.DEBUG, "trace": TRACE_LOG_LEVEL, } HTTP_PROTOCOLS: Dict[HTTPProtocolType, str] = { "auto": "uvicorn.protocols.http.auto:AutoHTTPProtocol", "h11": "uvicorn.protocols.http.h11_impl:H11Protocol", "httptools": "uvicorn.protocols.http.httptools_impl:HttpToolsProtocol", } WS_PROTOCOLS: Dict[WSProtocolType, Optional[str]] = { "auto": "uvicorn.protocols.websockets.auto:AutoWebSocketsProtocol", "none": None, "websockets": "uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol", "wsproto": "uvicorn.protocols.websockets.wsproto_impl:WSProtocol", } LIFESPAN: Dict[LifespanType, str] = { "auto": "uvicorn.lifespan.on:LifespanOn", "on": "uvicorn.lifespan.on:LifespanOn", "off": "uvicorn.lifespan.off:LifespanOff", } LOOP_SETUPS: Dict[LoopSetupType, Optional[str]] = { "none": None, "auto": "uvicorn.loops.auto:auto_loop_setup", "asyncio": "uvicorn.loops.asyncio:asyncio_setup", "uvloop": "uvicorn.loops.uvloop:uvloop_setup", } INTERFACES: List[InterfaceType] = ["auto", "asgi3", "asgi2", "wsgi"] SSL_PROTOCOL_VERSION: int = ssl.PROTOCOL_TLS_SERVER LOGGING_CONFIG: Dict[str, Any] = { "version": 1, "disable_existing_loggers": False, "formatters": { "default": { "()": "uvicorn.logging.DefaultFormatter", "fmt": "%(levelprefix)s %(message)s", "use_colors": None, }, "access": { "()": "uvicorn.logging.AccessFormatter", "fmt": '%(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s', # noqa: E501 }, }, "handlers": { "default": { "formatter": "default", "class": "logging.StreamHandler", "stream": "ext://sys.stderr", }, "access": { "formatter": "access", "class": "logging.StreamHandler", "stream": "ext://sys.stdout", }, }, "loggers": { "uvicorn": {"handlers": ["default"], "level": "INFO", "propagate": False}, "uvicorn.error": {"level": "INFO"}, "uvicorn.access": {"handlers": ["access"], "level": "INFO", "propagate": False}, }, } logger = logging.getLogger("uvicorn.error") def create_ssl_context( certfile: Union[str, os.PathLike], keyfile: Optional[Union[str, os.PathLike]], password: Optional[str], ssl_version: int, cert_reqs: int, ca_certs: Optional[Union[str, os.PathLike]], ciphers: Optional[str], ) -> ssl.SSLContext: ctx = ssl.SSLContext(ssl_version) get_password = (lambda: password) if password else None ctx.load_cert_chain(certfile, keyfile, get_password) 
ctx.verify_mode = ssl.VerifyMode(cert_reqs) if ca_certs: ctx.load_verify_locations(ca_certs) if ciphers: ctx.set_ciphers(ciphers) return ctx def is_dir(path: Path) -> bool: try: if not path.is_absolute(): path = path.resolve() return path.is_dir() except OSError: return False def resolve_reload_patterns( patterns_list: List[str], directories_list: List[str] ) -> Tuple[List[str], List[Path]]: directories: List[Path] = list(set(map(Path, directories_list.copy()))) patterns: List[str] = patterns_list.copy() current_working_directory = Path.cwd() for pattern in patterns_list: # Special case for the .* pattern, otherwise this would only match # hidden directories which is probably undesired if pattern == ".*": continue patterns.append(pattern) if is_dir(Path(pattern)): directories.append(Path(pattern)) else: for match in current_working_directory.glob(pattern): if is_dir(match): directories.append(match) directories = list(set(directories)) directories = list(map(Path, directories)) directories = list(map(lambda x: x.resolve(), directories)) directories = list( {reload_path for reload_path in directories if is_dir(reload_path)} ) children = [] for j in range(len(directories)): for k in range(j + 1, len(directories)): if directories[j] in directories[k].parents: children.append(directories[k]) # pragma: py-darwin elif directories[k] in directories[j].parents: children.append(directories[j]) directories = list(set(directories).difference(set(children))) return list(set(patterns)), directories def _normalize_dirs(dirs: Union[List[str], str, None]) -> List[str]: if dirs is None: return [] if isinstance(dirs, str): return [dirs] return list(set(dirs)) class Config: def __init__( self, app: Union["ASGIApplication", Callable, str], host: str = "127.0.0.1", port: int = 8000, uds: Optional[str] = None, fd: Optional[int] = None, loop: LoopSetupType = "auto", http: Union[Type[asyncio.Protocol], HTTPProtocolType] = "auto", ws: Union[Type[asyncio.Protocol], WSProtocolType] = "auto", ws_max_size: int = 16 * 1024 * 1024, ws_ping_interval: Optional[float] = 20.0, ws_ping_timeout: Optional[float] = 20.0, ws_per_message_deflate: bool = True, lifespan: LifespanType = "auto", env_file: Optional[Union[str, os.PathLike]] = None, log_config: Optional[Union[Dict[str, Any], str]] = LOGGING_CONFIG, log_level: Optional[Union[str, int]] = None, access_log: bool = True, use_colors: Optional[bool] = None, interface: InterfaceType = "auto", reload: bool = False, reload_dirs: Optional[Union[List[str], str]] = None, reload_delay: float = 0.25, reload_includes: Optional[Union[List[str], str]] = None, reload_excludes: Optional[Union[List[str], str]] = None, workers: Optional[int] = None, proxy_headers: bool = True, server_header: bool = True, date_header: bool = True, forwarded_allow_ips: Optional[Union[List[str], str]] = None, root_path: str = "", limit_concurrency: Optional[int] = None, limit_max_requests: Optional[int] = None, backlog: int = 2048, timeout_keep_alive: int = 5, timeout_notify: int = 30, callback_notify: Optional[Callable[..., Awaitable[None]]] = None, ssl_keyfile: Optional[str] = None, ssl_certfile: Optional[Union[str, os.PathLike]] = None, ssl_keyfile_password: Optional[str] = None, ssl_version: int = SSL_PROTOCOL_VERSION, ssl_cert_reqs: int = ssl.CERT_NONE, ssl_ca_certs: Optional[str] = None, ssl_ciphers: str = "TLSv1", headers: Optional[List[Tuple[str, str]]] = None, factory: bool = False, h11_max_incomplete_event_size: Optional[int] = None, ): self.app = app self.host = host self.port = port self.uds = uds 
self.fd = fd self.loop = loop self.http = http self.ws = ws self.ws_max_size = ws_max_size self.ws_ping_interval = ws_ping_interval self.ws_ping_timeout = ws_ping_timeout self.ws_per_message_deflate = ws_per_message_deflate self.lifespan = lifespan self.log_config = log_config self.log_level = log_level self.access_log = access_log self.use_colors = use_colors self.interface = interface self.reload = reload self.reload_delay = reload_delay self.workers = workers or 1 self.proxy_headers = proxy_headers self.server_header = server_header self.date_header = date_header self.root_path = root_path self.limit_concurrency = limit_concurrency self.limit_max_requests = limit_max_requests self.backlog = backlog self.timeout_keep_alive = timeout_keep_alive self.timeout_notify = timeout_notify self.callback_notify = callback_notify self.ssl_keyfile = ssl_keyfile self.ssl_certfile = ssl_certfile self.ssl_keyfile_password = ssl_keyfile_password self.ssl_version = ssl_version self.ssl_cert_reqs = ssl_cert_reqs self.ssl_ca_certs = ssl_ca_certs self.ssl_ciphers = ssl_ciphers self.headers: List[Tuple[str, str]] = headers or [] self.encoded_headers: List[Tuple[bytes, bytes]] = [] self.factory = factory self.h11_max_incomplete_event_size = h11_max_incomplete_event_size self.loaded = False self.configure_logging() self.reload_dirs: List[Path] = [] self.reload_dirs_excludes: List[Path] = [] self.reload_includes: List[str] = [] self.reload_excludes: List[str] = [] if ( reload_dirs or reload_includes or reload_excludes ) and not self.should_reload: logger.warning( "Current configuration will not reload as not all conditions are met, " "please refer to documentation." ) if self.should_reload: reload_dirs = _normalize_dirs(reload_dirs) reload_includes = _normalize_dirs(reload_includes) reload_excludes = _normalize_dirs(reload_excludes) self.reload_includes, self.reload_dirs = resolve_reload_patterns( reload_includes, reload_dirs ) self.reload_excludes, self.reload_dirs_excludes = resolve_reload_patterns( reload_excludes, [] ) reload_dirs_tmp = self.reload_dirs.copy() for directory in self.reload_dirs_excludes: for reload_directory in reload_dirs_tmp: if ( directory == reload_directory or directory in reload_directory.parents ): try: self.reload_dirs.remove(reload_directory) except ValueError: pass for pattern in self.reload_excludes: if pattern in self.reload_includes: self.reload_includes.remove(pattern) if not self.reload_dirs: if reload_dirs: logger.warning( "Provided reload directories %s did not contain valid " + "directories, watching current working directory.", reload_dirs, ) self.reload_dirs = [Path(os.getcwd())] logger.info( "Will watch for changes in these directories: %s", sorted(list(map(str, self.reload_dirs))), ) if env_file is not None: from dotenv import load_dotenv logger.info("Loading environment from '%s'", env_file) load_dotenv(dotenv_path=env_file) if workers is None and "WEB_CONCURRENCY" in os.environ: self.workers = int(os.environ["WEB_CONCURRENCY"]) self.forwarded_allow_ips: Union[List[str], str] if forwarded_allow_ips is None: self.forwarded_allow_ips = os.environ.get( "FORWARDED_ALLOW_IPS", "127.0.0.1" ) else: self.forwarded_allow_ips = forwarded_allow_ips if self.reload and self.workers > 1: logger.warning('"workers" flag is ignored when reloading is enabled.') @property def asgi_version(self) -> Literal["2.0", "3.0"]: mapping: Dict[str, Literal["2.0", "3.0"]] = { "asgi2": "2.0", "asgi3": "3.0", "wsgi": "3.0", } return mapping[self.interface] @property def is_ssl(self) -> bool: return 
bool(self.ssl_keyfile or self.ssl_certfile) @property def use_subprocess(self) -> bool: return bool(self.reload or self.workers > 1) def configure_logging(self) -> None: logging.addLevelName(TRACE_LOG_LEVEL, "TRACE") if self.log_config is not None: if isinstance(self.log_config, dict): if self.use_colors in (True, False): self.log_config["formatters"]["default"][ "use_colors" ] = self.use_colors self.log_config["formatters"]["access"][ "use_colors" ] = self.use_colors logging.config.dictConfig(self.log_config) elif self.log_config.endswith(".json"): with open(self.log_config) as file: loaded_config = json.load(file) logging.config.dictConfig(loaded_config) elif self.log_config.endswith((".yaml", ".yml")): # Install the PyYAML package or the uvicorn[standard] optional # dependencies to enable this functionality. import yaml with open(self.log_config) as file: loaded_config = yaml.safe_load(file) logging.config.dictConfig(loaded_config) else: # See the note about fileConfig() here: # https://docs.python.org/3/library/logging.config.html#configuration-file-format logging.config.fileConfig( self.log_config, disable_existing_loggers=False ) if self.log_level is not None: if isinstance(self.log_level, str): log_level = LOG_LEVELS[self.log_level] else: log_level = self.log_level logging.getLogger("uvicorn.error").setLevel(log_level) logging.getLogger("uvicorn.access").setLevel(log_level) logging.getLogger("uvicorn.asgi").setLevel(log_level) if self.access_log is False: logging.getLogger("uvicorn.access").handlers = [] logging.getLogger("uvicorn.access").propagate = False def load(self) -> None: assert not self.loaded if self.is_ssl: assert self.ssl_certfile self.ssl: Optional[ssl.SSLContext] = create_ssl_context( keyfile=self.ssl_keyfile, certfile=self.ssl_certfile, password=self.ssl_keyfile_password, ssl_version=self.ssl_version, cert_reqs=self.ssl_cert_reqs, ca_certs=self.ssl_ca_certs, ciphers=self.ssl_ciphers, ) else: self.ssl = None encoded_headers = [ (key.lower().encode("latin1"), value.encode("latin1")) for key, value in self.headers ] self.encoded_headers = ( [(b"server", b"uvicorn")] + encoded_headers if b"server" not in dict(encoded_headers) and self.server_header else encoded_headers ) if isinstance(self.http, str): http_protocol_class = import_from_string(HTTP_PROTOCOLS[self.http]) self.http_protocol_class: Type[asyncio.Protocol] = http_protocol_class else: self.http_protocol_class = self.http if isinstance(self.ws, str): ws_protocol_class = import_from_string(WS_PROTOCOLS[self.ws]) self.ws_protocol_class: Optional[Type[asyncio.Protocol]] = ws_protocol_class else: self.ws_protocol_class = self.ws self.lifespan_class = import_from_string(LIFESPAN[self.lifespan]) try: self.loaded_app = import_from_string(self.app) except ImportFromStringError as exc: logger.error("Error loading ASGI app. %s" % exc) sys.exit(1) try: self.loaded_app = self.loaded_app() except TypeError as exc: if self.factory: logger.error("Error loading ASGI app factory: %s", exc) sys.exit(1) else: if not self.factory: logger.warning( "ASGI app factory detected. Using it, " "but please consider setting the --factory flag explicitly." 
) if self.interface == "auto": if inspect.isclass(self.loaded_app): use_asgi_3 = hasattr(self.loaded_app, "__await__") elif inspect.isfunction(self.loaded_app): use_asgi_3 = asyncio.iscoroutinefunction(self.loaded_app) else: call = getattr(self.loaded_app, "__call__", None) use_asgi_3 = asyncio.iscoroutinefunction(call) self.interface = "asgi3" if use_asgi_3 else "asgi2" if self.interface == "wsgi": self.loaded_app = WSGIMiddleware(self.loaded_app) self.ws_protocol_class = None elif self.interface == "asgi2": self.loaded_app = ASGI2Middleware(self.loaded_app) if logger.level <= TRACE_LOG_LEVEL: self.loaded_app = MessageLoggerMiddleware(self.loaded_app) if self.proxy_headers: self.loaded_app = ProxyHeadersMiddleware( self.loaded_app, trusted_hosts=self.forwarded_allow_ips ) self.loaded = True def setup_event_loop(self) -> None: loop_setup: Optional[Callable] = import_from_string(LOOP_SETUPS[self.loop]) if loop_setup is not None: loop_setup(use_subprocess=self.use_subprocess) def bind_socket(self) -> socket.socket: logger_args: List[Union[str, int]] if self.uds: # pragma: py-win32 path = self.uds sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: sock.bind(path) uds_perms = 0o666 os.chmod(self.uds, uds_perms) except OSError as exc: logger.error(exc) sys.exit(1) message = "Uvicorn running on unix socket %s (Press CTRL+C to quit)" sock_name_format = "%s" color_message = ( "Uvicorn running on " + click.style(sock_name_format, bold=True) + " (Press CTRL+C to quit)" ) logger_args = [self.uds] elif self.fd: # pragma: py-win32 sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM) message = "Uvicorn running on socket %s (Press CTRL+C to quit)" fd_name_format = "%s" color_message = ( "Uvicorn running on " + click.style(fd_name_format, bold=True) + " (Press CTRL+C to quit)" ) logger_args = [sock.getsockname()] else: family = socket.AF_INET addr_format = "%s://%s:%d" if self.host and ":" in self.host: # pragma: py-win32 # It's an IPv6 address. family = socket.AF_INET6 addr_format = "%s://[%s]:%d" sock = socket.socket(family=family) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) try: sock.bind((self.host, self.port)) except OSError as exc: logger.error(exc) sys.exit(1) message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)" color_message = ( "Uvicorn running on " + click.style(addr_format, bold=True) + " (Press CTRL+C to quit)" ) protocol_name = "https" if self.is_ssl else "http" logger_args = [protocol_name, self.host, sock.getsockname()[1]] logger.info(message, *logger_args, extra={"color_message": color_message}) sock.set_inheritable(True) return sock @property def should_reload(self) -> bool: return isinstance(self.app, str) and self.reload
21,334
Python
35.658076
104
0.57392
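A small sketch of how the `Config` class above resolves protocol classes, the interface, and default headers once `load()` runs; the placeholder app and header value are assumptions for illustration:

# Sketch: build a Config directly and inspect what load() resolves.
from uvicorn.config import Config

async def app(scope, receive, send):  # placeholder ASGI app
    ...

config = Config(app, host="0.0.0.0", port=8080,
                headers=[("x-served-by", "demo")])
config.load()
print(config.asgi_version)         # "3.0": a coroutine function is ASGI 3
print(config.http_protocol_class)  # auto-selected h11 or httptools protocol
print(config.encoded_headers)      # (b"server", b"uvicorn") is prepended by default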
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/_types.py
import types import typing # WSGI Environ = typing.MutableMapping[str, typing.Any] ExcInfo = typing.Tuple[ typing.Type[BaseException], BaseException, typing.Optional[types.TracebackType] ] StartResponse = typing.Callable[ [str, typing.Iterable[typing.Tuple[str, str]], typing.Optional[ExcInfo]], None ] WSGIApp = typing.Callable[ [Environ, StartResponse], typing.Union[typing.Iterable[bytes], BaseException] ]
423
Python
27.266665
83
0.761229
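The aliases above describe the standard WSGI calling convention; a toy callable that satisfies the `WSGIApp` signature (illustrative only) might look like this:

# A toy WSGI callable matching the WSGIApp alias above.
from typing import Iterable

def wsgi_app(environ, start_response) -> Iterable[bytes]:
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello from WSGI"]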
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/__init__.py
from uvicorn.config import Config from uvicorn.main import Server, main, run __version__ = "0.21.1" __all__ = ["main", "run", "Config", "Server"]
147
Python
23.666663
45
0.653061
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/server.py
import asyncio import logging import os import platform import signal import socket import sys import threading import time from email.utils import formatdate from types import FrameType from typing import TYPE_CHECKING, List, Optional, Sequence, Set, Tuple, Union import click from uvicorn.config import Config if TYPE_CHECKING: from uvicorn.protocols.http.h11_impl import H11Protocol from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol from uvicorn.protocols.websockets.wsproto_impl import WSProtocol Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol] HANDLED_SIGNALS = ( signal.SIGINT, # Unix signal 2. Sent by Ctrl+C. signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`. ) logger = logging.getLogger("uvicorn.error") class ServerState: """ Shared servers state that is available between all protocol instances. """ def __init__(self) -> None: self.total_requests = 0 self.connections: Set["Protocols"] = set() self.tasks: Set[asyncio.Task] = set() self.default_headers: List[Tuple[bytes, bytes]] = [] class Server: def __init__(self, config: Config) -> None: self.config = config self.server_state = ServerState() self.started = False self.should_exit = False self.force_exit = False self.last_notified = 0.0 def run(self, sockets: Optional[List[socket.socket]] = None) -> None: self.config.setup_event_loop() return asyncio.run(self.serve(sockets=sockets)) async def serve(self, sockets: Optional[List[socket.socket]] = None) -> None: process_id = os.getpid() config = self.config if not config.loaded: config.load() self.lifespan = config.lifespan_class(config) self.install_signal_handlers() message = "Started server process [%d]" color_message = "Started server process [" + click.style("%d", fg="cyan") + "]" logger.info(message, process_id, extra={"color_message": color_message}) await self.startup(sockets=sockets) if self.should_exit: return await self.main_loop() await self.shutdown(sockets=sockets) message = "Finished server process [%d]" color_message = "Finished server process [" + click.style("%d", fg="cyan") + "]" logger.info(message, process_id, extra={"color_message": color_message}) async def startup(self, sockets: Optional[List[socket.socket]] = None) -> None: await self.lifespan.startup() if self.lifespan.should_exit: self.should_exit = True return config = self.config def create_protocol( _loop: Optional[asyncio.AbstractEventLoop] = None, ) -> asyncio.Protocol: return config.http_protocol_class( # type: ignore[call-arg] config=config, server_state=self.server_state, app_state=self.lifespan.state, _loop=_loop, ) loop = asyncio.get_running_loop() listeners: Sequence[socket.SocketType] if sockets is not None: # Explicitly passed a list of open sockets. # We use this when the server is run from a Gunicorn worker. def _share_socket( sock: socket.SocketType, ) -> socket.SocketType: # pragma py-linux pragma: py-darwin # Windows requires the socket be explicitly shared across # multiple workers (processes). 
from socket import fromshare # type: ignore[attr-defined] sock_data = sock.share(os.getpid()) # type: ignore[attr-defined] return fromshare(sock_data) self.servers = [] for sock in sockets: if config.workers > 1 and platform.system() == "Windows": sock = _share_socket( # type: ignore[assignment] sock ) # pragma py-linux pragma: py-darwin server = await loop.create_server( create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog ) self.servers.append(server) listeners = sockets elif config.fd is not None: # pragma: py-win32 # Use an existing socket, from a file descriptor. sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM) server = await loop.create_server( create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog ) assert server.sockets is not None # mypy listeners = server.sockets self.servers = [server] elif config.uds is not None: # pragma: py-win32 # Create a socket using UNIX domain socket. uds_perms = 0o666 if os.path.exists(config.uds): uds_perms = os.stat(config.uds).st_mode server = await loop.create_unix_server( create_protocol, path=config.uds, ssl=config.ssl, backlog=config.backlog ) os.chmod(config.uds, uds_perms) assert server.sockets is not None # mypy listeners = server.sockets self.servers = [server] else: # Standard case. Create a socket from a host/port pair. try: server = await loop.create_server( create_protocol, host=config.host, port=config.port, ssl=config.ssl, backlog=config.backlog, ) except OSError as exc: logger.error(exc) await self.lifespan.shutdown() sys.exit(1) assert server.sockets is not None listeners = server.sockets self.servers = [server] if sockets is None: self._log_started_message(listeners) else: # We're most likely running multiple workers, so a message has already been # logged by `config.bind_socket()`. pass self.started = True def _log_started_message(self, listeners: Sequence[socket.SocketType]) -> None: config = self.config if config.fd is not None: # pragma: py-win32 sock = listeners[0] logger.info( "Uvicorn running on socket %s (Press CTRL+C to quit)", sock.getsockname(), ) elif config.uds is not None: # pragma: py-win32 logger.info( "Uvicorn running on unix socket %s (Press CTRL+C to quit)", config.uds ) else: addr_format = "%s://%s:%d" host = "0.0.0.0" if config.host is None else config.host if ":" in host: # It's an IPv6 address. addr_format = "%s://[%s]:%d" port = config.port if port == 0: port = listeners[0].getsockname()[1] protocol_name = "https" if config.ssl else "http" message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)" color_message = ( "Uvicorn running on " + click.style(addr_format, bold=True) + " (Press CTRL+C to quit)" ) logger.info( message, protocol_name, host, port, extra={"color_message": color_message}, ) async def main_loop(self) -> None: counter = 0 should_exit = await self.on_tick(counter) while not should_exit: counter += 1 counter = counter % 864000 await asyncio.sleep(0.1) should_exit = await self.on_tick(counter) async def on_tick(self, counter: int) -> bool: # Update the default headers, once per second. if counter % 10 == 0: current_time = time.time() current_date = formatdate(current_time, usegmt=True).encode() if self.config.date_header: date_header = [(b"date", current_date)] else: date_header = [] self.server_state.default_headers = ( date_header + self.config.encoded_headers ) # Callback to `callback_notify` once every `timeout_notify` seconds. 
if self.config.callback_notify is not None: if current_time - self.last_notified > self.config.timeout_notify: self.last_notified = current_time await self.config.callback_notify() # Determine if we should exit. if self.should_exit: return True if self.config.limit_max_requests is not None: return self.server_state.total_requests >= self.config.limit_max_requests return False async def shutdown(self, sockets: Optional[List[socket.socket]] = None) -> None: logger.info("Shutting down") # Stop accepting new connections. for server in self.servers: server.close() for sock in sockets or []: sock.close() for server in self.servers: await server.wait_closed() # Request shutdown on all existing connections. for connection in list(self.server_state.connections): connection.shutdown() await asyncio.sleep(0.1) # Wait for existing connections to finish sending responses. if self.server_state.connections and not self.force_exit: msg = "Waiting for connections to close. (CTRL+C to force quit)" logger.info(msg) while self.server_state.connections and not self.force_exit: await asyncio.sleep(0.1) # Wait for existing tasks to complete. if self.server_state.tasks and not self.force_exit: msg = "Waiting for background tasks to complete. (CTRL+C to force quit)" logger.info(msg) while self.server_state.tasks and not self.force_exit: await asyncio.sleep(0.1) # Send the lifespan shutdown event, and wait for application shutdown. if not self.force_exit: await self.lifespan.shutdown() def install_signal_handlers(self) -> None: if threading.current_thread() is not threading.main_thread(): # Signals can only be listened to from the main thread. return loop = asyncio.get_event_loop() try: for sig in HANDLED_SIGNALS: loop.add_signal_handler(sig, self.handle_exit, sig, None) except NotImplementedError: # pragma: no cover # Windows for sig in HANDLED_SIGNALS: signal.signal(sig, self.handle_exit) def handle_exit(self, sig: int, frame: Optional[FrameType]) -> None: if self.should_exit and sig == signal.SIGINT: self.force_exit = True else: self.should_exit = True
11,312
Python
34.687697
88
0.572401
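A sketch of driving the `Server` class above from an event loop you already own via `serve()`, which the top-level `run()` otherwise wraps in `asyncio.run()`; the inline app is a placeholder assumption:

# Sketch: run Server.serve() inside an existing event loop.
import asyncio
from uvicorn import Config, Server

async def app(scope, receive, send):  # placeholder ASGI app
    await send({"type": "http.response.start", "status": 204, "headers": []})
    await send({"type": "http.response.body", "body": b""})

async def main():
    config = Config(app, port=8000, lifespan="off")
    server = Server(config)
    await server.serve()  # installs signal handlers, binds, then main_loop()

asyncio.run(main())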
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/_subprocess.py
""" Some light wrappers around Python's multiprocessing, to deal with cleanly starting child processes. """ import multiprocessing import os import sys from multiprocessing.context import SpawnProcess from socket import socket from typing import Callable, List, Optional from uvicorn.config import Config multiprocessing.allow_connection_pickling() spawn = multiprocessing.get_context("spawn") def get_subprocess( config: Config, target: Callable[..., None], sockets: List[socket], ) -> SpawnProcess: """ Called in the parent process, to instantiate a new child process instance. The child is not yet started at this point. * config - The Uvicorn configuration instance. * target - A callable that accepts a list of sockets. In practice this will be the `Server.run()` method. * sockets - A list of sockets to pass to the server. Sockets are bound once by the parent process, and then passed to the child processes. """ # We pass across the stdin fileno, and reopen it in the child process. # This is required for some debugging environments. stdin_fileno: Optional[int] try: stdin_fileno = sys.stdin.fileno() except OSError: stdin_fileno = None kwargs = { "config": config, "target": target, "sockets": sockets, "stdin_fileno": stdin_fileno, } return spawn.Process(target=subprocess_started, kwargs=kwargs) def subprocess_started( config: Config, target: Callable[..., None], sockets: List[socket], stdin_fileno: Optional[int], ) -> None: """ Called when the child process starts. * config - The Uvicorn configuration instance. * target - A callable that accepts a list of sockets. In practice this will be the `Server.run()` method. * sockets - A list of sockets to pass to the server. Sockets are bound once by the parent process, and then passed to the child processes. * stdin_fileno - The file number of sys.stdin, so that it can be reattached to the child process. """ # Re-open stdin. if stdin_fileno is not None: sys.stdin = os.fdopen(stdin_fileno) # Logging needs to be setup again for each child. config.configure_logging() # Now we can call into `Server.run(sockets=sockets)` target(sockets=sockets)
2,403
Python
30.220779
79
0.669996
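A sketch of the parent-side pattern the helpers above support, pairing `Config.bind_socket()` with `get_subprocess()`; this mirrors what the supervisors do internally and is not a public API:

# Sketch: bind once in the parent process, then serve from a spawned child,
# as Multiprocess/ChangeReload do internally (private API, shown for clarity).
from uvicorn._subprocess import get_subprocess
from uvicorn.config import Config
from uvicorn.server import Server

async def app(scope, receive, send):  # placeholder ASGI app
    ...

if __name__ == "__main__":
    config = Config(app)
    server = Server(config)
    sock = config.bind_socket()  # the parent owns the listening socket
    process = get_subprocess(config, target=server.run, sockets=[sock])
    process.start()              # the child reconfigures logging, then serves
    process.join()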
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/__main__.py
import uvicorn if __name__ == "__main__": uvicorn.main()
62
Python
11.599998
26
0.548387
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/workers.py
import asyncio import logging import signal import sys from typing import Any, Dict from gunicorn.arbiter import Arbiter from gunicorn.workers.base import Worker from uvicorn.config import Config from uvicorn.main import Server class UvicornWorker(Worker): """ A worker class for Gunicorn that interfaces with an ASGI consumer callable, rather than a WSGI callable. """ CONFIG_KWARGS: Dict[str, Any] = {"loop": "auto", "http": "auto"} def __init__(self, *args: Any, **kwargs: Any) -> None: super(UvicornWorker, self).__init__(*args, **kwargs) logger = logging.getLogger("uvicorn.error") logger.handlers = self.log.error_log.handlers logger.setLevel(self.log.error_log.level) logger.propagate = False logger = logging.getLogger("uvicorn.access") logger.handlers = self.log.access_log.handlers logger.setLevel(self.log.access_log.level) logger.propagate = False config_kwargs: dict = { "app": None, "log_config": None, "timeout_keep_alive": self.cfg.keepalive, "timeout_notify": self.timeout, "callback_notify": self.callback_notify, "limit_max_requests": self.max_requests, "forwarded_allow_ips": self.cfg.forwarded_allow_ips, } if self.cfg.is_ssl: ssl_kwargs = { "ssl_keyfile": self.cfg.ssl_options.get("keyfile"), "ssl_certfile": self.cfg.ssl_options.get("certfile"), "ssl_keyfile_password": self.cfg.ssl_options.get("password"), "ssl_version": self.cfg.ssl_options.get("ssl_version"), "ssl_cert_reqs": self.cfg.ssl_options.get("cert_reqs"), "ssl_ca_certs": self.cfg.ssl_options.get("ca_certs"), "ssl_ciphers": self.cfg.ssl_options.get("ciphers"), } config_kwargs.update(ssl_kwargs) if self.cfg.settings["backlog"].value: config_kwargs["backlog"] = self.cfg.settings["backlog"].value config_kwargs.update(self.CONFIG_KWARGS) self.config = Config(**config_kwargs) def init_process(self) -> None: self.config.setup_event_loop() super(UvicornWorker, self).init_process() def init_signals(self) -> None: # Reset signals so Gunicorn doesn't swallow subprocess return codes # other signals are set up by Server.install_signal_handlers() # See: https://github.com/encode/uvicorn/issues/894 for s in self.SIGNALS: signal.signal(s, signal.SIG_DFL) signal.signal(signal.SIGUSR1, self.handle_usr1) # Don't let SIGUSR1 disturb active requests by interrupting system calls signal.siginterrupt(signal.SIGUSR1, False) def _install_sigquit_handler(self) -> None: """Install a SIGQUIT handler on workers. - https://github.com/encode/uvicorn/issues/1116 - https://github.com/benoitc/gunicorn/issues/2604 """ loop = asyncio.get_running_loop() loop.add_signal_handler(signal.SIGQUIT, self.handle_exit, signal.SIGQUIT, None) async def _serve(self) -> None: self.config.app = self.wsgi server = Server(config=self.config) self._install_sigquit_handler() await server.serve(sockets=self.sockets) if not server.started: sys.exit(Arbiter.WORKER_BOOT_ERROR) def run(self) -> None: return asyncio.run(self._serve()) async def callback_notify(self) -> None: self.notify() class UvicornH11Worker(UvicornWorker): CONFIG_KWARGS = {"loop": "asyncio", "http": "h11"}
3,675
Python
33.679245
87
0.622585
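The worker classes above are selected by name on the Gunicorn command line; a sketch of customizing one via `CONFIG_KWARGS`, the same mechanism `UvicornH11Worker` uses (the module and class names here are hypothetical):

# Sketch: tune the worker's Config defaults, then point Gunicorn at it with:
#   gunicorn example:app -k mymodule.TunedWorker
from uvicorn.workers import UvicornWorker

class TunedWorker(UvicornWorker):
    # Force the pure-Python HTTP protocol and disable websocket support.
    CONFIG_KWARGS = {"loop": "asyncio", "http": "h11", "ws": "none"}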
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/logging.py
import http import logging import sys from copy import copy from typing import Optional import click if sys.version_info < (3, 8): # pragma: py-gte-38 from typing_extensions import Literal else: # pragma: py-lt-38 from typing import Literal TRACE_LOG_LEVEL = 5 class ColourizedFormatter(logging.Formatter): """ A custom log formatter class that: * Outputs the LOG_LEVEL with an appropriate color. * If a log call includes an `extras={"color_message": ...}` it will be used for formatting the output, instead of the plain text message. """ level_name_colors = { TRACE_LOG_LEVEL: lambda level_name: click.style(str(level_name), fg="blue"), logging.DEBUG: lambda level_name: click.style(str(level_name), fg="cyan"), logging.INFO: lambda level_name: click.style(str(level_name), fg="green"), logging.WARNING: lambda level_name: click.style(str(level_name), fg="yellow"), logging.ERROR: lambda level_name: click.style(str(level_name), fg="red"), logging.CRITICAL: lambda level_name: click.style( str(level_name), fg="bright_red" ), } def __init__( self, fmt: Optional[str] = None, datefmt: Optional[str] = None, style: Literal["%", "{", "$"] = "%", use_colors: Optional[bool] = None, ): if use_colors in (True, False): self.use_colors = use_colors else: self.use_colors = sys.stdout.isatty() super().__init__(fmt=fmt, datefmt=datefmt, style=style) def color_level_name(self, level_name: str, level_no: int) -> str: def default(level_name: str) -> str: return str(level_name) # pragma: no cover func = self.level_name_colors.get(level_no, default) return func(level_name) def should_use_colors(self) -> bool: return True # pragma: no cover def formatMessage(self, record: logging.LogRecord) -> str: recordcopy = copy(record) levelname = recordcopy.levelname seperator = " " * (8 - len(recordcopy.levelname)) if self.use_colors: levelname = self.color_level_name(levelname, recordcopy.levelno) if "color_message" in recordcopy.__dict__: recordcopy.msg = recordcopy.__dict__["color_message"] recordcopy.__dict__["message"] = recordcopy.getMessage() recordcopy.__dict__["levelprefix"] = levelname + ":" + seperator return super().formatMessage(recordcopy) class DefaultFormatter(ColourizedFormatter): def should_use_colors(self) -> bool: return sys.stderr.isatty() # pragma: no cover class AccessFormatter(ColourizedFormatter): status_code_colours = { 1: lambda code: click.style(str(code), fg="bright_white"), 2: lambda code: click.style(str(code), fg="green"), 3: lambda code: click.style(str(code), fg="yellow"), 4: lambda code: click.style(str(code), fg="red"), 5: lambda code: click.style(str(code), fg="bright_red"), } def get_status_code(self, status_code: int) -> str: try: status_phrase = http.HTTPStatus(status_code).phrase except ValueError: status_phrase = "" status_and_phrase = "%s %s" % (status_code, status_phrase) if self.use_colors: def default(code: int) -> str: return status_and_phrase # pragma: no cover func = self.status_code_colours.get(status_code // 100, default) return func(status_and_phrase) return status_and_phrase def formatMessage(self, record: logging.LogRecord) -> str: recordcopy = copy(record) ( client_addr, method, full_path, http_version, status_code, ) = recordcopy.args # type: ignore[misc] status_code = self.get_status_code(int(status_code)) # type: ignore[arg-type] request_line = "%s %s HTTP/%s" % (method, full_path, http_version) if self.use_colors: request_line = click.style(request_line, bold=True) recordcopy.__dict__.update( { "client_addr": client_addr, "request_line": request_line, "status_code": 
status_code, } ) return super().formatMessage(recordcopy)
4,397
Python
34.756097
86
0.595861
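A sketch of reusing the `DefaultFormatter` above on an ordinary `logging` handler outside uvicorn's own dictConfig; the format string mirrors the one in LOGGING_CONFIG:

# Sketch: attach uvicorn's colourized formatter to a plain logging handler.
import logging
from uvicorn.logging import DefaultFormatter

handler = logging.StreamHandler()
handler.setFormatter(DefaultFormatter(fmt="%(levelprefix)s %(message)s"))

log = logging.getLogger("demo")
log.addHandler(handler)
log.setLevel(logging.INFO)
log.info("level prefix is colourized when the stream is a TTY")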
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/loops/auto.py
def auto_loop_setup(use_subprocess: bool = False) -> None: try: import uvloop # noqa except ImportError: # pragma: no cover from uvicorn.loops.asyncio import asyncio_setup as loop_setup loop_setup(use_subprocess=use_subprocess) else: # pragma: no cover from uvicorn.loops.uvloop import uvloop_setup uvloop_setup(use_subprocess=use_subprocess)
400
Python
32.416664
69
0.6775
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/loops/uvloop.py
import asyncio import uvloop def uvloop_setup(use_subprocess: bool = False) -> None: asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
148
Python
17.624998
59
0.75
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/loops/asyncio.py
import asyncio import logging import sys logger = logging.getLogger("uvicorn.error") def asyncio_setup(use_subprocess: bool = False) -> None: # pragma: no cover if sys.version_info >= (3, 8) and sys.platform == "win32" and use_subprocess: asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
327
Python
28.818179
81
0.727829
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/lifespan/on.py
import asyncio import logging from asyncio import Queue from typing import TYPE_CHECKING, Any, Dict, Union from uvicorn import Config if TYPE_CHECKING: from asgiref.typing import ( LifespanScope, LifespanShutdownCompleteEvent, LifespanShutdownEvent, LifespanShutdownFailedEvent, LifespanStartupCompleteEvent, LifespanStartupEvent, LifespanStartupFailedEvent, ) LifespanReceiveMessage = Union[LifespanStartupEvent, LifespanShutdownEvent] LifespanSendMessage = Union[ LifespanStartupFailedEvent, LifespanShutdownFailedEvent, LifespanStartupCompleteEvent, LifespanShutdownCompleteEvent, ] STATE_TRANSITION_ERROR = "Got invalid state transition on lifespan protocol." class LifespanOn: def __init__(self, config: Config) -> None: if not config.loaded: config.load() self.config = config self.logger = logging.getLogger("uvicorn.error") self.startup_event = asyncio.Event() self.shutdown_event = asyncio.Event() self.receive_queue: "Queue[LifespanReceiveMessage]" = asyncio.Queue() self.error_occured = False self.startup_failed = False self.shutdown_failed = False self.should_exit = False self.state: Dict[str, Any] = {} async def startup(self) -> None: self.logger.info("Waiting for application startup.") loop = asyncio.get_event_loop() main_lifespan_task = loop.create_task(self.main()) # noqa: F841 # Keep a hard reference to prevent garbage collection # See https://github.com/encode/uvicorn/pull/972 startup_event: LifespanStartupEvent = {"type": "lifespan.startup"} await self.receive_queue.put(startup_event) await self.startup_event.wait() if self.startup_failed or (self.error_occured and self.config.lifespan == "on"): self.logger.error("Application startup failed. Exiting.") self.should_exit = True else: self.logger.info("Application startup complete.") async def shutdown(self) -> None: if self.error_occured: return self.logger.info("Waiting for application shutdown.") shutdown_event: LifespanShutdownEvent = {"type": "lifespan.shutdown"} await self.receive_queue.put(shutdown_event) await self.shutdown_event.wait() if self.shutdown_failed or ( self.error_occured and self.config.lifespan == "on" ): self.logger.error("Application shutdown failed. Exiting.") self.should_exit = True else: self.logger.info("Application shutdown complete.") async def main(self) -> None: try: app = self.config.loaded_app scope: LifespanScope = { # type: ignore[typeddict-item] "type": "lifespan", "asgi": {"version": self.config.asgi_version, "spec_version": "2.0"}, "state": self.state, } await app(scope, self.receive, self.send) except BaseException as exc: self.asgi = None self.error_occured = True if self.startup_failed or self.shutdown_failed: return if self.config.lifespan == "auto": msg = "ASGI 'lifespan' protocol appears unsupported." 
self.logger.info(msg) else: msg = "Exception in 'lifespan' protocol\n" self.logger.error(msg, exc_info=exc) finally: self.startup_event.set() self.shutdown_event.set() async def send(self, message: "LifespanSendMessage") -> None: assert message["type"] in ( "lifespan.startup.complete", "lifespan.startup.failed", "lifespan.shutdown.complete", "lifespan.shutdown.failed", ) if message["type"] == "lifespan.startup.complete": assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR self.startup_event.set() elif message["type"] == "lifespan.startup.failed": assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR self.startup_event.set() self.startup_failed = True if message.get("message"): self.logger.error(message["message"]) elif message["type"] == "lifespan.shutdown.complete": assert self.startup_event.is_set(), STATE_TRANSITION_ERROR assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR self.shutdown_event.set() elif message["type"] == "lifespan.shutdown.failed": assert self.startup_event.is_set(), STATE_TRANSITION_ERROR assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR self.shutdown_event.set() self.shutdown_failed = True if message.get("message"): self.logger.error(message["message"]) async def receive(self) -> "LifespanReceiveMessage": return await self.receive_queue.get()
5,312
Python
36.95
88
0.613705
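For reference, a minimal ASGI app handling the lifespan messages that `LifespanOn` above queues and awaits; illustrative, not taken from the bundled source:

# Sketch: an ASGI app completing the lifespan handshake driven by LifespanOn.
async def app(scope, receive, send):
    if scope["type"] == "lifespan":
        while True:
            message = await receive()
            if message["type"] == "lifespan.startup":
                # ... allocate resources here ...
                await send({"type": "lifespan.startup.complete"})
            elif message["type"] == "lifespan.shutdown":
                # ... release resources here ...
                await send({"type": "lifespan.shutdown.complete"})
                return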
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/lifespan/off.py
from typing import Any, Dict from uvicorn import Config class LifespanOff: def __init__(self, config: Config) -> None: self.should_exit = False self.state: Dict[str, Any] = {} async def startup(self) -> None: pass async def shutdown(self) -> None: pass
302
Python
17.937499
47
0.602649
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/middleware/proxy_headers.py
""" This middleware can be used when a known proxy is fronting the application, and is trusted to be properly setting the `X-Forwarded-Proto` and `X-Forwarded-For` headers with the connecting client information. Modifies the `client` and `scheme` information so that they reference the connecting client, rather that the connecting proxy. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies """ from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast if TYPE_CHECKING: from asgiref.typing import ( ASGI3Application, ASGIReceiveCallable, ASGISendCallable, HTTPScope, Scope, WebSocketScope, ) class ProxyHeadersMiddleware: def __init__( self, app: "ASGI3Application", trusted_hosts: Union[List[str], str] = "127.0.0.1", ) -> None: self.app = app if isinstance(trusted_hosts, str): self.trusted_hosts = {item.strip() for item in trusted_hosts.split(",")} else: self.trusted_hosts = set(trusted_hosts) self.always_trust = "*" in self.trusted_hosts def get_trusted_client_host( self, x_forwarded_for_hosts: List[str] ) -> Optional[str]: if self.always_trust: return x_forwarded_for_hosts[0] for host in reversed(x_forwarded_for_hosts): if host not in self.trusted_hosts: return host return None async def __call__( self, scope: "Scope", receive: "ASGIReceiveCallable", send: "ASGISendCallable" ) -> None: if scope["type"] in ("http", "websocket"): scope = cast(Union["HTTPScope", "WebSocketScope"], scope) client_addr: Optional[Tuple[str, int]] = scope.get("client") client_host = client_addr[0] if client_addr else None if self.always_trust or client_host in self.trusted_hosts: headers = dict(scope["headers"]) if b"x-forwarded-proto" in headers: # Determine if the incoming request was http or https based on # the X-Forwarded-Proto header. x_forwarded_proto = headers[b"x-forwarded-proto"].decode("latin1") scope["scheme"] = x_forwarded_proto.strip() # type: ignore[index] if b"x-forwarded-for" in headers: # Determine the client address from the last trusted IP in the # X-Forwarded-For header. We've lost the connecting client's port # information by now, so only include the host. x_forwarded_for = headers[b"x-forwarded-for"].decode("latin1") x_forwarded_for_hosts = [ item.strip() for item in x_forwarded_for.split(",") ] host = self.get_trusted_client_host(x_forwarded_for_hosts) port = 0 scope["client"] = (host, port) # type: ignore[arg-type] return await self.app(scope, receive, send)
3,072
Python
37.898734
86
0.588867
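A sketch of wrapping an app with the middleware above, trusting a single reverse proxy; the trusted host address is an assumption for illustration:

# Sketch: trust X-Forwarded-* headers from a known proxy at 10.0.0.1.
from uvicorn.middleware.proxy_headers import ProxyHeadersMiddleware

async def app(scope, receive, send):  # placeholder ASGI app
    ...

wrapped = ProxyHeadersMiddleware(app, trusted_hosts="10.0.0.1")
# For requests arriving from the trusted proxy, scope["scheme"] and
# scope["client"] now reflect the original connecting client.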
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/middleware/wsgi.py
import asyncio import concurrent.futures import io import sys import warnings from collections import deque from typing import TYPE_CHECKING, Deque, Iterable, Optional, Tuple if TYPE_CHECKING: from asgiref.typing import ( ASGIReceiveCallable, ASGIReceiveEvent, ASGISendCallable, ASGISendEvent, HTTPRequestEvent, HTTPResponseBodyEvent, HTTPResponseStartEvent, HTTPScope, ) from uvicorn._types import Environ, ExcInfo, StartResponse, WSGIApp def build_environ( scope: "HTTPScope", message: "ASGIReceiveEvent", body: io.BytesIO ) -> Environ: """ Builds a scope and request message into a WSGI environ object. """ environ = { "REQUEST_METHOD": scope["method"], "SCRIPT_NAME": "", "PATH_INFO": scope["path"].encode("utf8").decode("latin1"), "QUERY_STRING": scope["query_string"].decode("ascii"), "SERVER_PROTOCOL": "HTTP/%s" % scope["http_version"], "wsgi.version": (1, 0), "wsgi.url_scheme": scope.get("scheme", "http"), "wsgi.input": body, "wsgi.errors": sys.stdout, "wsgi.multithread": True, "wsgi.multiprocess": True, "wsgi.run_once": False, } # Get server name and port - required in WSGI, not in ASGI server = scope.get("server") if server is None: server = ("localhost", 80) environ["SERVER_NAME"] = server[0] environ["SERVER_PORT"] = server[1] # Get client IP address client = scope.get("client") if client is not None: environ["REMOTE_ADDR"] = client[0] # Go through headers and make them into environ entries for name, value in scope.get("headers", []): name_str: str = name.decode("latin1") if name_str == "content-length": corrected_name = "CONTENT_LENGTH" elif name_str == "content-type": corrected_name = "CONTENT_TYPE" else: corrected_name = "HTTP_%s" % name_str.upper().replace("-", "_") # HTTPbis say only ASCII chars are allowed in headers, but we latin1 # just in case value_str: str = value.decode("latin1") if corrected_name in environ: corrected_name_environ = environ[corrected_name] assert isinstance(corrected_name_environ, str) value_str = corrected_name_environ + "," + value_str environ[corrected_name] = value_str return environ class _WSGIMiddleware: def __init__(self, app: WSGIApp, workers: int = 10): warnings.warn( "Uvicorn's native WSGI implementation is deprecated, you " "should switch to a2wsgi (`pip install a2wsgi`).", DeprecationWarning, ) self.app = app self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=workers) async def __call__( self, scope: "HTTPScope", receive: "ASGIReceiveCallable", send: "ASGISendCallable", ) -> None: assert scope["type"] == "http" instance = WSGIResponder(self.app, self.executor, scope) await instance(receive, send) class WSGIResponder: def __init__( self, app: WSGIApp, executor: concurrent.futures.ThreadPoolExecutor, scope: "HTTPScope", ): self.app = app self.executor = executor self.scope = scope self.status = None self.response_headers = None self.send_event = asyncio.Event() self.send_queue: Deque[Optional["ASGISendEvent"]] = deque() self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop() self.response_started = False self.exc_info: Optional[ExcInfo] = None async def __call__( self, receive: "ASGIReceiveCallable", send: "ASGISendCallable" ) -> None: message: HTTPRequestEvent = await receive() # type: ignore[assignment] body = io.BytesIO(message.get("body", b"")) more_body = message.get("more_body", False) if more_body: body.seek(0, io.SEEK_END) while more_body: body_message: "HTTPRequestEvent" = ( await receive() # type: ignore[assignment] ) body.write(body_message.get("body", b"")) more_body = body_message.get("more_body", False) 
body.seek(0) environ = build_environ(self.scope, message, body) self.loop = asyncio.get_event_loop() wsgi = self.loop.run_in_executor( self.executor, self.wsgi, environ, self.start_response ) sender = self.loop.create_task(self.sender(send)) try: await asyncio.wait_for(wsgi, None) finally: self.send_queue.append(None) self.send_event.set() await asyncio.wait_for(sender, None) if self.exc_info is not None: raise self.exc_info[0].with_traceback(self.exc_info[1], self.exc_info[2]) async def sender(self, send: "ASGISendCallable") -> None: while True: if self.send_queue: message = self.send_queue.popleft() if message is None: return await send(message) else: await self.send_event.wait() self.send_event.clear() def start_response( self, status: str, response_headers: Iterable[Tuple[str, str]], exc_info: Optional[ExcInfo] = None, ) -> None: self.exc_info = exc_info if not self.response_started: self.response_started = True status_code_str, _ = status.split(" ", 1) status_code = int(status_code_str) headers = [ (name.encode("ascii"), value.encode("ascii")) for name, value in response_headers ] http_response_start_event: HTTPResponseStartEvent = { "type": "http.response.start", "status": status_code, "headers": headers, } self.send_queue.append(http_response_start_event) self.loop.call_soon_threadsafe(self.send_event.set) def wsgi(self, environ: Environ, start_response: StartResponse) -> None: for chunk in self.app(environ, start_response): # type: ignore response_body: HTTPResponseBodyEvent = { "type": "http.response.body", "body": chunk, "more_body": True, } self.send_queue.append(response_body) self.loop.call_soon_threadsafe(self.send_event.set) empty_body: HTTPResponseBodyEvent = { "type": "http.response.body", "body": b"", "more_body": False, } self.send_queue.append(empty_body) self.loop.call_soon_threadsafe(self.send_event.set) try: from a2wsgi import WSGIMiddleware except ModuleNotFoundError: WSGIMiddleware = _WSGIMiddleware # type: ignore[misc, assignment]
7,078
Python
33.871921
85
0.578695
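A sketch of serving a WSGI callable through the adapter above; as the final lines of the file show, uvicorn prefers the a2wsgi implementation when installed and otherwise falls back to the deprecated native one:

# Sketch: adapt a WSGI app so uvicorn's ASGI server can run it.
import uvicorn
from uvicorn.middleware.wsgi import WSGIMiddleware

def wsgi_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello from WSGI"]

uvicorn.run(WSGIMiddleware(wsgi_app), port=8000)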
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/middleware/asgi2.py
import typing if typing.TYPE_CHECKING: from asgiref.typing import ( ASGI2Application, ASGIReceiveCallable, ASGISendCallable, Scope, ) class ASGI2Middleware: def __init__(self, app: "ASGI2Application"): self.app = app async def __call__( self, scope: "Scope", receive: "ASGIReceiveCallable", send: "ASGISendCallable" ) -> None: instance = self.app(scope) await instance(receive, send)
472
Python
21.523808
86
0.616525
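For contrast with the adapter above, a minimal app written in the older two-callable ASGI 2 style; illustrative only:

# Sketch: the two-phase "instantiate with scope, then await" ASGI 2 style
# that ASGI2Middleware adapts to the single-callable ASGI 3 interface.
class ASGI2App:
    def __init__(self, scope):
        self.scope = scope

    async def __call__(self, receive, send):
        await send({"type": "http.response.start", "status": 200, "headers": []})
        await send({"type": "http.response.body", "body": b"asgi2"})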
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/middleware/message_logger.py
import logging from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from asgiref.typing import ( ASGI3Application, ASGIReceiveCallable, ASGIReceiveEvent, ASGISendCallable, ASGISendEvent, WWWScope, ) from uvicorn.logging import TRACE_LOG_LEVEL PLACEHOLDER_FORMAT = { "body": "<{length} bytes>", "bytes": "<{length} bytes>", "text": "<{length} chars>", "headers": "<...>", } def message_with_placeholders(message: Any) -> Any: """ Return an ASGI message, with any body-type content omitted and replaced with a placeholder. """ new_message = message.copy() for attr in PLACEHOLDER_FORMAT.keys(): if message.get(attr) is not None: content = message[attr] placeholder = PLACEHOLDER_FORMAT[attr].format(length=len(content)) new_message[attr] = placeholder return new_message class MessageLoggerMiddleware: def __init__(self, app: "ASGI3Application"): self.task_counter = 0 self.app = app self.logger = logging.getLogger("uvicorn.asgi") def trace(message: Any, *args: Any, **kwargs: Any) -> None: self.logger.log(TRACE_LOG_LEVEL, message, *args, **kwargs) self.logger.trace = trace # type: ignore async def __call__( self, scope: "WWWScope", receive: "ASGIReceiveCallable", send: "ASGISendCallable", ) -> None: self.task_counter += 1 task_counter = self.task_counter client = scope.get("client") prefix = "%s:%d - ASGI" % (client[0], client[1]) if client else "ASGI" async def inner_receive() -> "ASGIReceiveEvent": message = await receive() logged_message = message_with_placeholders(message) log_text = "%s [%d] Receive %s" self.logger.trace( # type: ignore log_text, prefix, task_counter, logged_message ) return message async def inner_send(message: "ASGISendEvent") -> None: logged_message = message_with_placeholders(message) log_text = "%s [%d] Send %s" self.logger.trace( # type: ignore log_text, prefix, task_counter, logged_message ) await send(message) logged_scope = message_with_placeholders(scope) log_text = "%s [%d] Started scope=%s" self.logger.trace(log_text, prefix, task_counter, logged_scope) # type: ignore try: await self.app(scope, inner_receive, inner_send) except BaseException as exc: log_text = "%s [%d] Raised exception" self.logger.trace(log_text, prefix, task_counter) # type: ignore raise exc from None else: log_text = "%s [%d] Completed" self.logger.trace(log_text, prefix, task_counter) # type: ignore
2,925
Python
31.511111
87
0.583248
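The middleware above is only installed when the error logger sits at TRACE level (see the `Config.load()` logic in config.py earlier); a sketch of switching it on without the CLI:

# Sketch: enable per-message ASGI tracing; at log_level="trace",
# Config.load() wraps the app in MessageLoggerMiddleware.
import uvicorn

async def app(scope, receive, send):  # placeholder ASGI app
    ...

uvicorn.run(app, log_level="trace")  # logs "... [n] Receive/Send ..." lines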
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/supervisors/statreload.py
import logging from pathlib import Path from socket import socket from typing import Callable, Dict, Iterator, List, Optional from uvicorn.config import Config from uvicorn.supervisors.basereload import BaseReload logger = logging.getLogger("uvicorn.error") class StatReload(BaseReload): def __init__( self, config: Config, target: Callable[[Optional[List[socket]]], None], sockets: List[socket], ) -> None: super().__init__(config, target, sockets) self.reloader_name = "StatReload" self.mtimes: Dict[Path, float] = {} if config.reload_excludes or config.reload_includes: logger.warning( "--reload-include and --reload-exclude have no effect unless " "watchfiles is installed." ) def should_restart(self) -> Optional[List[Path]]: self.pause() for file in self.iter_py_files(): try: mtime = file.stat().st_mtime except OSError: # pragma: nocover continue old_time = self.mtimes.get(file) if old_time is None: self.mtimes[file] = mtime continue elif mtime > old_time: return [file] return None def restart(self) -> None: self.mtimes = {} return super().restart() def iter_py_files(self) -> Iterator[Path]: for reload_dir in self.config.reload_dirs: for path in list(reload_dir.rglob("*.py")): yield path.resolve()
1,580
Python
28.277777
78
0.572785
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/supervisors/watchfilesreload.py
from pathlib import Path from socket import socket from typing import Callable, List, Optional from watchfiles import watch from uvicorn.config import Config from uvicorn.supervisors.basereload import BaseReload class FileFilter: def __init__(self, config: Config): default_includes = ["*.py"] self.includes = [ default for default in default_includes if default not in config.reload_excludes ] self.includes.extend(config.reload_includes) self.includes = list(set(self.includes)) default_excludes = [".*", ".py[cod]", ".sw.*", "~*"] self.excludes = [ default for default in default_excludes if default not in config.reload_includes ] self.exclude_dirs = [] for e in config.reload_excludes: p = Path(e) try: is_dir = p.is_dir() except OSError: # pragma: no cover # gets raised on Windows for values like "*.py" is_dir = False if is_dir: self.exclude_dirs.append(p) else: self.excludes.append(e) self.excludes = list(set(self.excludes)) def __call__(self, path: Path) -> bool: for include_pattern in self.includes: if path.match(include_pattern): for exclude_dir in self.exclude_dirs: if exclude_dir in path.parents: return False for exclude_pattern in self.excludes: if path.match(exclude_pattern): return False return True return False class WatchFilesReload(BaseReload): def __init__( self, config: Config, target: Callable[[Optional[List[socket]]], None], sockets: List[socket], ) -> None: super().__init__(config, target, sockets) self.reloader_name = "WatchFiles" self.reload_dirs = [] for directory in config.reload_dirs: if Path.cwd() not in directory.parents: self.reload_dirs.append(directory) if Path.cwd() not in self.reload_dirs: self.reload_dirs.append(Path.cwd()) self.watch_filter = FileFilter(config) self.watcher = watch( *self.reload_dirs, watch_filter=None, stop_event=self.should_exit, # using yield_on_timeout here mostly to make sure tests don't # hang forever, won't affect the class's behavior yield_on_timeout=True, ) def should_restart(self) -> Optional[List[Path]]: changes = next(self.watcher) if changes: unique_paths = {Path(c[1]) for c in changes} return [p for p in unique_paths if self.watch_filter(p)] return None
2,902
Python
31.255555
73
0.554445
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/supervisors/__init__.py
from typing import TYPE_CHECKING, Type from uvicorn.supervisors.basereload import BaseReload from uvicorn.supervisors.multiprocess import Multiprocess if TYPE_CHECKING: ChangeReload: Type[BaseReload] else: try: from uvicorn.supervisors.watchfilesreload import ( WatchFilesReload as ChangeReload, ) except ImportError: # pragma: no cover try: from uvicorn.supervisors.watchgodreload import ( WatchGodReload as ChangeReload, ) except ImportError: from uvicorn.supervisors.statreload import StatReload as ChangeReload __all__ = ["Multiprocess", "ChangeReload"]
670
Python
29.499999
81
0.689552
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/supervisors/basereload.py
import logging import os import signal import threading from pathlib import Path from socket import socket from types import FrameType from typing import Callable, Iterator, List, Optional import click from uvicorn._subprocess import get_subprocess from uvicorn.config import Config HANDLED_SIGNALS = ( signal.SIGINT, # Unix signal 2. Sent by Ctrl+C. signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`. ) logger = logging.getLogger("uvicorn.error") class BaseReload: def __init__( self, config: Config, target: Callable[[Optional[List[socket]]], None], sockets: List[socket], ) -> None: self.config = config self.target = target self.sockets = sockets self.should_exit = threading.Event() self.pid = os.getpid() self.reloader_name: Optional[str] = None def signal_handler(self, sig: int, frame: Optional[FrameType]) -> None: """ A signal handler that is registered with the parent process. """ self.should_exit.set() def run(self) -> None: self.startup() for changes in self: if changes: logger.warning( "%s detected changes in %s. Reloading...", self.reloader_name, ", ".join(map(_display_path, changes)), ) self.restart() self.shutdown() def pause(self) -> None: if self.should_exit.wait(self.config.reload_delay): raise StopIteration() def __iter__(self) -> Iterator[Optional[List[Path]]]: return self def __next__(self) -> Optional[List[Path]]: return self.should_restart() def startup(self) -> None: message = f"Started reloader process [{self.pid}] using {self.reloader_name}" color_message = "Started reloader process [{}] using {}".format( click.style(str(self.pid), fg="cyan", bold=True), click.style(str(self.reloader_name), fg="cyan", bold=True), ) logger.info(message, extra={"color_message": color_message}) for sig in HANDLED_SIGNALS: signal.signal(sig, self.signal_handler) self.process = get_subprocess( config=self.config, target=self.target, sockets=self.sockets ) self.process.start() def restart(self) -> None: self.process.terminate() self.process.join() self.process = get_subprocess( config=self.config, target=self.target, sockets=self.sockets ) self.process.start() def shutdown(self) -> None: self.process.terminate() self.process.join() for sock in self.sockets: sock.close() message = "Stopping reloader process [{}]".format(str(self.pid)) color_message = "Stopping reloader process [{}]".format( click.style(str(self.pid), fg="cyan", bold=True) ) logger.info(message, extra={"color_message": color_message}) def should_restart(self) -> Optional[List[Path]]: raise NotImplementedError("Reload strategies should override should_restart()") def _display_path(path: Path) -> str: try: return f"'{path.relative_to(Path.cwd())}'" except ValueError: return f"'{path}'"
3,341
Python
28.315789
87
0.595331
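Usage note: BaseReload handles all of the subprocess supervision, so a concrete strategy only needs to implement should_restart(). A hypothetical strategy as a sketch (MarkerFileReload and the ".reload-now" path are illustrative, not part of uvicorn) that restarts whenever a marker file appears:

from pathlib import Path
from typing import List, Optional

from uvicorn.supervisors.basereload import BaseReload

class MarkerFileReload(BaseReload):
    """Hypothetical strategy: restart the app when a marker file shows up."""

    def __init__(self, config, target, sockets) -> None:
        super().__init__(config, target, sockets)
        self.reloader_name = "MarkerFile"
        self.marker = Path(".reload-now")

    def should_restart(self) -> Optional[List[Path]]:
        self.pause()  # sleeps reload_delay; raises StopIteration on shutdown
        if self.marker.exists():
            self.marker.unlink()
            return [self.marker]
        return None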
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/supervisors/multiprocess.py
import logging import os import signal import threading from multiprocessing.context import SpawnProcess from socket import socket from types import FrameType from typing import Callable, List, Optional import click from uvicorn._subprocess import get_subprocess from uvicorn.config import Config HANDLED_SIGNALS = ( signal.SIGINT, # Unix signal 2. Sent by Ctrl+C. signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`. ) logger = logging.getLogger("uvicorn.error") class Multiprocess: def __init__( self, config: Config, target: Callable[[Optional[List[socket]]], None], sockets: List[socket], ) -> None: self.config = config self.target = target self.sockets = sockets self.processes: List[SpawnProcess] = [] self.should_exit = threading.Event() self.pid = os.getpid() def signal_handler(self, sig: int, frame: Optional[FrameType]) -> None: """ A signal handler that is registered with the parent process. """ self.should_exit.set() def run(self) -> None: self.startup() self.should_exit.wait() self.shutdown() def startup(self) -> None: message = "Started parent process [{}]".format(str(self.pid)) color_message = "Started parent process [{}]".format( click.style(str(self.pid), fg="cyan", bold=True) ) logger.info(message, extra={"color_message": color_message}) for sig in HANDLED_SIGNALS: signal.signal(sig, self.signal_handler) for idx in range(self.config.workers): process = get_subprocess( config=self.config, target=self.target, sockets=self.sockets ) process.start() self.processes.append(process) def shutdown(self) -> None: for process in self.processes: process.terminate() process.join() message = "Stopping parent process [{}]".format(str(self.pid)) color_message = "Stopping parent process [{}]".format( click.style(str(self.pid), fg="cyan", bold=True) ) logger.info(message, extra={"color_message": color_message})
2,231
Python
28.76
76
0.614971
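Usage note: a sketch of how the Multiprocess supervisor above is driven. This mirrors what uvicorn.main.run does when workers > 1: the socket is bound once in the parent and shared with every worker subprocess. The "app:app" string is again a placeholder.

from uvicorn import Config, Server
from uvicorn.supervisors import Multiprocess

config = Config("app:app", workers=4)
server = Server(config)
sock = config.bind_socket()  # bind once in the parent, share with workers

Multiprocess(config, target=server.run, sockets=[sock]).run()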
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/supervisors/watchgodreload.py
import logging import warnings from pathlib import Path from socket import socket from typing import TYPE_CHECKING, Callable, Dict, List, Optional from watchgod import DefaultWatcher from uvicorn.config import Config from uvicorn.supervisors.basereload import BaseReload if TYPE_CHECKING: import os DirEntry = os.DirEntry[str] logger = logging.getLogger("uvicorn.error") class CustomWatcher(DefaultWatcher): def __init__(self, root_path: Path, config: Config): default_includes = ["*.py"] self.includes = [ default for default in default_includes if default not in config.reload_excludes ] self.includes.extend(config.reload_includes) self.includes = list(set(self.includes)) default_excludes = [".*", ".py[cod]", ".sw.*", "~*"] self.excludes = [ default for default in default_excludes if default not in config.reload_includes ] self.excludes.extend(config.reload_excludes) self.excludes = list(set(self.excludes)) self.watched_dirs: Dict[str, bool] = {} self.watched_files: Dict[str, bool] = {} self.dirs_includes = set(config.reload_dirs) self.dirs_excludes = set(config.reload_dirs_excludes) self.resolved_root = root_path super().__init__(str(root_path)) def should_watch_file(self, entry: "DirEntry") -> bool: cached_result = self.watched_files.get(entry.path) if cached_result is not None: return cached_result entry_path = Path(entry) # cwd is not verified through should_watch_dir, so we need to verify here if entry_path.parent == Path.cwd() and not Path.cwd() in self.dirs_includes: self.watched_files[entry.path] = False return False for include_pattern in self.includes: if entry_path.match(include_pattern): for exclude_pattern in self.excludes: if entry_path.match(exclude_pattern): self.watched_files[entry.path] = False return False self.watched_files[entry.path] = True return True self.watched_files[entry.path] = False return False def should_watch_dir(self, entry: "DirEntry") -> bool: cached_result = self.watched_dirs.get(entry.path) if cached_result is not None: return cached_result entry_path = Path(entry) if entry_path in self.dirs_excludes: self.watched_dirs[entry.path] = False return False for exclude_pattern in self.excludes: if entry_path.match(exclude_pattern): is_watched = False if entry_path in self.dirs_includes: is_watched = True for directory in self.dirs_includes: if directory in entry_path.parents: is_watched = True if is_watched: logger.debug( "WatchGodReload detected a new excluded dir '%s' in '%s'; " "Adding to exclude list.", entry_path.relative_to(self.resolved_root), str(self.resolved_root), ) self.watched_dirs[entry.path] = False self.dirs_excludes.add(entry_path) return False if entry_path in self.dirs_includes: self.watched_dirs[entry.path] = True return True for directory in self.dirs_includes: if directory in entry_path.parents: self.watched_dirs[entry.path] = True return True for include_pattern in self.includes: if entry_path.match(include_pattern): logger.info( "WatchGodReload detected a new reload dir '%s' in '%s'; " "Adding to watch list.", str(entry_path.relative_to(self.resolved_root)), str(self.resolved_root), ) self.dirs_includes.add(entry_path) self.watched_dirs[entry.path] = True return True self.watched_dirs[entry.path] = False return False class WatchGodReload(BaseReload): def __init__( self, config: Config, target: Callable[[Optional[List[socket]]], None], sockets: List[socket], ) -> None: warnings.warn( '"watchgod" is depreciated, you should switch ' "to watchfiles (`pip install watchfiles`).", DeprecationWarning, ) super().__init__(config, target, sockets) 
self.reloader_name = "WatchGod" self.watchers = [] reload_dirs = [] for directory in config.reload_dirs: if Path.cwd() not in directory.parents: reload_dirs.append(directory) if Path.cwd() not in reload_dirs: reload_dirs.append(Path.cwd()) for w in reload_dirs: self.watchers.append(CustomWatcher(w.resolve(), self.config)) def should_restart(self) -> Optional[List[Path]]: self.pause() for watcher in self.watchers: change = watcher.check() if change != set(): return list({Path(c[1]) for c in change}) return None
5,491
Python
33.54088
84
0.562193
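Usage note: since this backend is only a fallback, instantiating it warns immediately. A short check of that behavior (requires the watchgod package to be installed; the lambda target is a placeholder):

import warnings

from uvicorn.config import Config
from uvicorn.supervisors.watchgodreload import WatchGodReload

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    WatchGodReload(Config("app:app", reload=True),
                   target=lambda sockets=None: None, sockets=[])

print(caught[0].category.__name__)  # DeprecationWarning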
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/utils.py
import asyncio import urllib.parse from typing import TYPE_CHECKING, Optional, Tuple if TYPE_CHECKING: from asgiref.typing import WWWScope def get_remote_addr(transport: asyncio.Transport) -> Optional[Tuple[str, int]]: socket_info = transport.get_extra_info("socket") if socket_info is not None: try: info = socket_info.getpeername() return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None except OSError: # pragma: no cover # This case appears to inconsistently occur with uvloop # bound to a unix domain socket. return None info = transport.get_extra_info("peername") if info is not None and isinstance(info, (list, tuple)) and len(info) == 2: return (str(info[0]), int(info[1])) return None def get_local_addr(transport: asyncio.Transport) -> Optional[Tuple[str, int]]: socket_info = transport.get_extra_info("socket") if socket_info is not None: info = socket_info.getsockname() return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None info = transport.get_extra_info("sockname") if info is not None and isinstance(info, (list, tuple)) and len(info) == 2: return (str(info[0]), int(info[1])) return None def is_ssl(transport: asyncio.Transport) -> bool: return bool(transport.get_extra_info("sslcontext")) def get_client_addr(scope: "WWWScope") -> str: client = scope.get("client") if not client: return "" return "%s:%d" % client def get_path_with_query_string(scope: "WWWScope") -> str: path_with_query_string = urllib.parse.quote(scope["path"]) if scope["query_string"]: path_with_query_string = "{}?{}".format( path_with_query_string, scope["query_string"].decode("ascii") ) return path_with_query_string
1,876
Python
32.517857
84
0.642857
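Usage note: the last two helpers only need a few scope keys, so they are easy to exercise with a hand-built dict standing in for a real ASGI scope (real scopes carry more keys, and type checkers expect a WWWScope):

from uvicorn.protocols.utils import get_client_addr, get_path_with_query_string

# Minimal stand-in for an ASGI HTTP scope.
scope = {
    "type": "http",
    "client": ("127.0.0.1", 51337),
    "path": "/items/a b",
    "query_string": b"page=2",
}
print(get_client_addr(scope))             # 127.0.0.1:51337
print(get_path_with_query_string(scope))  # /items/a%20b?page=2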
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/websockets/wsproto_impl.py
import asyncio import logging import sys import typing from urllib.parse import unquote import wsproto from wsproto import ConnectionType, events from wsproto.connection import ConnectionState from wsproto.extensions import Extension, PerMessageDeflate from wsproto.utilities import RemoteProtocolError from uvicorn.config import Config from uvicorn.logging import TRACE_LOG_LEVEL from uvicorn.protocols.utils import ( get_local_addr, get_path_with_query_string, get_remote_addr, is_ssl, ) from uvicorn.server import ServerState if typing.TYPE_CHECKING: from asgiref.typing import ( ASGISendEvent, WebSocketAcceptEvent, WebSocketCloseEvent, WebSocketConnectEvent, WebSocketDisconnectEvent, WebSocketReceiveEvent, WebSocketScope, WebSocketSendEvent, ) WebSocketEvent = typing.Union[ "WebSocketReceiveEvent", "WebSocketDisconnectEvent", "WebSocketConnectEvent", ] if sys.version_info < (3, 8): # pragma: py-gte-38 from typing_extensions import Literal else: # pragma: py-lt-38 from typing import Literal class WSProtocol(asyncio.Protocol): def __init__( self, config: Config, server_state: ServerState, app_state: typing.Dict[str, typing.Any], _loop: typing.Optional[asyncio.AbstractEventLoop] = None, ) -> None: if not config.loaded: config.load() self.config = config self.app = config.loaded_app self.loop = _loop or asyncio.get_event_loop() self.logger = logging.getLogger("uvicorn.error") self.root_path = config.root_path self.app_state = app_state # Shared server state self.connections = server_state.connections self.tasks = server_state.tasks self.default_headers = server_state.default_headers # Connection state self.transport: asyncio.Transport = None # type: ignore[assignment] self.server: typing.Optional[typing.Tuple[str, int]] = None self.client: typing.Optional[typing.Tuple[str, int]] = None self.scheme: Literal["wss", "ws"] = None # type: ignore[assignment] # WebSocket state self.queue: asyncio.Queue["WebSocketEvent"] = asyncio.Queue() self.handshake_complete = False self.close_sent = False self.conn = wsproto.WSConnection(connection_type=ConnectionType.SERVER) self.read_paused = False self.writable = asyncio.Event() self.writable.set() # Buffers self.bytes = b"" self.text = "" # Protocol interface def connection_made( # type: ignore[override] self, transport: asyncio.Transport ) -> None: self.connections.add(self) self.transport = transport self.server = get_local_addr(transport) self.client = get_remote_addr(transport) self.scheme = "wss" if is_ssl(transport) else "ws" if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection made", prefix) def connection_lost(self, exc: typing.Optional[Exception]) -> None: code = 1005 if self.handshake_complete else 1006 self.queue.put_nowait({"type": "websocket.disconnect", "code": code}) self.connections.remove(self) if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection lost", prefix) self.handshake_complete = True if exc is None: self.transport.close() def eof_received(self) -> None: pass def data_received(self, data: bytes) -> None: try: self.conn.receive_data(data) except RemoteProtocolError as err: # TODO: Remove `type: ignore` when wsproto fixes the type annotation. 
self.transport.write(self.conn.send(err.event_hint)) # type: ignore[arg-type] # noqa: E501 self.transport.close() else: self.handle_events() def handle_events(self) -> None: for event in self.conn.events(): if isinstance(event, events.Request): self.handle_connect(event) elif isinstance(event, events.TextMessage): self.handle_text(event) elif isinstance(event, events.BytesMessage): self.handle_bytes(event) elif isinstance(event, events.CloseConnection): self.handle_close(event) elif isinstance(event, events.Ping): self.handle_ping(event) def pause_writing(self) -> None: """ Called by the transport when the write buffer exceeds the high water mark. """ self.writable.clear() def resume_writing(self) -> None: """ Called by the transport when the write buffer drops below the low water mark. """ self.writable.set() def shutdown(self) -> None: if self.handshake_complete: self.queue.put_nowait({"type": "websocket.disconnect", "code": 1012}) output = self.conn.send(wsproto.events.CloseConnection(code=1012)) self.transport.write(output) else: self.send_500_response() self.transport.close() def on_task_complete(self, task: asyncio.Task) -> None: self.tasks.discard(task) # Event handlers def handle_connect(self, event: events.Request) -> None: headers = [(b"host", event.host.encode())] headers += [(key.lower(), value) for key, value in event.extra_headers] raw_path, _, query_string = event.target.partition("?") self.scope: "WebSocketScope" = { # type: ignore[typeddict-item] "type": "websocket", "asgi": {"version": self.config.asgi_version, "spec_version": "2.3"}, "http_version": "1.1", "scheme": self.scheme, "server": self.server, "client": self.client, "root_path": self.root_path, "path": unquote(raw_path), "raw_path": raw_path.encode("ascii"), "query_string": query_string.encode("ascii"), "headers": headers, "subprotocols": event.subprotocols, "extensions": None, "state": self.app_state.copy(), } self.queue.put_nowait({"type": "websocket.connect"}) task = self.loop.create_task(self.run_asgi()) task.add_done_callback(self.on_task_complete) self.tasks.add(task) def handle_text(self, event: events.TextMessage) -> None: self.text += event.data if event.message_finished: msg: "WebSocketReceiveEvent" = { # type: ignore[typeddict-item] "type": "websocket.receive", "text": self.text, } self.queue.put_nowait(msg) self.text = "" if not self.read_paused: self.read_paused = True self.transport.pause_reading() def handle_bytes(self, event: events.BytesMessage) -> None: self.bytes += event.data # todo: we may want to guard the size of self.bytes and self.text if event.message_finished: msg: "WebSocketReceiveEvent" = { # type: ignore[typeddict-item] "type": "websocket.receive", "bytes": self.bytes, } self.queue.put_nowait(msg) self.bytes = b"" if not self.read_paused: self.read_paused = True self.transport.pause_reading() def handle_close(self, event: events.CloseConnection) -> None: if self.conn.state == ConnectionState.REMOTE_CLOSING: self.transport.write(self.conn.send(event.response())) self.queue.put_nowait({"type": "websocket.disconnect", "code": event.code}) self.transport.close() def handle_ping(self, event: events.Ping) -> None: self.transport.write(self.conn.send(event.response())) def send_500_response(self) -> None: headers = [ (b"content-type", b"text/plain; charset=utf-8"), (b"connection", b"close"), ] output = self.conn.send( wsproto.events.RejectConnection( status_code=500, headers=headers, has_body=True ) ) output += self.conn.send( wsproto.events.RejectData(data=b"Internal Server Error") ) 
self.transport.write(output) async def run_asgi(self) -> None: try: result = await self.app(self.scope, self.receive, self.send) except BaseException: self.logger.exception("Exception in ASGI application\n") if not self.handshake_complete: self.send_500_response() self.transport.close() else: if not self.handshake_complete: msg = "ASGI callable returned without completing handshake." self.logger.error(msg) self.send_500_response() self.transport.close() elif result is not None: msg = "ASGI callable should return None, but returned '%s'." self.logger.error(msg, result) self.transport.close() async def send(self, message: "ASGISendEvent") -> None: await self.writable.wait() message_type = message["type"] if not self.handshake_complete: if message_type == "websocket.accept": message = typing.cast("WebSocketAcceptEvent", message) self.logger.info( '%s - "WebSocket %s" [accepted]', self.scope["client"], get_path_with_query_string(self.scope), ) subprotocol = message.get("subprotocol") extra_headers = self.default_headers + list(message.get("headers", [])) extensions: typing.List[Extension] = [] if self.config.ws_per_message_deflate: extensions.append(PerMessageDeflate()) if not self.transport.is_closing(): self.handshake_complete = True output = self.conn.send( wsproto.events.AcceptConnection( subprotocol=subprotocol, extensions=extensions, extra_headers=extra_headers, ) ) self.transport.write(output) elif message_type == "websocket.close": self.queue.put_nowait({"type": "websocket.disconnect", "code": 1006}) self.logger.info( '%s - "WebSocket %s" 403', self.scope["client"], get_path_with_query_string(self.scope), ) self.handshake_complete = True self.close_sent = True event = events.RejectConnection(status_code=403, headers=[]) output = self.conn.send(event) self.transport.write(output) self.transport.close() else: msg = ( "Expected ASGI message 'websocket.accept' or 'websocket.close', " "but got '%s'." ) raise RuntimeError(msg % message_type) elif not self.close_sent: if message_type == "websocket.send": message = typing.cast("WebSocketSendEvent", message) bytes_data = message.get("bytes") text_data = message.get("text") data = text_data if bytes_data is None else bytes_data output = self.conn.send( wsproto.events.Message(data=data) # type: ignore[type-var] ) if not self.transport.is_closing(): self.transport.write(output) elif message_type == "websocket.close": message = typing.cast("WebSocketCloseEvent", message) self.close_sent = True code = message.get("code", 1000) reason = message.get("reason", "") or "" self.queue.put_nowait({"type": "websocket.disconnect", "code": code}) output = self.conn.send( wsproto.events.CloseConnection(code=code, reason=reason) ) if not self.transport.is_closing(): self.transport.write(output) self.transport.close() else: msg = ( "Expected ASGI message 'websocket.send' or 'websocket.close'," " but got '%s'." ) raise RuntimeError(msg % message_type) else: msg = "Unexpected ASGI message '%s', after sending 'websocket.close'." raise RuntimeError(msg % message_type) async def receive(self) -> "WebSocketEvent": message = await self.queue.get() if self.read_paused and self.queue.empty(): self.read_paused = False self.transport.resume_reading() return message
13,486
Python
36.673184
104
0.565846
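Usage note: WSProtocol above translates wsproto events into ASGI websocket messages and back. A minimal echo application, as a sketch of the message types it queues and expects (run with e.g. uvicorn app:app --ws wsproto):

async def app(scope, receive, send):
    assert scope["type"] == "websocket"
    await receive()                           # {"type": "websocket.connect"}
    await send({"type": "websocket.accept"})  # emits wsproto AcceptConnection
    while True:
        event = await receive()
        if event["type"] == "websocket.disconnect":
            break
        # Echo text frames back; handle_text() queued them as "websocket.receive".
        await send({"type": "websocket.send", "text": event.get("text", "")})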
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/websockets/auto.py
import asyncio import typing AutoWebSocketsProtocol: typing.Optional[typing.Callable[..., asyncio.Protocol]] try: import websockets # noqa except ImportError: # pragma: no cover try: import wsproto # noqa except ImportError: AutoWebSocketsProtocol = None else: from uvicorn.protocols.websockets.wsproto_impl import WSProtocol AutoWebSocketsProtocol = WSProtocol else: from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol AutoWebSocketsProtocol = WebSocketProtocol
548
Python
26.449999
79
0.74635
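Usage note: because AutoWebSocketsProtocol can be None, callers have to check it before use. A small probe of what the current environment resolved it to:

from uvicorn.protocols.websockets.auto import AutoWebSocketsProtocol

if AutoWebSocketsProtocol is None:
    print("neither 'websockets' nor 'wsproto' is installed")
else:
    print(AutoWebSocketsProtocol.__name__)  # WebSocketProtocol or WSProtocol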
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/websockets/websockets_impl.py
import asyncio import http import logging import sys from typing import ( TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union, cast, ) from urllib.parse import unquote import websockets from websockets.datastructures import Headers from websockets.exceptions import ConnectionClosed from websockets.extensions.permessage_deflate import ServerPerMessageDeflateFactory from websockets.legacy.server import HTTPResponse from websockets.server import WebSocketServerProtocol from websockets.typing import Subprotocol from uvicorn.config import Config from uvicorn.logging import TRACE_LOG_LEVEL from uvicorn.protocols.utils import ( get_local_addr, get_path_with_query_string, get_remote_addr, is_ssl, ) from uvicorn.server import ServerState if sys.version_info < (3, 8): # pragma: py-gte-38 from typing_extensions import Literal else: # pragma: py-lt-38 from typing import Literal if TYPE_CHECKING: from asgiref.typing import ( ASGISendEvent, WebSocketAcceptEvent, WebSocketCloseEvent, WebSocketConnectEvent, WebSocketDisconnectEvent, WebSocketReceiveEvent, WebSocketScope, WebSocketSendEvent, ) class Server: closing = False def register(self, ws: WebSocketServerProtocol) -> None: pass def unregister(self, ws: WebSocketServerProtocol) -> None: pass def is_serving(self) -> bool: return not self.closing class WebSocketProtocol(WebSocketServerProtocol): extra_headers: List[Tuple[str, str]] def __init__( self, config: Config, server_state: ServerState, app_state: Dict[str, Any], _loop: Optional[asyncio.AbstractEventLoop] = None, ): if not config.loaded: config.load() self.config = config self.app = config.loaded_app self.loop = _loop or asyncio.get_event_loop() self.root_path = config.root_path self.app_state = app_state # Shared server state self.connections = server_state.connections self.tasks = server_state.tasks # Connection state self.transport: asyncio.Transport = None # type: ignore[assignment] self.server: Optional[Tuple[str, int]] = None self.client: Optional[Tuple[str, int]] = None self.scheme: Literal["wss", "ws"] = None # type: ignore[assignment] # Connection events self.scope: WebSocketScope = None # type: ignore[assignment] self.handshake_started_event = asyncio.Event() self.handshake_completed_event = asyncio.Event() self.closed_event = asyncio.Event() self.initial_response: Optional[HTTPResponse] = None self.connect_sent = False self.lost_connection_before_handshake = False self.accepted_subprotocol: Optional[Subprotocol] = None self.ws_server: Server = Server() # type: ignore[assignment] extensions = [] if self.config.ws_per_message_deflate: extensions.append(ServerPerMessageDeflateFactory()) super().__init__( ws_handler=self.ws_handler, ws_server=self.ws_server, # type: ignore[arg-type] max_size=self.config.ws_max_size, ping_interval=self.config.ws_ping_interval, ping_timeout=self.config.ws_ping_timeout, extensions=extensions, logger=logging.getLogger("uvicorn.error"), ) self.server_header = None self.extra_headers = [ (name.decode("latin-1"), value.decode("latin-1")) for name, value in server_state.default_headers ] def connection_made( # type: ignore[override] self, transport: asyncio.Transport ) -> None: self.connections.add(self) self.transport = transport self.server = get_local_addr(transport) self.client = get_remote_addr(transport) self.scheme = "wss" if is_ssl(transport) else "ws" if self.logger.isEnabledFor(TRACE_LOG_LEVEL): prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection made", prefix) 
super().connection_made(transport) def connection_lost(self, exc: Optional[Exception]) -> None: self.connections.remove(self) if self.logger.isEnabledFor(TRACE_LOG_LEVEL): prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection lost", prefix) self.lost_connection_before_handshake = ( not self.handshake_completed_event.is_set() ) self.handshake_completed_event.set() super().connection_lost(exc) if exc is None: self.transport.close() def shutdown(self) -> None: self.ws_server.closing = True if self.handshake_completed_event.is_set(): self.fail_connection(1012) else: self.send_500_response() self.transport.close() def on_task_complete(self, task: asyncio.Task) -> None: self.tasks.discard(task) async def process_request( self, path: str, headers: Headers ) -> Optional[HTTPResponse]: """ This hook is called to determine if the websocket should return an HTTP response and close. Our behavior here is to start the ASGI application, and then wait for either `accept` or `close` in order to determine if we should close the connection. """ path_portion, _, query_string = path.partition("?") websockets.legacy.handshake.check_request(headers) subprotocols = [] for header in headers.get_all("Sec-WebSocket-Protocol"): subprotocols.extend([token.strip() for token in header.split(",")]) asgi_headers = [ (name.encode("ascii"), value.encode("ascii", errors="surrogateescape")) for name, value in headers.raw_items() ] self.scope = { # type: ignore[typeddict-item] "type": "websocket", "asgi": {"version": self.config.asgi_version, "spec_version": "2.3"}, "http_version": "1.1", "scheme": self.scheme, "server": self.server, "client": self.client, "root_path": self.root_path, "path": unquote(path_portion), "raw_path": path_portion.encode("ascii"), "query_string": query_string.encode("ascii"), "headers": asgi_headers, "subprotocols": subprotocols, "state": self.app_state.copy(), } task = self.loop.create_task(self.run_asgi()) task.add_done_callback(self.on_task_complete) self.tasks.add(task) await self.handshake_started_event.wait() return self.initial_response def process_subprotocol( self, headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]] ) -> Optional[Subprotocol]: """ We override the standard 'process_subprotocol' behavior here so that we return whatever subprotocol is sent in the 'accept' message. """ return self.accepted_subprotocol def send_500_response(self) -> None: msg = b"Internal Server Error" content = [ b"HTTP/1.1 500 Internal Server Error\r\n" b"content-type: text/plain; charset=utf-8\r\n", b"content-length: " + str(len(msg)).encode("ascii") + b"\r\n", b"connection: close\r\n", b"\r\n", msg, ] self.transport.write(b"".join(content)) # Allow handler task to terminate cleanly, as websockets doesn't cancel it by # itself (see https://github.com/encode/uvicorn/issues/920) self.handshake_started_event.set() async def ws_handler( # type: ignore[override] self, protocol: WebSocketServerProtocol, path: str ) -> Any: """ This is the main handler function for the 'websockets' implementation to call into. We just wait for close then return, and instead allow 'send' and 'receive' events to drive the flow. """ self.handshake_completed_event.set() await self.closed_event.wait() async def run_asgi(self) -> None: """ Wrapper around the ASGI callable, handling exceptions and unexpected termination states. 
""" try: result = await self.app(self.scope, self.asgi_receive, self.asgi_send) except BaseException as exc: self.closed_event.set() msg = "Exception in ASGI application\n" self.logger.error(msg, exc_info=exc) if not self.handshake_started_event.is_set(): self.send_500_response() else: await self.handshake_completed_event.wait() self.transport.close() else: self.closed_event.set() if not self.handshake_started_event.is_set(): msg = "ASGI callable returned without sending handshake." self.logger.error(msg) self.send_500_response() self.transport.close() elif result is not None: msg = "ASGI callable should return None, but returned '%s'." self.logger.error(msg, result) await self.handshake_completed_event.wait() self.transport.close() async def asgi_send(self, message: "ASGISendEvent") -> None: message_type = message["type"] if not self.handshake_started_event.is_set(): if message_type == "websocket.accept": message = cast("WebSocketAcceptEvent", message) self.logger.info( '%s - "WebSocket %s" [accepted]', self.scope["client"], get_path_with_query_string(self.scope), ) self.initial_response = None self.accepted_subprotocol = cast( Optional[Subprotocol], message.get("subprotocol") ) if "headers" in message: self.extra_headers.extend( # ASGI spec requires bytes # But for compatibility we need to convert it to strings (name.decode("latin-1"), value.decode("latin-1")) for name, value in message["headers"] ) self.handshake_started_event.set() elif message_type == "websocket.close": message = cast("WebSocketCloseEvent", message) self.logger.info( '%s - "WebSocket %s" 403', self.scope["client"], get_path_with_query_string(self.scope), ) self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"") self.handshake_started_event.set() self.closed_event.set() else: msg = ( "Expected ASGI message 'websocket.accept' or 'websocket.close', " "but got '%s'." ) raise RuntimeError(msg % message_type) elif not self.closed_event.is_set(): await self.handshake_completed_event.wait() if message_type == "websocket.send": message = cast("WebSocketSendEvent", message) bytes_data = message.get("bytes") text_data = message.get("text") data = text_data if bytes_data is None else bytes_data await self.send(data) # type: ignore[arg-type] elif message_type == "websocket.close": message = cast("WebSocketCloseEvent", message) code = message.get("code", 1000) reason = message.get("reason", "") or "" await self.close(code, reason) self.closed_event.set() else: msg = ( "Expected ASGI message 'websocket.send' or 'websocket.close'," " but got '%s'." ) raise RuntimeError(msg % message_type) else: msg = "Unexpected ASGI message '%s', after sending 'websocket.close'." raise RuntimeError(msg % message_type) async def asgi_receive( self, ) -> Union[ "WebSocketDisconnectEvent", "WebSocketConnectEvent", "WebSocketReceiveEvent" ]: if not self.connect_sent: self.connect_sent = True return {"type": "websocket.connect"} await self.handshake_completed_event.wait() if self.lost_connection_before_handshake: # If the handshake failed or the app closed before handshake completion, # use 1006 Abnormal Closure. 
return {"type": "websocket.disconnect", "code": 1006} if self.closed_event.is_set(): return {"type": "websocket.disconnect", "code": 1005} try: data = await self.recv() except ConnectionClosed as exc: self.closed_event.set() if self.ws_server.closing: return {"type": "websocket.disconnect", "code": 1012} return {"type": "websocket.disconnect", "code": exc.code} msg: WebSocketReceiveEvent = { # type: ignore[typeddict-item] "type": "websocket.receive" } if isinstance(data, str): msg["text"] = data else: msg["bytes"] = data return msg
13,784
Python
34.898437
87
0.580963
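Usage note: rather than relying on the auto module, this implementation can be requested explicitly through the ws option. A one-line sketch ("app:app" is a placeholder module path; the call blocks until the server exits):

import uvicorn

# Force the 'websockets'-based protocol above instead of auto-detection.
uvicorn.run("app:app", ws="websockets", port=8000)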
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/http/flow_control.py
import asyncio import typing if typing.TYPE_CHECKING: from asgiref.typing import ( ASGIReceiveCallable, ASGISendCallable, HTTPResponseBodyEvent, HTTPResponseStartEvent, Scope, ) CLOSE_HEADER = (b"connection", b"close") HIGH_WATER_LIMIT = 65536 class FlowControl: def __init__(self, transport: asyncio.Transport) -> None: self._transport = transport self.read_paused = False self.write_paused = False self._is_writable_event = asyncio.Event() self._is_writable_event.set() async def drain(self) -> None: await self._is_writable_event.wait() def pause_reading(self) -> None: if not self.read_paused: self.read_paused = True self._transport.pause_reading() def resume_reading(self) -> None: if self.read_paused: self.read_paused = False self._transport.resume_reading() def pause_writing(self) -> None: if not self.write_paused: self.write_paused = True self._is_writable_event.clear() def resume_writing(self) -> None: if self.write_paused: self.write_paused = False self._is_writable_event.set() async def service_unavailable( scope: "Scope", receive: "ASGIReceiveCallable", send: "ASGISendCallable" ) -> None: response_start: "HTTPResponseStartEvent" = { "type": "http.response.start", "status": 503, "headers": [ (b"content-type", b"text/plain; charset=utf-8"), (b"connection", b"close"), ], } await send(response_start) response_body: "HTTPResponseBodyEvent" = { "type": "http.response.body", "body": b"Service Unavailable", "more_body": False, } await send(response_body)
1,844
Python
25.73913
76
0.59436
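Usage note: FlowControl's pause/resume methods are idempotent, and drain() simply parks on the internal writable event until resume_writing() sets it. A sketch with a stub transport (StubTransport is hypothetical, standing in for an asyncio.Transport):

import asyncio

from uvicorn.protocols.http.flow_control import FlowControl

class StubTransport:
    """Hypothetical transport recording pause/resume calls."""
    def __init__(self):
        self.calls = []
    def pause_reading(self):
        self.calls.append("pause_reading")
    def resume_reading(self):
        self.calls.append("resume_reading")

async def main():
    flow = FlowControl(StubTransport())  # type: ignore[arg-type]
    flow.pause_reading()
    flow.pause_reading()                 # no-op: already paused
    flow.pause_writing()
    asyncio.get_running_loop().call_later(0.1, flow.resume_writing)
    await flow.drain()                   # returns once resume_writing() runs
    print(flow._transport.calls)         # ['pause_reading']

asyncio.run(main())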
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/http/auto.py
import asyncio from typing import Type AutoHTTPProtocol: Type[asyncio.Protocol] try: import httptools # noqa except ImportError: # pragma: no cover from uvicorn.protocols.http.h11_impl import H11Protocol AutoHTTPProtocol = H11Protocol else: # pragma: no cover from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol AutoHTTPProtocol = HttpToolsProtocol
391
Python
25.133332
71
0.780051
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/http/h11_impl.py
import asyncio import http import logging import sys from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union, cast, ) from urllib.parse import unquote import h11 from h11._connection import DEFAULT_MAX_INCOMPLETE_EVENT_SIZE from uvicorn.config import Config from uvicorn.logging import TRACE_LOG_LEVEL from uvicorn.protocols.http.flow_control import ( CLOSE_HEADER, HIGH_WATER_LIMIT, FlowControl, service_unavailable, ) from uvicorn.protocols.utils import ( get_client_addr, get_local_addr, get_path_with_query_string, get_remote_addr, is_ssl, ) from uvicorn.server import ServerState if sys.version_info < (3, 8): # pragma: py-gte-38 from typing_extensions import Literal else: # pragma: py-lt-38 from typing import Literal if TYPE_CHECKING: from asgiref.typing import ( ASGI3Application, ASGIReceiveEvent, ASGISendEvent, HTTPDisconnectEvent, HTTPRequestEvent, HTTPResponseBodyEvent, HTTPResponseStartEvent, HTTPScope, ) H11Event = Union[ h11.Request, h11.InformationalResponse, h11.Response, h11.Data, h11.EndOfMessage, h11.ConnectionClosed, ] def _get_status_phrase(status_code: int) -> bytes: try: return http.HTTPStatus(status_code).phrase.encode() except ValueError: return b"" STATUS_PHRASES = { status_code: _get_status_phrase(status_code) for status_code in range(100, 600) } class H11Protocol(asyncio.Protocol): def __init__( self, config: Config, server_state: ServerState, app_state: Dict[str, Any], _loop: Optional[asyncio.AbstractEventLoop] = None, ) -> None: if not config.loaded: config.load() self.config = config self.app = config.loaded_app self.loop = _loop or asyncio.get_event_loop() self.logger = logging.getLogger("uvicorn.error") self.access_logger = logging.getLogger("uvicorn.access") self.access_log = self.access_logger.hasHandlers() self.conn = h11.Connection( h11.SERVER, config.h11_max_incomplete_event_size if config.h11_max_incomplete_event_size is not None else DEFAULT_MAX_INCOMPLETE_EVENT_SIZE, ) self.ws_protocol_class = config.ws_protocol_class self.root_path = config.root_path self.limit_concurrency = config.limit_concurrency self.app_state = app_state # Timeouts self.timeout_keep_alive_task: Optional[asyncio.TimerHandle] = None self.timeout_keep_alive = config.timeout_keep_alive # Shared server state self.server_state = server_state self.connections = server_state.connections self.tasks = server_state.tasks # Per-connection state self.transport: asyncio.Transport = None # type: ignore[assignment] self.flow: FlowControl = None # type: ignore[assignment] self.server: Optional[Tuple[str, int]] = None self.client: Optional[Tuple[str, int]] = None self.scheme: Optional[Literal["http", "https"]] = None # Per-request state self.scope: HTTPScope = None # type: ignore[assignment] self.headers: List[Tuple[bytes, bytes]] = None # type: ignore[assignment] self.cycle: RequestResponseCycle = None # type: ignore[assignment] # Protocol interface def connection_made( # type: ignore[override] self, transport: asyncio.Transport ) -> None: self.connections.add(self) self.transport = transport self.flow = FlowControl(transport) self.server = get_local_addr(transport) self.client = get_remote_addr(transport) self.scheme = "https" if is_ssl(transport) else "http" if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sHTTP connection made", prefix) def connection_lost(self, exc: Optional[Exception]) -> None: self.connections.discard(self) if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " 
% self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sHTTP connection lost", prefix) if self.cycle and not self.cycle.response_complete: self.cycle.disconnected = True if self.conn.our_state != h11.ERROR: event = h11.ConnectionClosed() try: self.conn.send(event) except h11.LocalProtocolError: # Premature client disconnect pass if self.cycle is not None: self.cycle.message_event.set() if self.flow is not None: self.flow.resume_writing() if exc is None: self.transport.close() self._unset_keepalive_if_required() def eof_received(self) -> None: pass def _unset_keepalive_if_required(self) -> None: if self.timeout_keep_alive_task is not None: self.timeout_keep_alive_task.cancel() self.timeout_keep_alive_task = None def _get_upgrade(self) -> Optional[bytes]: connection = [] upgrade = None for name, value in self.headers: if name == b"connection": connection = [token.lower().strip() for token in value.split(b",")] if name == b"upgrade": upgrade = value.lower() if b"upgrade" in connection: return upgrade return None def _should_upgrade_to_ws(self) -> bool: if self.ws_protocol_class is None: if self.config.ws == "auto": msg = "Unsupported upgrade request." self.logger.warning(msg) msg = "No supported WebSocket library detected. Please use 'pip install uvicorn[standard]', or install 'websockets' or 'wsproto' manually." # noqa: E501 self.logger.warning(msg) return False return True def data_received(self, data: bytes) -> None: self._unset_keepalive_if_required() self.conn.receive_data(data) self.handle_events() def handle_events(self) -> None: while True: try: event = self.conn.next_event() except h11.RemoteProtocolError: msg = "Invalid HTTP request received." self.logger.warning(msg) self.send_400_response(msg) return event_type = type(event) if event_type is h11.NEED_DATA: break elif event_type is h11.PAUSED: # This case can occur in HTTP pipelining, so we need to # stop reading any more data, and ensure that at the end # of the active request/response cycle we handle any # events that have been buffered up. self.flow.pause_reading() break elif event_type is h11.Request: self.headers = [(key.lower(), value) for key, value in event.headers] raw_path, _, query_string = event.target.partition(b"?") self.scope = { # type: ignore[typeddict-item] "type": "http", "asgi": { "version": self.config.asgi_version, "spec_version": "2.3", }, "http_version": event.http_version.decode("ascii"), "server": self.server, "client": self.client, "scheme": self.scheme, "method": event.method.decode("ascii"), "root_path": self.root_path, "path": unquote(raw_path.decode("ascii")), "raw_path": raw_path, "query_string": query_string, "headers": self.headers, "state": self.app_state.copy(), } upgrade = self._get_upgrade() if upgrade == b"websocket" and self._should_upgrade_to_ws(): self.handle_websocket_upgrade(event) return # Handle 503 responses when 'limit_concurrency' is exceeded. if self.limit_concurrency is not None and ( len(self.connections) >= self.limit_concurrency or len(self.tasks) >= self.limit_concurrency ): app = service_unavailable message = "Exceeded concurrency limit." 
self.logger.warning(message) else: app = self.app self.cycle = RequestResponseCycle( scope=self.scope, conn=self.conn, transport=self.transport, flow=self.flow, logger=self.logger, access_logger=self.access_logger, access_log=self.access_log, default_headers=self.server_state.default_headers, message_event=asyncio.Event(), on_response=self.on_response_complete, ) task = self.loop.create_task(self.cycle.run_asgi(app)) task.add_done_callback(self.tasks.discard) self.tasks.add(task) elif event_type is h11.Data: if self.conn.our_state is h11.DONE: continue self.cycle.body += event.data if len(self.cycle.body) > HIGH_WATER_LIMIT: self.flow.pause_reading() self.cycle.message_event.set() elif event_type is h11.EndOfMessage: if self.conn.our_state is h11.DONE: self.transport.resume_reading() self.conn.start_next_cycle() continue self.cycle.more_body = False self.cycle.message_event.set() def handle_websocket_upgrade(self, event: H11Event) -> None: if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sUpgrading to WebSocket", prefix) self.connections.discard(self) output = [event.method, b" ", event.target, b" HTTP/1.1\r\n"] for name, value in self.headers: output += [name, b": ", value, b"\r\n"] output.append(b"\r\n") protocol = self.ws_protocol_class( # type: ignore[call-arg, misc] config=self.config, server_state=self.server_state, app_state=self.app_state, ) protocol.connection_made(self.transport) protocol.data_received(b"".join(output)) self.transport.set_protocol(protocol) def send_400_response(self, msg: str) -> None: reason = STATUS_PHRASES[400] headers = [ (b"content-type", b"text/plain; charset=utf-8"), (b"connection", b"close"), ] event = h11.Response(status_code=400, headers=headers, reason=reason) output = self.conn.send(event) self.transport.write(output) event = h11.Data(data=msg.encode("ascii")) output = self.conn.send(event) self.transport.write(output) event = h11.EndOfMessage() output = self.conn.send(event) self.transport.write(output) self.transport.close() def on_response_complete(self) -> None: self.server_state.total_requests += 1 if self.transport.is_closing(): return # Set a short Keep-Alive timeout. self._unset_keepalive_if_required() self.timeout_keep_alive_task = self.loop.call_later( self.timeout_keep_alive, self.timeout_keep_alive_handler ) # Unpause data reads if needed. self.flow.resume_reading() # Unblock any pipelined events. if self.conn.our_state is h11.DONE and self.conn.their_state is h11.DONE: self.conn.start_next_cycle() self.handle_events() def shutdown(self) -> None: """ Called by the server to commence a graceful shutdown. """ if self.cycle is None or self.cycle.response_complete: event = h11.ConnectionClosed() self.conn.send(event) self.transport.close() else: self.cycle.keep_alive = False def pause_writing(self) -> None: """ Called by the transport when the write buffer exceeds the high water mark. """ self.flow.pause_writing() def resume_writing(self) -> None: """ Called by the transport when the write buffer drops below the low water mark. """ self.flow.resume_writing() def timeout_keep_alive_handler(self) -> None: """ Called on a keep-alive connection if no new data is received after a short delay. 
""" if not self.transport.is_closing(): event = h11.ConnectionClosed() self.conn.send(event) self.transport.close() class RequestResponseCycle: def __init__( self, scope: "HTTPScope", conn: h11.Connection, transport: asyncio.Transport, flow: FlowControl, logger: logging.Logger, access_logger: logging.Logger, access_log: bool, default_headers: List[Tuple[bytes, bytes]], message_event: asyncio.Event, on_response: Callable[..., None], ) -> None: self.scope = scope self.conn = conn self.transport = transport self.flow = flow self.logger = logger self.access_logger = access_logger self.access_log = access_log self.default_headers = default_headers self.message_event = message_event self.on_response = on_response # Connection state self.disconnected = False self.keep_alive = True self.waiting_for_100_continue = conn.they_are_waiting_for_100_continue # Request state self.body = b"" self.more_body = True # Response state self.response_started = False self.response_complete = False # ASGI exception wrapper async def run_asgi(self, app: "ASGI3Application") -> None: try: result = await app( # type: ignore[func-returns-value] self.scope, self.receive, self.send ) except BaseException as exc: msg = "Exception in ASGI application\n" self.logger.error(msg, exc_info=exc) if not self.response_started: await self.send_500_response() else: self.transport.close() else: if result is not None: msg = "ASGI callable should return None, but returned '%s'." self.logger.error(msg, result) self.transport.close() elif not self.response_started and not self.disconnected: msg = "ASGI callable returned without starting response." self.logger.error(msg) await self.send_500_response() elif not self.response_complete and not self.disconnected: msg = "ASGI callable returned without completing response." self.logger.error(msg) self.transport.close() finally: self.on_response = lambda: None async def send_500_response(self) -> None: response_start_event: "HTTPResponseStartEvent" = { "type": "http.response.start", "status": 500, "headers": [ (b"content-type", b"text/plain; charset=utf-8"), (b"connection", b"close"), ], } await self.send(response_start_event) response_body_event: "HTTPResponseBodyEvent" = { "type": "http.response.body", "body": b"Internal Server Error", "more_body": False, } await self.send(response_body_event) # ASGI interface async def send(self, message: "ASGISendEvent") -> None: message_type = message["type"] if self.flow.write_paused and not self.disconnected: await self.flow.drain() if self.disconnected: return if not self.response_started: # Sending response status line and headers if message_type != "http.response.start": msg = "Expected ASGI message 'http.response.start', but got '%s'." 
raise RuntimeError(msg % message_type) message = cast("HTTPResponseStartEvent", message) self.response_started = True self.waiting_for_100_continue = False status_code = message["status"] headers = self.default_headers + list(message.get("headers", [])) if CLOSE_HEADER in self.scope["headers"] and CLOSE_HEADER not in headers: headers = headers + [CLOSE_HEADER] if self.access_log: self.access_logger.info( '%s - "%s %s HTTP/%s" %d', get_client_addr(self.scope), self.scope["method"], get_path_with_query_string(self.scope), self.scope["http_version"], status_code, ) # Write response status line and headers reason = STATUS_PHRASES[status_code] event = h11.Response( status_code=status_code, headers=headers, reason=reason ) output = self.conn.send(event) self.transport.write(output) elif not self.response_complete: # Sending response body if message_type != "http.response.body": msg = "Expected ASGI message 'http.response.body', but got '%s'." raise RuntimeError(msg % message_type) message = cast("HTTPResponseBodyEvent", message) body = message.get("body", b"") more_body = message.get("more_body", False) # Write response body if self.scope["method"] == "HEAD": event = h11.Data(data=b"") else: event = h11.Data(data=body) output = self.conn.send(event) self.transport.write(output) # Handle response completion if not more_body: self.response_complete = True self.message_event.set() event = h11.EndOfMessage() output = self.conn.send(event) self.transport.write(output) else: # Response already sent msg = "Unexpected ASGI message '%s' sent, after response already completed." raise RuntimeError(msg % message_type) if self.response_complete: if self.conn.our_state is h11.MUST_CLOSE or not self.keep_alive: event = h11.ConnectionClosed() self.conn.send(event) self.transport.close() self.on_response() async def receive(self) -> "ASGIReceiveEvent": if self.waiting_for_100_continue and not self.transport.is_closing(): event = h11.InformationalResponse( status_code=100, headers=[], reason="Continue" ) output = self.conn.send(event) self.transport.write(output) self.waiting_for_100_continue = False if not self.disconnected and not self.response_complete: self.flow.resume_reading() await self.message_event.wait() self.message_event.clear() message: "Union[HTTPDisconnectEvent, HTTPRequestEvent]" if self.disconnected or self.response_complete: message = {"type": "http.disconnect"} else: message = { "type": "http.request", "body": self.body, "more_body": self.more_body, } self.body = b"" return message
20,487
Python
34.324138
169
0.559574
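Usage note: RequestResponseCycle drives the application with "http.request" events and expects "http.response.start" followed by one or more "http.response.body" events. A minimal ASGI app that echoes the request body, matching that contract:

async def app(scope, receive, send):
    assert scope["type"] == "http"
    body, more_body = b"", True
    while more_body:                     # drain the "http.request" events
        event = await receive()
        body += event.get("body", b"")
        more_body = event.get("more_body", False)
    await send({"type": "http.response.start", "status": 200,
                "headers": [(b"content-type", b"text/plain")]})
    await send({"type": "http.response.body", "body": body})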
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/uvicorn/protocols/http/httptools_impl.py
import asyncio
import http
import logging
import re
import sys
import urllib
from asyncio.events import TimerHandle
from collections import deque
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Deque,
    Dict,
    List,
    Optional,
    Tuple,
    Union,
    cast,
)

import httptools

from uvicorn.config import Config
from uvicorn.logging import TRACE_LOG_LEVEL
from uvicorn.protocols.http.flow_control import (
    CLOSE_HEADER,
    HIGH_WATER_LIMIT,
    FlowControl,
    service_unavailable,
)
from uvicorn.protocols.utils import (
    get_client_addr,
    get_local_addr,
    get_path_with_query_string,
    get_remote_addr,
    is_ssl,
)
from uvicorn.server import ServerState

if sys.version_info < (3, 8):  # pragma: py-gte-38
    from typing_extensions import Literal
else:  # pragma: py-lt-38
    from typing import Literal

if TYPE_CHECKING:
    from asgiref.typing import (
        ASGI3Application,
        ASGIReceiveEvent,
        ASGISendEvent,
        HTTPDisconnectEvent,
        HTTPRequestEvent,
        HTTPResponseBodyEvent,
        HTTPResponseStartEvent,
        HTTPScope,
    )

HEADER_RE = re.compile(b'[\x00-\x1F\x7F()<>@,;:[\]={} \t\\"]')
HEADER_VALUE_RE = re.compile(b"[\x00-\x1F\x7F]")


def _get_status_line(status_code: int) -> bytes:
    try:
        phrase = http.HTTPStatus(status_code).phrase.encode()
    except ValueError:
        phrase = b""
    return b"".join([b"HTTP/1.1 ", str(status_code).encode(), b" ", phrase, b"\r\n"])


STATUS_LINE = {
    status_code: _get_status_line(status_code) for status_code in range(100, 600)
}


class HttpToolsProtocol(asyncio.Protocol):
    def __init__(
        self,
        config: Config,
        server_state: ServerState,
        app_state: Dict[str, Any],
        _loop: Optional[asyncio.AbstractEventLoop] = None,
    ) -> None:
        if not config.loaded:
            config.load()

        self.config = config
        self.app = config.loaded_app
        self.loop = _loop or asyncio.get_event_loop()
        self.logger = logging.getLogger("uvicorn.error")
        self.access_logger = logging.getLogger("uvicorn.access")
        self.access_log = self.access_logger.hasHandlers()
        self.parser = httptools.HttpRequestParser(self)
        self.ws_protocol_class = config.ws_protocol_class
        self.root_path = config.root_path
        self.limit_concurrency = config.limit_concurrency
        self.app_state = app_state

        # Timeouts
        self.timeout_keep_alive_task: Optional[TimerHandle] = None
        self.timeout_keep_alive = config.timeout_keep_alive

        # Global state
        self.server_state = server_state
        self.connections = server_state.connections
        self.tasks = server_state.tasks

        # Per-connection state
        self.transport: asyncio.Transport = None  # type: ignore[assignment]
        self.flow: FlowControl = None  # type: ignore[assignment]
        self.server: Optional[Tuple[str, int]] = None
        self.client: Optional[Tuple[str, int]] = None
        self.scheme: Optional[Literal["http", "https"]] = None
        self.pipeline: Deque[Tuple[RequestResponseCycle, ASGI3Application]] = deque()

        # Per-request state
        self.scope: HTTPScope = None  # type: ignore[assignment]
        self.headers: List[Tuple[bytes, bytes]] = None  # type: ignore[assignment]
        self.expect_100_continue = False
        self.cycle: RequestResponseCycle = None  # type: ignore[assignment]

    # Protocol interface
    def connection_made(  # type: ignore[override]
        self, transport: asyncio.Transport
    ) -> None:
        self.connections.add(self)
        self.transport = transport
        self.flow = FlowControl(transport)
        self.server = get_local_addr(transport)
        self.client = get_remote_addr(transport)
        self.scheme = "https" if is_ssl(transport) else "http"

        if self.logger.level <= TRACE_LOG_LEVEL:
            prefix = "%s:%d - " % self.client if self.client else ""
            self.logger.log(TRACE_LOG_LEVEL, "%sHTTP connection made", prefix)

    def connection_lost(self, exc: Optional[Exception]) -> None:
        self.connections.discard(self)

        if
self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sHTTP connection lost", prefix) if self.cycle and not self.cycle.response_complete: self.cycle.disconnected = True if self.cycle is not None: self.cycle.message_event.set() if self.flow is not None: self.flow.resume_writing() if exc is None: self.transport.close() self._unset_keepalive_if_required() self.parser = None def eof_received(self) -> None: pass def _unset_keepalive_if_required(self) -> None: if self.timeout_keep_alive_task is not None: self.timeout_keep_alive_task.cancel() self.timeout_keep_alive_task = None def _get_upgrade(self) -> Optional[bytes]: connection = [] upgrade = None for name, value in self.headers: if name == b"connection": connection = [token.lower().strip() for token in value.split(b",")] if name == b"upgrade": upgrade = value.lower() if b"upgrade" in connection: return upgrade return None def _should_upgrade_to_ws(self, upgrade: Optional[bytes]) -> bool: if upgrade == b"websocket" and self.ws_protocol_class is not None: return True if self.config.ws == "auto": msg = "Unsupported upgrade request." self.logger.warning(msg) msg = "No supported WebSocket library detected. Please use 'pip install uvicorn[standard]', or install 'websockets' or 'wsproto' manually." # noqa: E501 self.logger.warning(msg) return False def _should_upgrade(self) -> bool: upgrade = self._get_upgrade() return self._should_upgrade_to_ws(upgrade) def data_received(self, data: bytes) -> None: self._unset_keepalive_if_required() try: self.parser.feed_data(data) except httptools.HttpParserError: msg = "Invalid HTTP request received." self.logger.warning(msg) self.send_400_response(msg) return except httptools.HttpParserUpgrade: upgrade = self._get_upgrade() if self._should_upgrade_to_ws(upgrade): self.handle_websocket_upgrade() def handle_websocket_upgrade(self) -> None: if self.logger.level <= TRACE_LOG_LEVEL: prefix = "%s:%d - " % self.client if self.client else "" self.logger.log(TRACE_LOG_LEVEL, "%sUpgrading to WebSocket", prefix) self.connections.discard(self) method = self.scope["method"].encode() output = [method, b" ", self.url, b" HTTP/1.1\r\n"] for name, value in self.scope["headers"]: output += [name, b": ", value, b"\r\n"] output.append(b"\r\n") protocol = self.ws_protocol_class( # type: ignore[call-arg, misc] config=self.config, server_state=self.server_state, app_state=self.app_state, ) protocol.connection_made(self.transport) protocol.data_received(b"".join(output)) self.transport.set_protocol(protocol) def send_400_response(self, msg: str) -> None: content = [STATUS_LINE[400]] for name, value in self.server_state.default_headers: content.extend([name, b": ", value, b"\r\n"]) content.extend( [ b"content-type: text/plain; charset=utf-8\r\n", b"content-length: " + str(len(msg)).encode("ascii") + b"\r\n", b"connection: close\r\n", b"\r\n", msg.encode("ascii"), ] ) self.transport.write(b"".join(content)) self.transport.close() def on_message_begin(self) -> None: self.url = b"" self.expect_100_continue = False self.headers = [] self.scope = { # type: ignore[typeddict-item] "type": "http", "asgi": {"version": self.config.asgi_version, "spec_version": "2.3"}, "http_version": "1.1", "server": self.server, "client": self.client, "scheme": self.scheme, "root_path": self.root_path, "headers": self.headers, "state": self.app_state.copy(), } # Parser callbacks def on_url(self, url: bytes) -> None: self.url += url def on_header(self, name: bytes, value: bytes) -> None: 
name = name.lower() if name == b"expect" and value.lower() == b"100-continue": self.expect_100_continue = True self.headers.append((name, value)) def on_headers_complete(self) -> None: http_version = self.parser.get_http_version() method = self.parser.get_method() self.scope["method"] = method.decode("ascii") if http_version != "1.1": self.scope["http_version"] = http_version if self.parser.should_upgrade() and self._should_upgrade(): return parsed_url = httptools.parse_url(self.url) raw_path = parsed_url.path path = raw_path.decode("ascii") if "%" in path: path = urllib.parse.unquote(path) self.scope["path"] = path self.scope["raw_path"] = raw_path self.scope["query_string"] = parsed_url.query or b"" # Handle 503 responses when 'limit_concurrency' is exceeded. if self.limit_concurrency is not None and ( len(self.connections) >= self.limit_concurrency or len(self.tasks) >= self.limit_concurrency ): app = service_unavailable message = "Exceeded concurrency limit." self.logger.warning(message) else: app = self.app existing_cycle = self.cycle self.cycle = RequestResponseCycle( scope=self.scope, transport=self.transport, flow=self.flow, logger=self.logger, access_logger=self.access_logger, access_log=self.access_log, default_headers=self.server_state.default_headers, message_event=asyncio.Event(), expect_100_continue=self.expect_100_continue, keep_alive=http_version != "1.0", on_response=self.on_response_complete, ) if existing_cycle is None or existing_cycle.response_complete: # Standard case - start processing the request. task = self.loop.create_task(self.cycle.run_asgi(app)) task.add_done_callback(self.tasks.discard) self.tasks.add(task) else: # Pipelined HTTP requests need to be queued up. self.flow.pause_reading() self.pipeline.appendleft((self.cycle, app)) def on_body(self, body: bytes) -> None: if ( self.parser.should_upgrade() and self._should_upgrade() ) or self.cycle.response_complete: return self.cycle.body += body if len(self.cycle.body) > HIGH_WATER_LIMIT: self.flow.pause_reading() self.cycle.message_event.set() def on_message_complete(self) -> None: if ( self.parser.should_upgrade() and self._should_upgrade() ) or self.cycle.response_complete: return self.cycle.more_body = False self.cycle.message_event.set() def on_response_complete(self) -> None: # Callback for pipelined HTTP requests to be started. self.server_state.total_requests += 1 if self.transport.is_closing(): return # Set a short Keep-Alive timeout. self._unset_keepalive_if_required() self.timeout_keep_alive_task = self.loop.call_later( self.timeout_keep_alive, self.timeout_keep_alive_handler ) # Unpause data reads if needed. self.flow.resume_reading() # Unblock any pipelined events. if self.pipeline: cycle, app = self.pipeline.pop() task = self.loop.create_task(cycle.run_asgi(app)) task.add_done_callback(self.tasks.discard) self.tasks.add(task) def shutdown(self) -> None: """ Called by the server to commence a graceful shutdown. """ if self.cycle is None or self.cycle.response_complete: self.transport.close() else: self.cycle.keep_alive = False def pause_writing(self) -> None: """ Called by the transport when the write buffer exceeds the high water mark. """ self.flow.pause_writing() def resume_writing(self) -> None: """ Called by the transport when the write buffer drops below the low water mark. """ self.flow.resume_writing() def timeout_keep_alive_handler(self) -> None: """ Called on a keep-alive connection if no new data is received after a short delay. 
""" if not self.transport.is_closing(): self.transport.close() class RequestResponseCycle: def __init__( self, scope: "HTTPScope", transport: asyncio.Transport, flow: FlowControl, logger: logging.Logger, access_logger: logging.Logger, access_log: bool, default_headers: List[Tuple[bytes, bytes]], message_event: asyncio.Event, expect_100_continue: bool, keep_alive: bool, on_response: Callable[..., None], ): self.scope = scope self.transport = transport self.flow = flow self.logger = logger self.access_logger = access_logger self.access_log = access_log self.default_headers = default_headers self.message_event = message_event self.on_response = on_response # Connection state self.disconnected = False self.keep_alive = keep_alive self.waiting_for_100_continue = expect_100_continue # Request state self.body = b"" self.more_body = True # Response state self.response_started = False self.response_complete = False self.chunked_encoding: Optional[bool] = None self.expected_content_length = 0 # ASGI exception wrapper async def run_asgi(self, app: "ASGI3Application") -> None: try: result = await app( # type: ignore[func-returns-value] self.scope, self.receive, self.send ) except BaseException as exc: msg = "Exception in ASGI application\n" self.logger.error(msg, exc_info=exc) if not self.response_started: await self.send_500_response() else: self.transport.close() else: if result is not None: msg = "ASGI callable should return None, but returned '%s'." self.logger.error(msg, result) self.transport.close() elif not self.response_started and not self.disconnected: msg = "ASGI callable returned without starting response." self.logger.error(msg) await self.send_500_response() elif not self.response_complete and not self.disconnected: msg = "ASGI callable returned without completing response." self.logger.error(msg) self.transport.close() finally: self.on_response = lambda: None async def send_500_response(self) -> None: response_start_event: "HTTPResponseStartEvent" = { "type": "http.response.start", "status": 500, "headers": [ (b"content-type", b"text/plain; charset=utf-8"), (b"connection", b"close"), ], } await self.send(response_start_event) response_body_event: "HTTPResponseBodyEvent" = { "type": "http.response.body", "body": b"Internal Server Error", "more_body": False, } await self.send(response_body_event) # ASGI interface async def send(self, message: "ASGISendEvent") -> None: message_type = message["type"] if self.flow.write_paused and not self.disconnected: await self.flow.drain() if self.disconnected: return if not self.response_started: # Sending response status line and headers if message_type != "http.response.start": msg = "Expected ASGI message 'http.response.start', but got '%s'." 
raise RuntimeError(msg % message_type) message = cast("HTTPResponseStartEvent", message) self.response_started = True self.waiting_for_100_continue = False status_code = message["status"] headers = self.default_headers + list(message.get("headers", [])) if CLOSE_HEADER in self.scope["headers"] and CLOSE_HEADER not in headers: headers = headers + [CLOSE_HEADER] if self.access_log: self.access_logger.info( '%s - "%s %s HTTP/%s" %d', get_client_addr(self.scope), self.scope["method"], get_path_with_query_string(self.scope), self.scope["http_version"], status_code, ) # Write response status line and headers content = [STATUS_LINE[status_code]] for name, value in headers: if HEADER_RE.search(name): raise RuntimeError("Invalid HTTP header name.") if HEADER_VALUE_RE.search(value): raise RuntimeError("Invalid HTTP header value.") name = name.lower() if name == b"content-length" and self.chunked_encoding is None: self.expected_content_length = int(value.decode()) self.chunked_encoding = False elif name == b"transfer-encoding" and value.lower() == b"chunked": self.expected_content_length = 0 self.chunked_encoding = True elif name == b"connection" and value.lower() == b"close": self.keep_alive = False content.extend([name, b": ", value, b"\r\n"]) if ( self.chunked_encoding is None and self.scope["method"] != "HEAD" and status_code not in (204, 304) ): # Neither content-length nor transfer-encoding specified self.chunked_encoding = True content.append(b"transfer-encoding: chunked\r\n") content.append(b"\r\n") self.transport.write(b"".join(content)) elif not self.response_complete: # Sending response body if message_type != "http.response.body": msg = "Expected ASGI message 'http.response.body', but got '%s'." raise RuntimeError(msg % message_type) body = cast(bytes, message.get("body", b"")) more_body = message.get("more_body", False) # Write response body if self.scope["method"] == "HEAD": self.expected_content_length = 0 elif self.chunked_encoding: if body: content = [b"%x\r\n" % len(body), body, b"\r\n"] else: content = [] if not more_body: content.append(b"0\r\n\r\n") self.transport.write(b"".join(content)) else: num_bytes = len(body) if num_bytes > self.expected_content_length: raise RuntimeError("Response content longer than Content-Length") else: self.expected_content_length -= num_bytes self.transport.write(body) # Handle response completion if not more_body: if self.expected_content_length != 0: raise RuntimeError("Response content shorter than Content-Length") self.response_complete = True self.message_event.set() if not self.keep_alive: self.transport.close() self.on_response() else: # Response already sent msg = "Unexpected ASGI message '%s' sent, after response already completed." raise RuntimeError(msg % message_type) async def receive(self) -> "ASGIReceiveEvent": if self.waiting_for_100_continue and not self.transport.is_closing(): self.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n") self.waiting_for_100_continue = False if not self.disconnected and not self.response_complete: self.flow.resume_reading() await self.message_event.wait() self.message_event.clear() message: "Union[HTTPDisconnectEvent, HTTPRequestEvent]" if self.disconnected or self.response_complete: message = {"type": "http.disconnect"} else: message = { "type": "http.request", "body": self.body, "more_body": self.more_body, } self.body = b"" return message
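

# --------------------------------------------------------------------------
# Illustrative sketch (not part of uvicorn): a minimal ASGI application
# showing how the receive()/send() pair implemented by RequestResponseCycle
# above is consumed. The event types ("http.request", "http.response.start",
# "http.response.body", "http.disconnect") match the messages this module
# produces and expects; the app itself ("echo_app") is a hypothetical
# example, not uvicorn API.
async def echo_app(scope, receive, send):
    assert scope["type"] == "http"
    # Drain the request body, chunk by chunk, until more_body is False.
    body = b""
    while True:
        message = await receive()
        if message["type"] == "http.disconnect":
            return
        body += message.get("body", b"")
        if not message.get("more_body", False):
            break
    # Start the response, then send the whole body in a single message.
    await send({
        "type": "http.response.start",
        "status": 200,
        "headers": [(b"content-type", b"application/octet-stream")],
    })
    await send({"type": "http.response.body", "body": body, "more_body": False})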
21,866
Python
34.730392
165
0.570017
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/exceptions.py
class FormParserError(ValueError): """Base error class for our form parser.""" pass class ParseError(FormParserError): """This exception (or a subclass) is raised when there is an error while parsing something. """ #: This is the offset in the input data chunk (*NOT* the overall stream) in #: which the parse error occurred. It will be -1 if not specified. offset = -1 class MultipartParseError(ParseError): """This is a specific error that is raised when the MultipartParser detects an error while parsing. """ pass class QuerystringParseError(ParseError): """This is a specific error that is raised when the QuerystringParser detects an error while parsing. """ pass class DecodeError(ParseError): """This exception is raised when there is a decoding error - for example with the Base64Decoder or QuotedPrintableDecoder. """ pass # On Python 3.3, IOError is the same as OSError, so we don't want to inherit # from both of them. We handle this case below. if IOError is not OSError: # pragma: no cover class FileError(FormParserError, IOError, OSError): """Exception class for problems with the File class.""" pass else: # pragma: no cover class FileError(FormParserError, OSError): """Exception class for problems with the File class.""" pass
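

# Illustrative sketch (not part of this module): how the `offset` attribute
# on ParseError subclasses can be surfaced by a caller. The message and the
# offset value are made up for the example.
if __name__ == "__main__":
    err = QuerystringParseError("duplicate separator in chunk")
    err.offset = 7  # position within the offending chunk, not the stream
    try:
        raise err
    except ParseError as exc:
        print("parse error at chunk offset %d" % exc.offset)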
1,410
Python
29.021276
79
0.678723
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/multipart.py
from .decoders import *
from .exceptions import *

import os
import re
import sys
import shutil
import logging
import tempfile
from io import BytesIO
from numbers import Number

# Unique missing object.
_missing = object()

# States for the querystring parser.
STATE_BEFORE_FIELD = 0
STATE_FIELD_NAME = 1
STATE_FIELD_DATA = 2

# States for the multipart parser
STATE_START = 0
STATE_START_BOUNDARY = 1
STATE_HEADER_FIELD_START = 2
STATE_HEADER_FIELD = 3
STATE_HEADER_VALUE_START = 4
STATE_HEADER_VALUE = 5
STATE_HEADER_VALUE_ALMOST_DONE = 6
STATE_HEADERS_ALMOST_DONE = 7
STATE_PART_DATA_START = 8
STATE_PART_DATA = 9
STATE_PART_DATA_END = 10
STATE_END = 11

STATES = [
    "START",
    "START_BOUNDARY",
    "HEADER_FIELD_START",
    "HEADER_FIELD",
    "HEADER_VALUE_START",
    "HEADER_VALUE",
    "HEADER_VALUE_ALMOST_DONE",
    "HEADERS_ALMOST_DONE",
    "PART_DATA_START",
    "PART_DATA",
    "PART_DATA_END",
    "END"
]

# Flags for the multipart parser.
FLAG_PART_BOUNDARY = 1
FLAG_LAST_BOUNDARY = 2

# Get constants. Since iterating over a str on Python 2 gives you a 1-length
# string, but iterating over a bytes object on Python 3 gives you an integer,
# we need to save these constants.
CR = b'\r'[0]
LF = b'\n'[0]
COLON = b':'[0]
SPACE = b' '[0]
HYPHEN = b'-'[0]
AMPERSAND = b'&'[0]
SEMICOLON = b';'[0]
LOWER_A = b'a'[0]
LOWER_Z = b'z'[0]
NULL = b'\x00'[0]

# Lower-casing a character is different, because of the difference between
# str on Py2, and bytes on Py3. Same with getting the ordinal value of a byte,
# and joining a list of bytes together.
# These functions abstract that.
lower_char = lambda c: c | 0x20
ord_char = lambda c: c
join_bytes = lambda b: bytes(list(b))

# These are regexes for parsing header values.
SPECIAL_CHARS = re.escape(b'()<>@,;:\\"/[]?={} \t')
QUOTED_STR = br'"(?:\\.|[^"])*"'
VALUE_STR = br'(?:[^' + SPECIAL_CHARS + br']+|' + QUOTED_STR + br')'
OPTION_RE_STR = (
    br'(?:;|^)\s*([^' + SPECIAL_CHARS + br']+)\s*=\s*(' + VALUE_STR + br')'
)
OPTION_RE = re.compile(OPTION_RE_STR)
QUOTE = b'"'[0]


def parse_options_header(value):
    """
    Parses a Content-Type header into a value in the following format:
        (content_type, {parameters})
    """
    if not value:
        return (b'', {})

    # If we are passed a string, we assume that it conforms to WSGI and does
    # not contain any code point that's not in latin-1.
    if isinstance(value, str):  # pragma: no cover
        value = value.encode('latin-1')

    # If we have no options, return the string as-is.
    if b';' not in value:
        return (value.lower().strip(), {})

    # Split at the first semicolon, to get our value and then options.
    ctype, rest = value.split(b';', 1)
    options = {}

    # Parse the options.
    for match in OPTION_RE.finditer(rest):
        key = match.group(1).lower()
        value = match.group(2)
        if value[0] == QUOTE and value[-1] == QUOTE:
            # Unquote the value.
            value = value[1:-1]
            value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')

        # If the value is a filename, we need to fix a bug on IE6 that sends
        # the full file path instead of the filename.
        if key == b'filename':
            if value[1:3] == b':\\' or value[:2] == b'\\\\':
                value = value.split(b'\\')[-1]

        options[key] = value

    return ctype, options


class Field:
    """A Field object represents a (parsed) form field. It represents a
    single field with a corresponding name and value.
The name that a :class:`Field` will be instantiated with is the same name that would be found in the following HTML:: <input name="name_goes_here" type="text"/> This class defines two methods, :meth:`on_data` and :meth:`on_end`, that will be called when data is written to the Field, and when the Field is finalized, respectively. :param name: the name of the form field """ def __init__(self, name): self._name = name self._value = [] # We cache the joined version of _value for speed. self._cache = _missing @classmethod def from_value(klass, name, value): """Create an instance of a :class:`Field`, and set the corresponding value - either None or an actual value. This method will also finalize the Field itself. :param name: the name of the form field :param value: the value of the form field - either a bytestring or None """ f = klass(name) if value is None: f.set_none() else: f.write(value) f.finalize() return f def write(self, data): """Write some data into the form field. :param data: a bytestring """ return self.on_data(data) def on_data(self, data): """This method is a callback that will be called whenever data is written to the Field. :param data: a bytestring """ self._value.append(data) self._cache = _missing return len(data) def on_end(self): """This method is called whenever the Field is finalized. """ if self._cache is _missing: self._cache = b''.join(self._value) def finalize(self): """Finalize the form field. """ self.on_end() def close(self): """Close the Field object. This will free any underlying cache. """ # Free our value array. if self._cache is _missing: self._cache = b''.join(self._value) del self._value def set_none(self): """Some fields in a querystring can possibly have a value of None - for example, the string "foo&bar=&baz=asdf" will have a field with the name "foo" and value None, one with name "bar" and value "", and one with name "baz" and value "asdf". Since the write() interface doesn't support writing None, this function will set the field value to None. """ self._cache = None @property def field_name(self): """This property returns the name of the field.""" return self._name @property def value(self): """This property returns the value of the form field.""" if self._cache is _missing: self._cache = b''.join(self._value) return self._cache def __eq__(self, other): if isinstance(other, Field): return ( self.field_name == other.field_name and self.value == other.value ) else: return NotImplemented def __repr__(self): if len(self.value) > 97: # We get the repr, and then insert three dots before the final # quote. v = repr(self.value[:97])[:-1] + "...'" else: v = repr(self.value) return "{}(field_name={!r}, value={})".format( self.__class__.__name__, self.field_name, v ) class File: """This class represents an uploaded file. It handles writing file data to either an in-memory file or a temporary file on-disk, if the optional threshold is passed. There are some options that can be passed to the File to change behavior of the class. Valid options are as follows: .. list-table:: :widths: 15 5 5 30 :header-rows: 1 * - Name - Type - Default - Description * - UPLOAD_DIR - `str` - None - The directory to store uploaded files in. If this is None, a temporary file will be created in the system's standard location. * - UPLOAD_DELETE_TMP - `bool` - True - Delete automatically created TMP file * - UPLOAD_KEEP_FILENAME - `bool` - False - Whether or not to keep the filename of the uploaded file. If True, then the filename will be converted to a safe representation (e.g. 
          by removing any invalid path segments), and then saved with the
          same name. Otherwise, a temporary name will be used.
    * - UPLOAD_KEEP_EXTENSIONS
      - `bool`
      - False
      - Whether or not to keep the uploaded file's extension. If False, the
        file will be saved with the default temporary extension (usually
        ".tmp"). Otherwise, the file's extension will be maintained. Note
        that this will properly combine with the UPLOAD_KEEP_FILENAME setting.
    * - MAX_MEMORY_FILE_SIZE
      - `int`
      - 1 MiB
      - The maximum number of bytes of a File to keep in memory. By default,
        the contents of a File are kept in memory until a certain limit is
        reached, after which the contents of the File are written to a
        temporary file. This behavior can be disabled by setting this value
        to an appropriately large value (or, for example, infinity, such as
        `float('inf')`).

    :param file_name: The name of the file that this :class:`File` represents

    :param field_name: The field name that uploaded this file. Note that this
                       can be None, if, for example, the file was uploaded
                       with Content-Type application/octet-stream

    :param config: The configuration for this File. See above for valid
                   configuration keys and their corresponding values.
    """
    def __init__(self, file_name, field_name=None, config={}):
        # Save configuration, set other variables default.
        self.logger = logging.getLogger(__name__)
        self._config = config
        self._in_memory = True
        self._bytes_written = 0
        self._fileobj = BytesIO()

        # Save the provided field/file name.
        self._field_name = field_name
        self._file_name = file_name

        # Our actual file name is None by default, since, depending on our
        # config, we may not actually use the provided name.
        self._actual_file_name = None

        # Split the extension from the filename.
        if file_name is not None:
            base, ext = os.path.splitext(file_name)
            self._file_base = base
            self._ext = ext

    @property
    def field_name(self):
        """The form field associated with this file. May be None if there
        isn't one, for example when we have an application/octet-stream
        upload.
        """
        return self._field_name

    @property
    def file_name(self):
        """The file name given in the upload request.
        """
        return self._file_name

    @property
    def actual_file_name(self):
        """The file name that this file is saved as. Will be None if it's not
        currently saved on disk.
        """
        return self._actual_file_name

    @property
    def file_object(self):
        """The file object that we're currently writing to. Note that this
        will either be an instance of a :class:`io.BytesIO`, or a regular file
        object.
        """
        return self._fileobj

    @property
    def size(self):
        """The total size of this file, counted as the number of bytes that
        currently have been written to the file.
        """
        return self._bytes_written

    @property
    def in_memory(self):
        """A boolean representing whether or not this file object is currently
        stored in-memory or on-disk.
        """
        return self._in_memory

    def flush_to_disk(self):
        """If the file is already on-disk, do nothing. Otherwise, copy from
        the in-memory buffer to a disk file, and then reassign our internal
        file object to this new disk file.

        Note that if you attempt to flush a file that is already on-disk, a
        warning will be logged to this module's logger.
        """
        if not self._in_memory:
            self.logger.warning(
                "Trying to flush to disk when we're not in memory"
            )
            return

        # Go back to the start of our file.
        self._fileobj.seek(0)

        # Open a new file.
        new_file = self._get_disk_file()

        # Copy the file objects.
        shutil.copyfileobj(self._fileobj, new_file)

        # Seek to the new position in our new file.
        new_file.seek(self._bytes_written)

        # Reassign the fileobject.
old_fileobj = self._fileobj self._fileobj = new_file # We're no longer in memory. self._in_memory = False # Close the old file object. old_fileobj.close() def _get_disk_file(self): """This function is responsible for getting a file object on-disk for us. """ self.logger.info("Opening a file on disk") file_dir = self._config.get('UPLOAD_DIR') keep_filename = self._config.get('UPLOAD_KEEP_FILENAME', False) keep_extensions = self._config.get('UPLOAD_KEEP_EXTENSIONS', False) delete_tmp = self._config.get('UPLOAD_DELETE_TMP', True) # If we have a directory and are to keep the filename... if file_dir is not None and keep_filename: self.logger.info("Saving with filename in: %r", file_dir) # Build our filename. # TODO: what happens if we don't have a filename? fname = self._file_base if keep_extensions: fname = fname + self._ext path = os.path.join(file_dir, fname) try: self.logger.info("Opening file: %r", path) tmp_file = open(path, 'w+b') except OSError as e: tmp_file = None self.logger.exception("Error opening temporary file") raise FileError("Error opening temporary file: %r" % path) else: # Build options array. # Note that on Python 3, tempfile doesn't support byte names. We # encode our paths using the default filesystem encoding. options = {} if keep_extensions: ext = self._ext if isinstance(ext, bytes): ext = ext.decode(sys.getfilesystemencoding()) options['suffix'] = ext if file_dir is not None: d = file_dir if isinstance(d, bytes): d = d.decode(sys.getfilesystemencoding()) options['dir'] = d options['delete'] = delete_tmp # Create a temporary (named) file with the appropriate settings. self.logger.info("Creating a temporary file with options: %r", options) try: tmp_file = tempfile.NamedTemporaryFile(**options) except OSError: self.logger.exception("Error creating named temporary file") raise FileError("Error creating named temporary file") fname = tmp_file.name # Encode filename as bytes. if isinstance(fname, str): fname = fname.encode(sys.getfilesystemencoding()) self._actual_file_name = fname return tmp_file def write(self, data): """Write some data to the File. :param data: a bytestring """ return self.on_data(data) def on_data(self, data): """This method is a callback that will be called whenever data is written to the File. :param data: a bytestring """ pos = self._fileobj.tell() bwritten = self._fileobj.write(data) # true file objects write returns None if bwritten is None: bwritten = self._fileobj.tell() - pos # If the bytes written isn't the same as the length, just return. if bwritten != len(data): self.logger.warning("bwritten != len(data) (%d != %d)", bwritten, len(data)) return bwritten # Keep track of how many bytes we've written. self._bytes_written += bwritten # If we're in-memory and are over our limit, we create a file. if (self._in_memory and self._config.get('MAX_MEMORY_FILE_SIZE') is not None and (self._bytes_written > self._config.get('MAX_MEMORY_FILE_SIZE'))): self.logger.info("Flushing to disk") self.flush_to_disk() # Return the number of bytes written. return bwritten def on_end(self): """This method is called whenever the Field is finalized. """ # Flush the underlying file object self._fileobj.flush() def finalize(self): """Finalize the form file. This will not close the underlying file, but simply signal that we are finished writing to the File. """ self.on_end() def close(self): """Close the File object. This will actually close the underlying file object (whether it's a :class:`io.BytesIO` or an actual file object). 
""" self._fileobj.close() def __repr__(self): return "{}(file_name={!r}, field_name={!r})".format( self.__class__.__name__, self.file_name, self.field_name ) class BaseParser: """This class is the base class for all parsers. It contains the logic for calling and adding callbacks. A callback can be one of two different forms. "Notification callbacks" are callbacks that are called when something happens - for example, when a new part of a multipart message is encountered by the parser. "Data callbacks" are called when we get some sort of data - for example, part of the body of a multipart chunk. Notification callbacks are called with no parameters, whereas data callbacks are called with three, as follows:: data_callback(data, start, end) The "data" parameter is a bytestring (i.e. "foo" on Python 2, or b"foo" on Python 3). "start" and "end" are integer indexes into the "data" string that represent the data of interest. Thus, in a data callback, the slice `data[start:end]` represents the data that the callback is "interested in". The callback is not passed a copy of the data, since copying severely hurts performance. """ def __init__(self): self.logger = logging.getLogger(__name__) def callback(self, name, data=None, start=None, end=None): """This function calls a provided callback with some data. If the callback is not set, will do nothing. :param name: The name of the callback to call (as a string). :param data: Data to pass to the callback. If None, then it is assumed that the callback is a notification callback, and no parameters are given. :param end: An integer that is passed to the data callback. :param start: An integer that is passed to the data callback. """ name = "on_" + name func = self.callbacks.get(name) if func is None: return # Depending on whether we're given a buffer... if data is not None: # Don't do anything if we have start == end. if start is not None and start == end: return self.logger.debug("Calling %s with data[%d:%d]", name, start, end) func(data, start, end) else: self.logger.debug("Calling %s with no data", name) func() def set_callback(self, name, new_func): """Update the function for a callback. Removes from the callbacks dict if new_func is None. :param name: The name of the callback to call (as a string). :param new_func: The new function for the callback. If None, then the callback will be removed (with no error if it does not exist). """ if new_func is None: self.callbacks.pop('on_' + name, None) else: self.callbacks['on_' + name] = new_func def close(self): pass # pragma: no cover def finalize(self): pass # pragma: no cover def __repr__(self): return "%s()" % self.__class__.__name__ class OctetStreamParser(BaseParser): """This parser parses an octet-stream request body and calls callbacks when incoming data is received. Callbacks are as follows: .. list-table:: :widths: 15 10 30 :header-rows: 1 * - Callback Name - Parameters - Description * - on_start - None - Called when the first data is parsed. * - on_data - data, start, end - Called for each data chunk that is parsed. * - on_end - None - Called when the parser is finished parsing all data. :param callbacks: A dictionary of callbacks. See the documentation for :class:`BaseParser`. :param max_size: The maximum size of body to parse. Defaults to infinity - i.e. unbounded. 
""" def __init__(self, callbacks={}, max_size=float('inf')): super().__init__() self.callbacks = callbacks self._started = False if not isinstance(max_size, Number) or max_size < 1: raise ValueError("max_size must be a positive number, not %r" % max_size) self.max_size = max_size self._current_size = 0 def write(self, data): """Write some data to the parser, which will perform size verification, and then pass the data to the underlying callback. :param data: a bytestring """ if not self._started: self.callback('start') self._started = True # Truncate data length. data_len = len(data) if (self._current_size + data_len) > self.max_size: # We truncate the length of data that we are to process. new_size = int(self.max_size - self._current_size) self.logger.warning("Current size is %d (max %d), so truncating " "data length from %d to %d", self._current_size, self.max_size, data_len, new_size) data_len = new_size # Increment size, then callback, in case there's an exception. self._current_size += data_len self.callback('data', data, 0, data_len) return data_len def finalize(self): """Finalize this parser, which signals to that we are finished parsing, and sends the on_end callback. """ self.callback('end') def __repr__(self): return "%s()" % self.__class__.__name__ class QuerystringParser(BaseParser): """This is a streaming querystring parser. It will consume data, and call the callbacks given when it has data. .. list-table:: :widths: 15 10 30 :header-rows: 1 * - Callback Name - Parameters - Description * - on_field_start - None - Called when a new field is encountered. * - on_field_name - data, start, end - Called when a portion of a field's name is encountered. * - on_field_data - data, start, end - Called when a portion of a field's data is encountered. * - on_field_end - None - Called when the end of a field is encountered. * - on_end - None - Called when the parser is finished parsing all data. :param callbacks: A dictionary of callbacks. See the documentation for :class:`BaseParser`. :param strict_parsing: Whether or not to parse the body strictly. Defaults to False. If this is set to True, then the behavior of the parser changes as the following: if a field has a value with an equal sign (e.g. "foo=bar", or "foo="), it is always included. If a field has no equals sign (e.g. "...&name&..."), it will be treated as an error if 'strict_parsing' is True, otherwise included. If an error is encountered, then a :class:`multipart.exceptions.QuerystringParseError` will be raised. :param max_size: The maximum size of body to parse. Defaults to infinity - i.e. unbounded. """ def __init__(self, callbacks={}, strict_parsing=False, max_size=float('inf')): super().__init__() self.state = STATE_BEFORE_FIELD self._found_sep = False self.callbacks = callbacks # Max-size stuff if not isinstance(max_size, Number) or max_size < 1: raise ValueError("max_size must be a positive number, not %r" % max_size) self.max_size = max_size self._current_size = 0 # Should parsing be strict? self.strict_parsing = strict_parsing def write(self, data): """Write some data to the parser, which will perform size verification, parse into either a field name or value, and then pass the corresponding data to the underlying callback. If an error is encountered while parsing, a QuerystringParseError will be raised. The "offset" attribute of the raised exception will be set to the offset in the input data chunk (NOT the overall stream) that caused the error. :param data: a bytestring """ # Handle sizing. 
data_len = len(data) if (self._current_size + data_len) > self.max_size: # We truncate the length of data that we are to process. new_size = int(self.max_size - self._current_size) self.logger.warning("Current size is %d (max %d), so truncating " "data length from %d to %d", self._current_size, self.max_size, data_len, new_size) data_len = new_size l = 0 try: l = self._internal_write(data, data_len) finally: self._current_size += l return l def _internal_write(self, data, length): state = self.state strict_parsing = self.strict_parsing found_sep = self._found_sep i = 0 while i < length: ch = data[i] # Depending on our state... if state == STATE_BEFORE_FIELD: # If the 'found_sep' flag is set, we've already encountered # and skipped a single separator. If so, we check our strict # parsing flag and decide what to do. Otherwise, we haven't # yet reached a separator, and thus, if we do, we need to skip # it as it will be the boundary between fields that's supposed # to be there. if ch == AMPERSAND or ch == SEMICOLON: if found_sep: # If we're parsing strictly, we disallow blank chunks. if strict_parsing: e = QuerystringParseError( "Skipping duplicate ampersand/semicolon at " "%d" % i ) e.offset = i raise e else: self.logger.debug("Skipping duplicate ampersand/" "semicolon at %d", i) else: # This case is when we're skipping the (first) # separator between fields, so we just set our flag # and continue on. found_sep = True else: # Emit a field-start event, and go to that state. Also, # reset the "found_sep" flag, for the next time we get to # this state. self.callback('field_start') i -= 1 state = STATE_FIELD_NAME found_sep = False elif state == STATE_FIELD_NAME: # Try and find a separator - we ensure that, if we do, we only # look for the equal sign before it. sep_pos = data.find(b'&', i) if sep_pos == -1: sep_pos = data.find(b';', i) # See if we can find an equals sign in the remaining data. If # so, we can immediately emit the field name and jump to the # data state. if sep_pos != -1: equals_pos = data.find(b'=', i, sep_pos) else: equals_pos = data.find(b'=', i) if equals_pos != -1: # Emit this name. self.callback('field_name', data, i, equals_pos) # Jump i to this position. Note that it will then have 1 # added to it below, which means the next iteration of this # loop will inspect the character after the equals sign. i = equals_pos state = STATE_FIELD_DATA else: # No equals sign found. if not strict_parsing: # See also comments in the STATE_FIELD_DATA case below. # If we found the separator, we emit the name and just # end - there's no data callback at all (not even with # a blank value). if sep_pos != -1: self.callback('field_name', data, i, sep_pos) self.callback('field_end') i = sep_pos - 1 state = STATE_BEFORE_FIELD else: # Otherwise, no separator in this block, so the # rest of this chunk must be a name. self.callback('field_name', data, i, length) i = length else: # We're parsing strictly. If we find a separator, # this is an error - we require an equals sign. if sep_pos != -1: e = QuerystringParseError( "When strict_parsing is True, we require an " "equals sign in all field chunks. Did not " "find one in the chunk that starts at %d" % (i,) ) e.offset = i raise e # No separator in the rest of this chunk, so it's just # a field name. self.callback('field_name', data, i, length) i = length elif state == STATE_FIELD_DATA: # Try finding either an ampersand or a semicolon after this # position. 
                sep_pos = data.find(b'&', i)
                if sep_pos == -1:
                    sep_pos = data.find(b';', i)

                # If we found it, callback this bit as data and then go back
                # to expecting to find a field.
                if sep_pos != -1:
                    self.callback('field_data', data, i, sep_pos)
                    self.callback('field_end')

                    # Note that we go to the separator, which brings us to the
                    # "before field" state. This allows us to properly emit
                    # "field_start" events only when we actually have data for
                    # a field of some sort.
                    i = sep_pos - 1
                    state = STATE_BEFORE_FIELD

                # Otherwise, emit the rest as data and finish.
                else:
                    self.callback('field_data', data, i, length)
                    i = length

            else:  # pragma: no cover (error case)
                msg = "Reached an unknown state %d at %d" % (state, i)
                self.logger.warning(msg)
                e = QuerystringParseError(msg)
                e.offset = i
                raise e

            i += 1

        self.state = state
        self._found_sep = found_sep

        # Return the number of bytes we actually processed, so that our
        # caller's size accounting stays correct even when the data was
        # truncated to fit under max_size.
        return length

    def finalize(self):
        """Finalize this parser, which signals that we are finished parsing.
        If we're still in the middle of a field, this sends an on_field_end
        callback, and then the on_end callback.
        """
        # If we're currently in the middle of a field, we finish it.
        if self.state == STATE_FIELD_DATA:
            self.callback('field_end')
        self.callback('end')

    def __repr__(self):
        return "{}(strict_parsing={!r}, max_size={!r})".format(
            self.__class__.__name__,
            self.strict_parsing, self.max_size
        )


class MultipartParser(BaseParser):
    """This class is a streaming multipart/form-data parser.

    .. list-table::
       :widths: 15 10 30
       :header-rows: 1

       * - Callback Name
         - Parameters
         - Description
       * - on_part_begin
         - None
         - Called when a new part of the multipart message is encountered.
       * - on_part_data
         - data, start, end
         - Called when a portion of a part's data is encountered.
       * - on_part_end
         - None
         - Called when the end of a part is reached.
       * - on_header_begin
         - None
         - Called when we've found a new header in a part of a multipart
           message
       * - on_header_field
         - data, start, end
         - Called each time an additional portion of a header is read (i.e.
           the part of the header that is before the colon; the "Foo" in
           "Foo: Bar").
       * - on_header_value
         - data, start, end
         - Called when we get data for a header.
       * - on_header_end
         - None
         - Called when the current header is finished - i.e. we've reached the
           newline at the end of the header.
       * - on_headers_finished
         - None
         - Called when all headers are finished, and before the part data
           starts.
       * - on_end
         - None
         - Called when the parser is finished parsing all data.

    :param boundary: The multipart boundary. This is required, and must match
                     what is given in the HTTP request - usually in the
                     Content-Type header.

    :param callbacks: A dictionary of callbacks. See the documentation for
                      :class:`BaseParser`.

    :param max_size: The maximum size of body to parse. Defaults to infinity -
                     i.e. unbounded.
    """
    def __init__(self, boundary, callbacks={}, max_size=float('inf')):
        # Initialize parser state.
        super().__init__()
        self.state = STATE_START
        self.index = self.flags = 0

        self.callbacks = callbacks

        if not isinstance(max_size, Number) or max_size < 1:
            raise ValueError("max_size must be a positive number, not %r" %
                             max_size)
        self.max_size = max_size
        self._current_size = 0

        # Setup marks. These are used to track the state of data received.
        self.marks = {}

        # TODO: Actually use this rather than the dumb version we currently use
        #
        # Precompute the skip table for the Boyer-Moore-Horspool algorithm.
        # skip = [len(boundary) for x in range(256)]
        # for i in range(len(boundary) - 1):
        #     skip[ord_char(boundary[i])] = len(boundary) - i - 1
        #
        # # We use a tuple since it's a constant, and marginally faster.
        # self.skip = tuple(skip)

        # Save our boundary.
        if isinstance(boundary, str):  # pragma: no cover
            boundary = boundary.encode('latin-1')
        self.boundary = b'\r\n--' + boundary

        # Get a set of characters that belong to our boundary.
        self.boundary_chars = frozenset(self.boundary)

        # We also create a lookbehind list.
        # Note: the +8 is since we can have, at maximum, "\r\n--" + boundary +
        # "--\r\n" at the final boundary, and the length of '\r\n--' and
        # '--\r\n' is 8 bytes.
        self.lookbehind = [NULL for x in range(len(boundary) + 8)]

    def write(self, data):
        """Write some data to the parser, which will perform size verification,
        and then parse the data into the appropriate location (e.g. header,
        data, etc.), and pass this on to the underlying callback. If an error
        is encountered, a MultipartParseError will be raised. The "offset"
        attribute on the raised exception will be set to the offset of the byte
        in the input chunk that caused the error.

        :param data: a bytestring
        """
        # Handle sizing.
        data_len = len(data)
        if (self._current_size + data_len) > self.max_size:
            # We truncate the length of data that we are to process.
            new_size = int(self.max_size - self._current_size)
            self.logger.warning("Current size is %d (max %d), so truncating "
                                "data length from %d to %d",
                                self._current_size, self.max_size, data_len,
                                new_size)
            data_len = new_size

        l = 0
        try:
            l = self._internal_write(data, data_len)
        finally:
            self._current_size += l

        return l

    def _internal_write(self, data, length):
        # Get values from locals.
        boundary = self.boundary

        # Get our state, flags and index. These are persisted between calls to
        # this function.
        state = self.state
        index = self.index
        flags = self.flags

        # Our index defaults to 0.
        i = 0

        # Set a mark.
        def set_mark(name):
            self.marks[name] = i

        # Remove a mark.
        def delete_mark(name, reset=False):
            self.marks.pop(name, None)

        # Helper function that makes calling a callback with data easier. The
        # 'remaining' parameter will callback from the marked value until the
        # end of the buffer, and reset the mark, instead of deleting it. This
        # is used at the end of the function to call our callbacks with any
        # remaining data in this chunk.
        def data_callback(name, remaining=False):
            marked_index = self.marks.get(name)
            if marked_index is None:
                return

            # If we're getting remaining data, we ignore the current i value
            # and just call with the remaining data.
            if remaining:
                self.callback(name, data, marked_index, length)
                self.marks[name] = 0

            # Otherwise, we call it from the mark to the current byte we're
            # processing.
            else:
                self.callback(name, data, marked_index, i)
                self.marks.pop(name, None)

        # For each byte...
        while i < length:
            c = data[i]

            if state == STATE_START:
                # Skip leading newlines
                if c == CR or c == LF:
                    i += 1
                    self.logger.debug("Skipping leading CR/LF at %d", i)
                    continue

                # index is used as an index into our boundary. Set to 0.
                index = 0

                # Move to the next state, but decrement i so that we re-process
                # this character.
                state = STATE_START_BOUNDARY
                i -= 1

            elif state == STATE_START_BOUNDARY:
                # Check to ensure that the last 2 characters in our boundary
                # are CRLF.
                if index == len(boundary) - 2:
                    if c != CR:
                        # Error!
msg = "Did not find CR at end of boundary (%d)" % (i,) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e index += 1 elif index == len(boundary) - 2 + 1: if c != LF: msg = "Did not find LF at end of boundary (%d)" % (i,) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e # The index is now used for indexing into our boundary. index = 0 # Callback for the start of a part. self.callback('part_begin') # Move to the next character and state. state = STATE_HEADER_FIELD_START else: # Check to ensure our boundary matches if c != boundary[index + 2]: msg = "Did not find boundary character %r at index " \ "%d" % (c, index + 2) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e # Increment index into boundary and continue. index += 1 elif state == STATE_HEADER_FIELD_START: # Mark the start of a header field here, reset the index, and # continue parsing our header field. index = 0 # Set a mark of our header field. set_mark('header_field') # Move to parsing header fields. state = STATE_HEADER_FIELD i -= 1 elif state == STATE_HEADER_FIELD: # If we've reached a CR at the beginning of a header, it means # that we've reached the second of 2 newlines, and so there are # no more headers to parse. if c == CR: delete_mark('header_field') state = STATE_HEADERS_ALMOST_DONE i += 1 continue # Increment our index in the header. index += 1 # Do nothing if we encounter a hyphen. if c == HYPHEN: pass # If we've reached a colon, we're done with this header. elif c == COLON: # A 0-length header is an error. if index == 1: msg = "Found 0-length header at %d" % (i,) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e # Call our callback with the header field. data_callback('header_field') # Move to parsing the header value. state = STATE_HEADER_VALUE_START else: # Lower-case this character, and ensure that it is in fact # a valid letter. If not, it's an error. cl = lower_char(c) if cl < LOWER_A or cl > LOWER_Z: msg = "Found non-alphanumeric character %r in " \ "header at %d" % (c, i) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e elif state == STATE_HEADER_VALUE_START: # Skip leading spaces. if c == SPACE: i += 1 continue # Mark the start of the header value. set_mark('header_value') # Move to the header-value state, reprocessing this character. state = STATE_HEADER_VALUE i -= 1 elif state == STATE_HEADER_VALUE: # If we've got a CR, we're nearly done our headers. Otherwise, # we do nothing and just move past this character. if c == CR: data_callback('header_value') self.callback('header_end') state = STATE_HEADER_VALUE_ALMOST_DONE elif state == STATE_HEADER_VALUE_ALMOST_DONE: # The last character should be a LF. If not, it's an error. if c != LF: msg = "Did not find LF character at end of header " \ "(found %r)" % (c,) self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e # Move back to the start of another header. Note that if that # state detects ANOTHER newline, it'll trigger the end of our # headers. state = STATE_HEADER_FIELD_START elif state == STATE_HEADERS_ALMOST_DONE: # We're almost done our headers. This is reached when we parse # a CR at the beginning of a header, so our next character # should be a LF, or it's an error. 
if c != LF: msg = f"Did not find LF at end of headers (found {c!r})" self.logger.warning(msg) e = MultipartParseError(msg) e.offset = i raise e self.callback('headers_finished') state = STATE_PART_DATA_START elif state == STATE_PART_DATA_START: # Mark the start of our part data. set_mark('part_data') # Start processing part data, including this character. state = STATE_PART_DATA i -= 1 elif state == STATE_PART_DATA: # We're processing our part data right now. During this, we # need to efficiently search for our boundary, since any data # on any number of lines can be a part of the current data. # We use the Boyer-Moore-Horspool algorithm to efficiently # search through the remainder of the buffer looking for our # boundary. # Save the current value of our index. We use this in case we # find part of a boundary, but it doesn't match fully. prev_index = index # Set up variables. boundary_length = len(boundary) boundary_end = boundary_length - 1 data_length = length boundary_chars = self.boundary_chars # If our index is 0, we're starting a new part, so start our # search. if index == 0: # Search forward until we either hit the end of our buffer, # or reach a character that's in our boundary. i += boundary_end while i < data_length - 1 and data[i] not in boundary_chars: i += boundary_length # Reset i back the length of our boundary, which is the # earliest possible location that could be our match (i.e. # if we've just broken out of our loop since we saw the # last character in our boundary) i -= boundary_end c = data[i] # Now, we have a couple of cases here. If our index is before # the end of the boundary... if index < boundary_length: # If the character matches... if boundary[index] == c: # If we found a match for our boundary, we send the # existing data. if index == 0: data_callback('part_data') # The current character matches, so continue! index += 1 else: index = 0 # Our index is equal to the length of our boundary! elif index == boundary_length: # First we increment it. index += 1 # Now, if we've reached a newline, we need to set this as # the potential end of our boundary. if c == CR: flags |= FLAG_PART_BOUNDARY # Otherwise, if this is a hyphen, we might be at the last # of all boundaries. elif c == HYPHEN: flags |= FLAG_LAST_BOUNDARY # Otherwise, we reset our index, since this isn't either a # newline or a hyphen. else: index = 0 # Our index is right after the part boundary, which should be # a LF. elif index == boundary_length + 1: # If we're at a part boundary (i.e. we've seen a CR # character already)... if flags & FLAG_PART_BOUNDARY: # We need a LF character next. if c == LF: # Unset the part boundary flag. flags &= (~FLAG_PART_BOUNDARY) # Callback indicating that we've reached the end of # a part, and are starting a new one. self.callback('part_end') self.callback('part_begin') # Move to parsing new headers. index = 0 state = STATE_HEADER_FIELD_START i += 1 continue # We didn't find an LF character, so no match. Reset # our index and clear our flag. index = 0 flags &= (~FLAG_PART_BOUNDARY) # Otherwise, if we're at the last boundary (i.e. we've # seen a hyphen already)... elif flags & FLAG_LAST_BOUNDARY: # We need a second hyphen here. if c == HYPHEN: # Callback to end the current part, and then the # message. self.callback('part_end') self.callback('end') state = STATE_END else: # No match, so reset index. index = 0 # If we have an index, we need to keep this byte for later, in # case we can't match the full boundary. 
                if index > 0:
                    self.lookbehind[index - 1] = c

                # Otherwise, our index is 0. If the previous index is not, it
                # means we reset something, and we need to take the data we
                # thought was part of our boundary and send it along as actual
                # data.
                elif prev_index > 0:
                    # Callback to write the saved data.
                    lb_data = join_bytes(self.lookbehind)
                    self.callback('part_data', lb_data, 0, prev_index)

                    # Overwrite our previous index.
                    prev_index = 0

                    # Re-set our mark for part data.
                    set_mark('part_data')

                    # Re-consider the current character, since this could be
                    # the start of the boundary itself.
                    i -= 1

            elif state == STATE_END:
                # Do nothing and just consume a byte in the end state.
                if c not in (CR, LF):
                    self.logger.warning("Consuming a byte '0x%x' in the end state", c)

            else:  # pragma: no cover (error case)
                # We got into a strange state somehow! Just stop processing.
                msg = "Reached an unknown state %d at %d" % (state, i)
                self.logger.warning(msg)
                e = MultipartParseError(msg)
                e.offset = i
                raise e

            # Move to the next byte.
            i += 1

        # We call our callbacks with any remaining data. Note that we pass
        # the 'remaining' flag, which sets the mark back to 0 instead of
        # deleting it, if it's found. This is because, if the mark is found
        # at this point, we assume that there's data for one of these things
        # that has been parsed, but not yet emitted. And, as such, it implies
        # that we haven't yet reached the end of this 'thing'. So, by setting
        # the mark to 0, we cause any data callbacks that take place in future
        # calls to this function to start from the beginning of that buffer.
        data_callback('header_field', True)
        data_callback('header_value', True)
        data_callback('part_data', True)

        # Save values to locals.
        self.state = state
        self.index = index
        self.flags = flags

        # Return our data length to indicate no errors, and that we processed
        # all of it.
        return length

    def finalize(self):
        """Finalize this parser, which signals that we are finished parsing.

        Note: it does not currently do so, but in the future this will verify
        that we are in the final state of the parser (i.e. that the end of the
        multipart message is well-formed), and, if not, throw an error.
        """
        # TODO: verify that we're in the state STATE_END, otherwise throw an
        # error or otherwise state that we're not finished parsing.
        pass

    def __repr__(self):
        return f"{self.__class__.__name__}(boundary={self.boundary!r})"


class FormParser:
    """This class is the all-in-one form parser. Given all the information
    necessary to parse a form, it will instantiate the correct parser, create
    the proper :class:`Field` and :class:`File` classes to store the data that
    is parsed, and call the two given callbacks with each field and file as
    they become available.

    :param content_type: The Content-Type of the incoming request. This is
                         used to select the appropriate parser.

    :param on_field: The callback to call when a field has been parsed and is
                     ready for usage. See above for parameters.

    :param on_file: The callback to call when a file has been parsed and is
                    ready for usage. See above for parameters.

    :param on_end: An optional callback to call when all fields and files in a
                   request have been parsed. Can be None.

    :param boundary: If the request is a multipart/form-data request, this
                     should be the boundary of the request, as given in the
                     Content-Type header, as a bytestring.

    :param file_name: If the request is of type application/octet-stream, then
                      the body of the request will not contain any information
                      about the uploaded file. In such cases, you can provide
                      the file name of the uploaded file manually.
    :param FileClass: The class to use for uploaded files. Defaults to
                      :class:`File`, but you can provide your own class if you
                      wish to customize behaviour. The class will be
                      instantiated as FileClass(file_name, field_name), and it
                      must provide the following functions::

                          file_instance.write(data)
                          file_instance.finalize()
                          file_instance.close()

    :param FieldClass: The class to use for uploaded fields. Defaults to
                       :class:`Field`, but you can provide your own class if
                       you wish to customize behaviour. The class will be
                       instantiated as FieldClass(field_name), and it must
                       provide the following functions::

                           field_instance.write(data)
                           field_instance.finalize()
                           field_instance.close()

    :param config: Configuration to use for this FormParser. The default
                   values are taken from the DEFAULT_CONFIG value, and then
                   any keys present in this dictionary will overwrite the
                   default values.
    """
    #: This is the default configuration for our form parser.
    #: Note: all file sizes should be in bytes.
    DEFAULT_CONFIG = {
        'MAX_BODY_SIZE': float('inf'),
        'MAX_MEMORY_FILE_SIZE': 1 * 1024 * 1024,
        'UPLOAD_DIR': None,
        'UPLOAD_KEEP_FILENAME': False,
        'UPLOAD_KEEP_EXTENSIONS': False,

        # Error on invalid Content-Transfer-Encoding?
        'UPLOAD_ERROR_ON_BAD_CTE': False,
    }

    def __init__(self, content_type, on_field, on_file, on_end=None,
                 boundary=None, file_name=None, FileClass=File,
                 FieldClass=Field, config={}):

        self.logger = logging.getLogger(__name__)

        # Save variables.
        self.content_type = content_type
        self.boundary = boundary
        self.bytes_received = 0
        self.parser = None

        # Save callbacks.
        self.on_field = on_field
        self.on_file = on_file
        self.on_end = on_end

        # Save classes. Note that we save the classes that were actually
        # passed in, so that custom FileClass/FieldClass arguments are
        # reflected on the instance.
        self.FileClass = FileClass
        self.FieldClass = FieldClass

        # Set configuration options.
        self.config = self.DEFAULT_CONFIG.copy()
        self.config.update(config)

        # Depending on the Content-Type, we instantiate the correct parser.
        if content_type == 'application/octet-stream':
            # Work around the lack of 'nonlocal' in Py2
            class vars:
                f = None

            def on_start():
                vars.f = FileClass(file_name, None, config=self.config)

            def on_data(data, start, end):
                vars.f.write(data[start:end])

            def on_end():
                # Finalize the file itself.
                vars.f.finalize()

                # Call our callback.
                on_file(vars.f)

                # Call the on-end callback.
                if self.on_end is not None:
                    self.on_end()

            callbacks = {
                'on_start': on_start,
                'on_data': on_data,
                'on_end': on_end,
            }

            # Instantiate an octet-stream parser
            parser = OctetStreamParser(callbacks,
                                       max_size=self.config['MAX_BODY_SIZE'])

        elif (content_type == 'application/x-www-form-urlencoded' or
              content_type == 'application/x-url-encoded'):

            name_buffer = []

            class vars:
                f = None

            def on_field_start():
                pass

            def on_field_name(data, start, end):
                name_buffer.append(data[start:end])

            def on_field_data(data, start, end):
                if vars.f is None:
                    vars.f = FieldClass(b''.join(name_buffer))
                    del name_buffer[:]
                vars.f.write(data[start:end])

            def on_field_end():
                # Finalize and call callback.
                if vars.f is None:
                    # If we get here, it's because there was no field data.
                    # We create a field, set it to None, and then continue.
                    vars.f = FieldClass(b''.join(name_buffer))
                    del name_buffer[:]
                    vars.f.set_none()

                vars.f.finalize()
                on_field(vars.f)
                vars.f = None

            def on_end():
                if self.on_end is not None:
                    self.on_end()

            # Setup callbacks.
            callbacks = {
                'on_field_start': on_field_start,
                'on_field_name': on_field_name,
                'on_field_data': on_field_data,
                'on_field_end': on_field_end,
                'on_end': on_end,
            }

            # Instantiate parser.
parser = QuerystringParser( callbacks=callbacks, max_size=self.config['MAX_BODY_SIZE'] ) elif content_type == 'multipart/form-data': if boundary is None: self.logger.error("No boundary given") raise FormParserError("No boundary given") header_name = [] header_value = [] headers = {} # No 'nonlocal' on Python 2 :-( class vars: f = None writer = None is_file = False def on_part_begin(): pass def on_part_data(data, start, end): bytes_processed = vars.writer.write(data[start:end]) # TODO: check for error here. return bytes_processed def on_part_end(): vars.f.finalize() if vars.is_file: on_file(vars.f) else: on_field(vars.f) def on_header_field(data, start, end): header_name.append(data[start:end]) def on_header_value(data, start, end): header_value.append(data[start:end]) def on_header_end(): headers[b''.join(header_name)] = b''.join(header_value) del header_name[:] del header_value[:] def on_headers_finished(): # Reset the 'is file' flag. vars.is_file = False # Parse the content-disposition header. # TODO: handle mixed case content_disp = headers.get(b'Content-Disposition') disp, options = parse_options_header(content_disp) # Get the field and filename. field_name = options.get(b'name') file_name = options.get(b'filename') # TODO: check for errors # Create the proper class. if file_name is None: vars.f = FieldClass(field_name) else: vars.f = FileClass(file_name, field_name, config=self.config) vars.is_file = True # Parse the given Content-Transfer-Encoding to determine what # we need to do with the incoming data. # TODO: check that we properly handle 8bit / 7bit encoding. transfer_encoding = headers.get(b'Content-Transfer-Encoding', b'7bit') if (transfer_encoding == b'binary' or transfer_encoding == b'8bit' or transfer_encoding == b'7bit'): vars.writer = vars.f elif transfer_encoding == b'base64': vars.writer = Base64Decoder(vars.f) elif transfer_encoding == b'quoted-printable': vars.writer = QuotedPrintableDecoder(vars.f) else: self.logger.warning("Unknown Content-Transfer-Encoding: " "%r", transfer_encoding) if self.config['UPLOAD_ERROR_ON_BAD_CTE']: raise FormParserError( 'Unknown Content-Transfer-Encoding "{}"'.format( transfer_encoding ) ) else: # If we aren't erroring, then we just treat this as an # unencoded Content-Transfer-Encoding. vars.writer = vars.f def on_end(): vars.writer.finalize() if self.on_end is not None: self.on_end() # These are our callbacks for the parser. callbacks = { 'on_part_begin': on_part_begin, 'on_part_data': on_part_data, 'on_part_end': on_part_end, 'on_header_field': on_header_field, 'on_header_value': on_header_value, 'on_header_end': on_header_end, 'on_headers_finished': on_headers_finished, 'on_end': on_end, } # Instantiate a multipart parser. parser = MultipartParser(boundary, callbacks, max_size=self.config['MAX_BODY_SIZE']) else: self.logger.warning("Unknown Content-Type: %r", content_type) raise FormParserError("Unknown Content-Type: {}".format( content_type )) self.parser = parser def write(self, data): """Write some data. The parser will forward this to the appropriate underlying parser. :param data: a bytestring """ self.bytes_received += len(data) # TODO: check the parser's return value for errors? 
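        # Note that the underlying parsers return the number of bytes they
        # accepted, which can be less than len(data) once the configured
        # MAX_BODY_SIZE has been reached; a caller could compare the return
        # value against len(data) to detect truncation.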
        return self.parser.write(data)

    def finalize(self):
        """Finalize the parser."""
        if self.parser is not None and hasattr(self.parser, 'finalize'):
            self.parser.finalize()

    def close(self):
        """Close the parser."""
        if self.parser is not None and hasattr(self.parser, 'close'):
            self.parser.close()

    def __repr__(self):
        return "{}(content_type={!r}, parser={!r})".format(
            self.__class__.__name__,
            self.content_type,
            self.parser,
        )


def create_form_parser(headers, on_field, on_file, trust_x_headers=False,
                       config={}):
    """This function is a helper function to aid in creating a FormParser
    instance. Given a dictionary-like headers object, it will determine
    the correct information needed, instantiate a FormParser with the
    appropriate values and given callbacks, and then return the corresponding
    parser.

    :param headers: A dictionary-like object of HTTP headers. The only
                    required header is Content-Type.

    :param on_field: Callback to call with each parsed field.

    :param on_file: Callback to call with each parsed file.

    :param trust_x_headers: Whether or not to trust information received from
                            certain X-Headers - for example, the file name
                            from X-File-Name.

    :param config: Configuration variables to pass to the FormParser.
    """
    content_type = headers.get('Content-Type')
    if content_type is None:
        logging.getLogger(__name__).warning("No Content-Type header given")
        raise ValueError("No Content-Type header given!")

    # Boundaries are optional (the FormParser will raise if one is needed
    # but not given).
    content_type, params = parse_options_header(content_type)
    boundary = params.get(b'boundary')

    # We need content_type to be a string, not a bytes object.
    content_type = content_type.decode('latin-1')

    # File names are optional, and we only honor the X-File-Name header when
    # the caller has told us to trust X-Headers.
    file_name = headers.get('X-File-Name') if trust_x_headers else None

    # Instantiate a form parser.
    form_parser = FormParser(content_type,
                             on_field,
                             on_file,
                             boundary=boundary,
                             file_name=file_name,
                             config=config)

    # Return our parser.
    return form_parser


def parse_form(headers, input_stream, on_field, on_file, chunk_size=1048576,
               **kwargs):
    """This function is useful if you just want to parse a request body,
    without too much work. Pass it a dictionary-like object of the request's
    headers, and a file-like object for the input stream, along with two
    callbacks that will get called whenever a field or file is parsed.

    :param headers: A dictionary-like object of HTTP headers. The only
                    required header is Content-Type.

    :param input_stream: A file-like object that represents the request body.
                         The read() method must return bytestrings.

    :param on_field: Callback to call with each parsed field.

    :param on_file: Callback to call with each parsed file.

    :param chunk_size: The maximum size to read from the input stream and
                       write to the parser at one time. Defaults to 1 MiB.
    """
    # Create our form parser.
    parser = create_form_parser(headers, on_field, on_file)

    # Read chunks of at most chunk_size bytes and write them to the parser,
    # but never read more than the given Content-Length, if any.
    content_length = headers.get('Content-Length')
    if content_length is not None:
        content_length = int(content_length)
    else:
        content_length = float('inf')
    bytes_read = 0

    while True:
        # Read only up to the Content-Length given.
        max_readable = min(content_length - bytes_read, chunk_size)
        buff = input_stream.read(max_readable)

        # Write to the parser and update our length.
        parser.write(buff)
        bytes_read += len(buff)

        # If we get a buffer that's smaller than the size requested, or if we
        # have read up to our content length, we're done.
if len(buff) != max_readable or bytes_read == content_length: break # Tell our parser that we're done writing data. parser.finalize()
71,230
Python
36.608765
109
0.524821
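As a usage sketch for the helpers above: the snippet below feeds a small hand-built multipart body through parse_form. The body, boundary, and callback names here are illustrative, not taken from the library itself.

from io import BytesIO

from multipart import parse_form

# A minimal multipart/form-data body with a single text field
# (hypothetical sample data; note the leading '--' before the boundary).
body = (
    b'--boundary\r\n'
    b'Content-Disposition: form-data; name="field"\r\n'
    b'\r\n'
    b'value\r\n'
    b'--boundary--\r\n'
)

headers = {
    'Content-Type': 'multipart/form-data; boundary=boundary',
    'Content-Length': str(len(body)),
}

def on_field(field):
    # Invoked once per completed form field.
    print(field.field_name, field.value)

def on_file(file):
    # Invoked once per completed file part.
    print(file.file_name)

parse_form(headers, BytesIO(body), on_field, on_file)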
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/decoders.py
import base64 import binascii from .exceptions import DecodeError class Base64Decoder: """This object provides an interface to decode a stream of Base64 data. It is instantiated with an "underlying object", and whenever a write() operation is performed, it will decode the incoming data as Base64, and call write() on the underlying object. This is primarily used for decoding form data encoded as Base64, but can be used for other purposes:: from multipart.decoders import Base64Decoder fd = open("notb64.txt", "wb") decoder = Base64Decoder(fd) try: decoder.write("Zm9vYmFy") # "foobar" in Base64 decoder.finalize() finally: decoder.close() # The contents of "notb64.txt" should be "foobar". This object will also pass all finalize() and close() calls to the underlying object, if the underlying object supports them. Note that this class maintains a cache of base64 chunks, so that a write of arbitrary size can be performed. You must call :meth:`finalize` on this object after all writes are completed to ensure that all data is flushed to the underlying object. :param underlying: the underlying object to pass writes to """ def __init__(self, underlying): self.cache = bytearray() self.underlying = underlying def write(self, data): """Takes any input data provided, decodes it as base64, and passes it on to the underlying object. If the data provided is invalid base64 data, then this method will raise a :class:`multipart.exceptions.DecodeError` :param data: base64 data to decode """ # Prepend any cache info to our data. if len(self.cache) > 0: data = self.cache + data # Slice off a string that's a multiple of 4. decode_len = (len(data) // 4) * 4 val = data[:decode_len] # Decode and write, if we have any. if len(val) > 0: try: decoded = base64.b64decode(val) except binascii.Error: raise DecodeError('There was an error raised while decoding ' 'base64-encoded data.') self.underlying.write(decoded) # Get the remaining bytes and save in our cache. remaining_len = len(data) % 4 if remaining_len > 0: self.cache = data[-remaining_len:] else: self.cache = b'' # Return the length of the data to indicate no error. return len(data) def close(self): """Close this decoder. If the underlying object has a `close()` method, this function will call it. """ if hasattr(self.underlying, 'close'): self.underlying.close() def finalize(self): """Finalize this object. This should be called when no more data should be written to the stream. This function can raise a :class:`multipart.exceptions.DecodeError` if there is some remaining data in the cache. If the underlying object has a `finalize()` method, this function will call it. """ if len(self.cache) > 0: raise DecodeError('There are %d bytes remaining in the ' 'Base64Decoder cache when finalize() is called' % len(self.cache)) if hasattr(self.underlying, 'finalize'): self.underlying.finalize() def __repr__(self): return f"{self.__class__.__name__}(underlying={self.underlying!r})" class QuotedPrintableDecoder: """This object provides an interface to decode a stream of quoted-printable data. It is instantiated with an "underlying object", in the same manner as the :class:`multipart.decoders.Base64Decoder` class. This class behaves in exactly the same way, including maintaining a cache of quoted-printable chunks. 
:param underlying: the underlying object to pass writes to """ def __init__(self, underlying): self.cache = b'' self.underlying = underlying def write(self, data): """Takes any input data provided, decodes it as quoted-printable, and passes it on to the underlying object. :param data: quoted-printable data to decode """ # Prepend any cache info to our data. if len(self.cache) > 0: data = self.cache + data # If the last 2 characters have an '=' sign in it, then we won't be # able to decode the encoded value and we'll need to save it for the # next decoding step. if data[-2:].find(b'=') != -1: enc, rest = data[:-2], data[-2:] else: enc = data rest = b'' # Encode and write, if we have data. if len(enc) > 0: self.underlying.write(binascii.a2b_qp(enc)) # Save remaining in cache. self.cache = rest return len(data) def close(self): """Close this decoder. If the underlying object has a `close()` method, this function will call it. """ if hasattr(self.underlying, 'close'): self.underlying.close() def finalize(self): """Finalize this object. This should be called when no more data should be written to the stream. This function will not raise any exceptions, but it may write more data to the underlying object if there is data remaining in the cache. If the underlying object has a `finalize()` method, this function will call it. """ # If we have a cache, write and then remove it. if len(self.cache) > 0: self.underlying.write(binascii.a2b_qp(self.cache)) self.cache = b'' # Finalize our underlying stream. if hasattr(self.underlying, 'finalize'): self.underlying.finalize() def __repr__(self): return f"{self.__class__.__name__}(underlying={self.underlying!r})"
6,107
Python
34.511628
79
0.606681
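A short usage sketch for the two decoders above, writing into a plain BytesIO sink; the sample inputs follow the patterns exercised in the module's docstring and its tests.

from io import BytesIO

from multipart.decoders import Base64Decoder, QuotedPrintableDecoder

# Base64: writes may arrive in arbitrary chunk sizes; partial quads are
# cached until enough bytes accumulate, and finalize() checks for leftovers.
sink = BytesIO()
b64 = Base64Decoder(sink)
b64.write(b'Zm9v')   # first half of base64('foobar')
b64.write(b'YmFy')   # second half
b64.finalize()
assert sink.getvalue() == b'foobar'

# Quoted-printable: an escape split across writes ('=3' then 'Dbar') is
# held in the cache and decoded once it is complete.
sink = BytesIO()
qp = QuotedPrintableDecoder(sink)
qp.write(b'foo=3')
qp.write(b'Dbar')
qp.finalize()
assert sink.getvalue() == b'foo=bar'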
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/__init__.py
# This is the canonical package information. __author__ = 'Andrew Dunham' __license__ = 'Apache' __copyright__ = "Copyright (c) 2012-2013, Andrew Dunham" __version__ = "0.0.6" from .multipart import ( FormParser, MultipartParser, QuerystringParser, OctetStreamParser, create_form_parser, parse_form, )
335
Python
19.999999
56
0.653731
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/tests/test_multipart.py
import os import sys import glob import yaml import base64 import random import tempfile import unittest from .compat import ( parametrize, parametrize_class, slow_test, ) from io import BytesIO from unittest.mock import MagicMock, Mock, patch from ..multipart import * # Get the current directory for our later test cases. curr_dir = os.path.abspath(os.path.dirname(__file__)) def force_bytes(val): if isinstance(val, str): val = val.encode(sys.getfilesystemencoding()) return val class TestField(unittest.TestCase): def setUp(self): self.f = Field('foo') def test_name(self): self.assertEqual(self.f.field_name, 'foo') def test_data(self): self.f.write(b'test123') self.assertEqual(self.f.value, b'test123') def test_cache_expiration(self): self.f.write(b'test') self.assertEqual(self.f.value, b'test') self.f.write(b'123') self.assertEqual(self.f.value, b'test123') def test_finalize(self): self.f.write(b'test123') self.f.finalize() self.assertEqual(self.f.value, b'test123') def test_close(self): self.f.write(b'test123') self.f.close() self.assertEqual(self.f.value, b'test123') def test_from_value(self): f = Field.from_value(b'name', b'value') self.assertEqual(f.field_name, b'name') self.assertEqual(f.value, b'value') f2 = Field.from_value(b'name', None) self.assertEqual(f2.value, None) def test_equality(self): f1 = Field.from_value(b'name', b'value') f2 = Field.from_value(b'name', b'value') self.assertEqual(f1, f2) def test_equality_with_other(self): f = Field.from_value(b'foo', b'bar') self.assertFalse(f == b'foo') self.assertFalse(b'foo' == f) def test_set_none(self): f = Field(b'foo') self.assertEqual(f.value, b'') f.set_none() self.assertEqual(f.value, None) class TestFile(unittest.TestCase): def setUp(self): self.c = {} self.d = force_bytes(tempfile.mkdtemp()) self.f = File(b'foo.txt', config=self.c) def assert_data(self, data): f = self.f.file_object f.seek(0) self.assertEqual(f.read(), data) f.seek(0) f.truncate() def assert_exists(self): full_path = os.path.join(self.d, self.f.actual_file_name) self.assertTrue(os.path.exists(full_path)) def test_simple(self): self.f.write(b'foobar') self.assert_data(b'foobar') def test_invalid_write(self): m = Mock() m.write.return_value = 5 self.f._fileobj = m v = self.f.write(b'foobar') self.assertEqual(v, 5) def test_file_fallback(self): self.c['MAX_MEMORY_FILE_SIZE'] = 1 self.f.write(b'1') self.assertTrue(self.f.in_memory) self.assert_data(b'1') self.f.write(b'123') self.assertFalse(self.f.in_memory) self.assert_data(b'123') # Test flushing too. old_obj = self.f.file_object self.f.flush_to_disk() self.assertFalse(self.f.in_memory) self.assertIs(self.f.file_object, old_obj) def test_file_fallback_with_data(self): self.c['MAX_MEMORY_FILE_SIZE'] = 10 self.f.write(b'1' * 10) self.assertTrue(self.f.in_memory) self.f.write(b'2' * 10) self.assertFalse(self.f.in_memory) self.assert_data(b'11111111112222222222') def test_file_name(self): # Write to this dir. self.c['UPLOAD_DIR'] = self.d self.c['MAX_MEMORY_FILE_SIZE'] = 10 # Write. self.f.write(b'12345678901') self.assertFalse(self.f.in_memory) # Assert that the file exists self.assertIsNotNone(self.f.actual_file_name) self.assert_exists() def test_file_full_name(self): # Write to this dir. self.c['UPLOAD_DIR'] = self.d self.c['UPLOAD_KEEP_FILENAME'] = True self.c['MAX_MEMORY_FILE_SIZE'] = 10 # Write. 
        self.f.write(b'12345678901')
        self.assertFalse(self.f.in_memory)

        # Assert that the file exists
        self.assertEqual(self.f.actual_file_name, b'foo')
        self.assert_exists()

    def test_file_full_name_with_ext(self):
        self.c['UPLOAD_DIR'] = self.d
        self.c['UPLOAD_KEEP_FILENAME'] = True
        self.c['UPLOAD_KEEP_EXTENSIONS'] = True
        self.c['MAX_MEMORY_FILE_SIZE'] = 10

        # Write.
        self.f.write(b'12345678901')
        self.assertFalse(self.f.in_memory)

        # Assert that the file exists
        self.assertEqual(self.f.actual_file_name, b'foo.txt')
        self.assert_exists()

    def test_no_dir_with_extension(self):
        self.c['UPLOAD_KEEP_EXTENSIONS'] = True
        self.c['MAX_MEMORY_FILE_SIZE'] = 10

        # Write.
        self.f.write(b'12345678901')
        self.assertFalse(self.f.in_memory)

        # Assert that the file exists
        ext = os.path.splitext(self.f.actual_file_name)[1]
        self.assertEqual(ext, b'.txt')
        self.assert_exists()

    def test_invalid_dir_with_name(self):
        # Write to this dir.
        self.c['UPLOAD_DIR'] = force_bytes(os.path.join('/', 'tmp', 'notexisting'))
        self.c['UPLOAD_KEEP_FILENAME'] = True
        self.c['MAX_MEMORY_FILE_SIZE'] = 5

        # Write.
        with self.assertRaises(FileError):
            self.f.write(b'1234567890')

    def test_invalid_dir_no_name(self):
        # Write to this dir.
        self.c['UPLOAD_DIR'] = force_bytes(os.path.join('/', 'tmp', 'notexisting'))
        self.c['UPLOAD_KEEP_FILENAME'] = False
        self.c['MAX_MEMORY_FILE_SIZE'] = 5

        # Write.
        with self.assertRaises(FileError):
            self.f.write(b'1234567890')

    # TODO: test uploading two files with the same name.
class TestParseOptionsHeader(unittest.TestCase): def test_simple(self): t, p = parse_options_header('application/json') self.assertEqual(t, b'application/json') self.assertEqual(p, {}) def test_blank(self): t, p = parse_options_header('') self.assertEqual(t, b'') self.assertEqual(p, {}) def test_single_param(self): t, p = parse_options_header('application/json;par=val') self.assertEqual(t, b'application/json') self.assertEqual(p, {b'par': b'val'}) def test_single_param_with_spaces(self): t, p = parse_options_header(b'application/json; par=val') self.assertEqual(t, b'application/json') self.assertEqual(p, {b'par': b'val'}) def test_multiple_params(self): t, p = parse_options_header(b'application/json;par=val;asdf=foo') self.assertEqual(t, b'application/json') self.assertEqual(p, {b'par': b'val', b'asdf': b'foo'}) def test_quoted_param(self): t, p = parse_options_header(b'application/json;param="quoted"') self.assertEqual(t, b'application/json') self.assertEqual(p, {b'param': b'quoted'}) def test_quoted_param_with_semicolon(self): t, p = parse_options_header(b'application/json;param="quoted;with;semicolons"') self.assertEqual(p[b'param'], b'quoted;with;semicolons') def test_quoted_param_with_escapes(self): t, p = parse_options_header(b'application/json;param="This \\" is \\" a \\" quote"') self.assertEqual(p[b'param'], b'This " is " a " quote') def test_handles_ie6_bug(self): t, p = parse_options_header(b'text/plain; filename="C:\\this\\is\\a\\path\\file.txt"') self.assertEqual(p[b'filename'], b'file.txt') class TestBaseParser(unittest.TestCase): def setUp(self): self.b = BaseParser() self.b.callbacks = {} def test_callbacks(self): # The stupid list-ness is to get around lack of nonlocal on py2 l = [0] def on_foo(): l[0] += 1 self.b.set_callback('foo', on_foo) self.b.callback('foo') self.assertEqual(l[0], 1) self.b.set_callback('foo', None) self.b.callback('foo') self.assertEqual(l[0], 1) class TestQuerystringParser(unittest.TestCase): def assert_fields(self, *args, **kwargs): if kwargs.pop('finalize', True): self.p.finalize() self.assertEqual(self.f, list(args)) if kwargs.get('reset', True): self.f = [] def setUp(self): self.reset() def reset(self): self.f = [] name_buffer = [] data_buffer = [] def on_field_name(data, start, end): name_buffer.append(data[start:end]) def on_field_data(data, start, end): data_buffer.append(data[start:end]) def on_field_end(): self.f.append(( b''.join(name_buffer), b''.join(data_buffer) )) del name_buffer[:] del data_buffer[:] callbacks = { 'on_field_name': on_field_name, 'on_field_data': on_field_data, 'on_field_end': on_field_end } self.p = QuerystringParser(callbacks) def test_simple_querystring(self): self.p.write(b'foo=bar') self.assert_fields((b'foo', b'bar')) def test_querystring_blank_beginning(self): self.p.write(b'&foo=bar') self.assert_fields((b'foo', b'bar')) def test_querystring_blank_end(self): self.p.write(b'foo=bar&') self.assert_fields((b'foo', b'bar')) def test_multiple_querystring(self): self.p.write(b'foo=bar&asdf=baz') self.assert_fields( (b'foo', b'bar'), (b'asdf', b'baz') ) def test_streaming_simple(self): self.p.write(b'foo=bar&') self.assert_fields( (b'foo', b'bar'), finalize=False ) self.p.write(b'asdf=baz') self.assert_fields( (b'asdf', b'baz') ) def test_streaming_break(self): self.p.write(b'foo=one') self.assert_fields(finalize=False) self.p.write(b'two') self.assert_fields(finalize=False) self.p.write(b'three') self.assert_fields(finalize=False) self.p.write(b'&asd') self.assert_fields( (b'foo', b'onetwothree'), finalize=False ) 
self.p.write(b'f=baz') self.assert_fields( (b'asdf', b'baz') ) def test_semicolon_separator(self): self.p.write(b'foo=bar;asdf=baz') self.assert_fields( (b'foo', b'bar'), (b'asdf', b'baz') ) def test_too_large_field(self): self.p.max_size = 15 # Note: len = 8 self.p.write(b"foo=bar&") self.assert_fields((b'foo', b'bar'), finalize=False) # Note: len = 8, only 7 bytes processed self.p.write(b'a=123456') self.assert_fields((b'a', b'12345')) def test_invalid_max_size(self): with self.assertRaises(ValueError): p = QuerystringParser(max_size=-100) def test_strict_parsing_pass(self): data = b'foo=bar&another=asdf' for first, last in split_all(data): self.reset() self.p.strict_parsing = True print(f"{first!r} / {last!r}") self.p.write(first) self.p.write(last) self.assert_fields((b'foo', b'bar'), (b'another', b'asdf')) def test_strict_parsing_fail_double_sep(self): data = b'foo=bar&&another=asdf' for first, last in split_all(data): self.reset() self.p.strict_parsing = True cnt = 0 with self.assertRaises(QuerystringParseError) as cm: cnt += self.p.write(first) cnt += self.p.write(last) self.p.finalize() # The offset should occur at 8 bytes into the data (as a whole), # so we calculate the offset into the chunk. if cm is not None: self.assertEqual(cm.exception.offset, 8 - cnt) def test_double_sep(self): data = b'foo=bar&&another=asdf' for first, last in split_all(data): print(f" {first!r} / {last!r} ") self.reset() cnt = 0 cnt += self.p.write(first) cnt += self.p.write(last) self.assert_fields((b'foo', b'bar'), (b'another', b'asdf')) def test_strict_parsing_fail_no_value(self): self.p.strict_parsing = True with self.assertRaises(QuerystringParseError) as cm: self.p.write(b'foo=bar&blank&another=asdf') if cm is not None: self.assertEqual(cm.exception.offset, 8) def test_success_no_value(self): self.p.write(b'foo=bar&blank&another=asdf') self.assert_fields( (b'foo', b'bar'), (b'blank', b''), (b'another', b'asdf') ) def test_repr(self): # Issue #29; verify we don't assert on repr() _ignored = repr(self.p) class TestOctetStreamParser(unittest.TestCase): def setUp(self): self.d = [] self.started = 0 self.finished = 0 def on_start(): self.started += 1 def on_data(data, start, end): self.d.append(data[start:end]) def on_end(): self.finished += 1 callbacks = { 'on_start': on_start, 'on_data': on_data, 'on_end': on_end } self.p = OctetStreamParser(callbacks) def assert_data(self, data, finalize=True): self.assertEqual(b''.join(self.d), data) self.d = [] def assert_started(self, val=True): if val: self.assertEqual(self.started, 1) else: self.assertEqual(self.started, 0) def assert_finished(self, val=True): if val: self.assertEqual(self.finished, 1) else: self.assertEqual(self.finished, 0) def test_simple(self): # Assert is not started self.assert_started(False) # Write something, it should then be started + have data self.p.write(b'foobar') self.assert_started() self.assert_data(b'foobar') # Finalize, and check self.assert_finished(False) self.p.finalize() self.assert_finished() def test_multiple_chunks(self): self.p.write(b'foo') self.p.write(b'bar') self.p.write(b'baz') self.p.finalize() self.assert_data(b'foobarbaz') self.assert_finished() def test_max_size(self): self.p.max_size = 5 self.p.write(b'0123456789') self.p.finalize() self.assert_data(b'01234') self.assert_finished() def test_invalid_max_size(self): with self.assertRaises(ValueError): q = OctetStreamParser(max_size='foo') class TestBase64Decoder(unittest.TestCase): # Note: base64('foobar') == 'Zm9vYmFy' def setUp(self): self.f = BytesIO() self.d 
= Base64Decoder(self.f) def assert_data(self, data, finalize=True): if finalize: self.d.finalize() self.f.seek(0) self.assertEqual(self.f.read(), data) self.f.seek(0) self.f.truncate() def test_simple(self): self.d.write(b'Zm9vYmFy') self.assert_data(b'foobar') def test_bad(self): with self.assertRaises(DecodeError): self.d.write(b'Zm9v!mFy') def test_split_properly(self): self.d.write(b'Zm9v') self.d.write(b'YmFy') self.assert_data(b'foobar') def test_bad_split(self): buff = b'Zm9v' for i in range(1, 4): first, second = buff[:i], buff[i:] self.setUp() self.d.write(first) self.d.write(second) self.assert_data(b'foo') def test_long_bad_split(self): buff = b'Zm9vYmFy' for i in range(5, 8): first, second = buff[:i], buff[i:] self.setUp() self.d.write(first) self.d.write(second) self.assert_data(b'foobar') def test_close_and_finalize(self): parser = Mock() f = Base64Decoder(parser) f.finalize() parser.finalize.assert_called_once_with() f.close() parser.close.assert_called_once_with() def test_bad_length(self): self.d.write(b'Zm9vYmF') # missing ending 'y' with self.assertRaises(DecodeError): self.d.finalize() class TestQuotedPrintableDecoder(unittest.TestCase): def setUp(self): self.f = BytesIO() self.d = QuotedPrintableDecoder(self.f) def assert_data(self, data, finalize=True): if finalize: self.d.finalize() self.f.seek(0) self.assertEqual(self.f.read(), data) self.f.seek(0) self.f.truncate() def test_simple(self): self.d.write(b'foobar') self.assert_data(b'foobar') def test_with_escape(self): self.d.write(b'foo=3Dbar') self.assert_data(b'foo=bar') def test_with_newline_escape(self): self.d.write(b'foo=\r\nbar') self.assert_data(b'foobar') def test_with_only_newline_escape(self): self.d.write(b'foo=\nbar') self.assert_data(b'foobar') def test_with_split_escape(self): self.d.write(b'foo=3') self.d.write(b'Dbar') self.assert_data(b'foo=bar') def test_with_split_newline_escape_1(self): self.d.write(b'foo=\r') self.d.write(b'\nbar') self.assert_data(b'foobar') def test_with_split_newline_escape_2(self): self.d.write(b'foo=') self.d.write(b'\r\nbar') self.assert_data(b'foobar') def test_close_and_finalize(self): parser = Mock() f = QuotedPrintableDecoder(parser) f.finalize() parser.finalize.assert_called_once_with() f.close() parser.close.assert_called_once_with() def test_not_aligned(self): """ https://github.com/andrew-d/python-multipart/issues/6 """ self.d.write(b'=3AX') self.assert_data(b':X') # Additional offset tests self.d.write(b'=3') self.d.write(b'AX') self.assert_data(b':X') self.d.write(b'q=3AX') self.assert_data(b'q:X') # Load our list of HTTP test cases. http_tests_dir = os.path.join(curr_dir, 'test_data', 'http') # Read in all test cases and load them. NON_PARAMETRIZED_TESTS = {'single_field_blocks'} http_tests = [] for f in os.listdir(http_tests_dir): # Only load the HTTP test cases. fname, ext = os.path.splitext(f) if fname in NON_PARAMETRIZED_TESTS: continue if ext == '.http': # Get the YAML file and load it too. yaml_file = os.path.join(http_tests_dir, fname + '.yaml') # Load both. with open(os.path.join(http_tests_dir, f), 'rb') as f: test_data = f.read() with open(yaml_file, 'rb') as f: yaml_data = yaml.safe_load(f) http_tests.append({ 'name': fname, 'test': test_data, 'result': yaml_data }) def split_all(val): """ This function will split an array all possible ways. 
    For example:
        split_all([1,2,3,4])
    will give:
        ([1], [2,3,4]), ([1,2], [3,4]), ([1,2,3], [4])
    """
    for i in range(1, len(val)):
        yield (val[:i], val[i:])


@parametrize_class
class TestFormParser(unittest.TestCase):
    def make(self, boundary, config={}):
        self.ended = False
        self.files = []
        self.fields = []

        def on_field(f):
            self.fields.append(f)

        def on_file(f):
            self.files.append(f)

        def on_end():
            self.ended = True

        # Get a form-parser instance.
        self.f = FormParser('multipart/form-data', on_field, on_file, on_end,
                            boundary=boundary, config=config)

    def assert_file_data(self, f, data):
        o = f.file_object
        o.seek(0)
        file_data = o.read()
        self.assertEqual(file_data, data)

    def assert_file(self, field_name, file_name, data):
        # Find this file.
        found = None
        for f in self.files:
            if f.field_name == field_name:
                found = f
                break

        # Assert that we found it.
        self.assertIsNotNone(found)

        try:
            # Assert about this file.
            self.assert_file_data(found, data)
            self.assertEqual(found.file_name, file_name)

            # Remove it from our list.
            self.files.remove(found)
        finally:
            # Close our file
            found.close()

    def assert_field(self, name, value):
        # Find this field in our fields list.
        found = None
        for f in self.fields:
            if f.field_name == name:
                found = f
                break

        # Assert that it exists and matches.
        self.assertIsNotNone(found)
        self.assertEqual(value, found.value)

        # Remove it for future iterations.
        self.fields.remove(found)

    @parametrize('param', http_tests)
    def test_http(self, param):
        # Firstly, create our parser with the given boundary.
        boundary = param['result']['boundary']
        if isinstance(boundary, str):
            boundary = boundary.encode('latin-1')
        self.make(boundary)

        # Now, we feed the parser with data.
        exc = None
        try:
            processed = self.f.write(param['test'])
            self.f.finalize()
        except MultipartParseError as e:
            processed = 0
            exc = e

        # print(repr(param))
        # print("")
        # print(repr(self.fields))
        # print(repr(self.files))

        # Do we expect an error?
        if 'error' in param['result']['expected']:
            self.assertIsNotNone(exc)
            self.assertEqual(param['result']['expected']['error'], exc.offset)
            return

        # No error!
        self.assertEqual(processed, len(param['test']))

        # Assert that the parser gave us the appropriate fields/files.
        for e in param['result']['expected']:
            # Get our type and name.
            type = e['type']
            name = e['name'].encode('latin-1')

            if type == 'field':
                self.assert_field(name, e['data'])

            elif type == 'file':
                self.assert_file(
                    name,
                    e['file_name'].encode('latin-1'),
                    e['data']
                )

            else:
                assert False

    def test_random_splitting(self):
        """
        This test runs a simple multipart body with one field and one file
        through every possible split.
        """
        # Load test data.
        test_file = 'single_field_single_file.http'
        with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
            test_data = f.read()

        # We split the file through all cases.
        for first, last in split_all(test_data):
            # Create form parser.
            self.make('boundary')

            # Feed with data in 2 chunks.
            i = 0
            i += self.f.write(first)
            i += self.f.write(last)
            self.f.finalize()

            # Assert we processed everything.
            self.assertEqual(i, len(test_data))

            # Assert that our file and field are here.
            self.assert_field(b'field', b'test1')
            self.assert_file(b'file', b'file.txt', b'test2')

    def test_feed_single_bytes(self):
        """
        This test parses a simple multipart body 1 byte at a time.
        """
        # Load test data.
        test_file = 'single_field_single_file.http'
        with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
            test_data = f.read()

        # Create form parser.
        self.make('boundary')

        # Write all bytes.
        # NOTE: Can't simply do `for b in test_data`, since that gives
        # an integer when iterating over a bytes object on Python 3.
        i = 0
        for x in range(len(test_data)):
            b = test_data[x:x + 1]
            i += self.f.write(b)

        self.f.finalize()

        # Assert we processed everything.
        self.assertEqual(i, len(test_data))

        # Assert that our file and field are here.
        self.assert_field(b'field', b'test1')
        self.assert_file(b'file', b'file.txt', b'test2')

    def test_feed_blocks(self):
        """
        This test parses a simple multipart body in blocks of every size,
        starting at every possible offset.
        """
        # Load test data.
        test_file = 'single_field_blocks.http'
        with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
            test_data = f.read()

        for c in range(1, len(test_data) + 1):
            # Skip first `d` bytes - not interesting
            for d in range(c):

                # Create form parser.
                self.make('boundary')
                # Skip
                i = 0
                self.f.write(test_data[:d])
                i += d
                for x in range(d, len(test_data), c):
                    # Write a chunk to achieve condition
                    # `i == data_length - 1`
                    # in boundary search loop (multipart.py:1302)
                    b = test_data[x:x + c]
                    i += self.f.write(b)

                self.f.finalize()

                # Assert we processed everything.
                self.assertEqual(i, len(test_data))

                # Assert that our field is here.
                self.assert_field(b'field', b'0123456789ABCDEFGHIJ0123456789ABCDEFGHIJ')

    @slow_test
    def test_request_body_fuzz(self):
        """
        This test randomly fuzzes the request body to ensure that no strange
        exceptions are raised and we don't end up in a strange state.  The
        fuzzing consists of randomly doing one of the following:
            - Adding a random byte at a random offset
            - Randomly deleting a single byte
            - Randomly swapping two bytes
        """
        # Load test data.
        test_file = 'single_field_single_file.http'
        with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
            test_data = f.read()

        iterations = 1000
        successes = 0
        failures = 0
        exceptions = 0

        print("Running %d iterations of fuzz testing:" % (iterations,))
        for i in range(iterations):
            # Create a bytearray to mutate.
            fuzz_data = bytearray(test_data)

            # Pick what we're supposed to do.
            choice = random.choice([1, 2, 3])
            if choice == 1:
                # Add a random byte.
                i = random.randrange(len(test_data))
                b = random.randrange(256)

                fuzz_data.insert(i, b)
                msg = "Inserting byte %r at offset %d" % (b, i)

            elif choice == 2:
                # Remove a random byte.
                i = random.randrange(len(test_data))
                del fuzz_data[i]

                msg = "Deleting byte at offset %d" % (i,)

            elif choice == 3:
                # Swap two bytes.
                i = random.randrange(len(test_data) - 1)
                fuzz_data[i], fuzz_data[i + 1] = fuzz_data[i + 1], fuzz_data[i]

                msg = "Swapping bytes %d and %d" % (i, i + 1)

            # Print message, so if this crashes, we can inspect the output.
            print(" " + msg)

            # Create form parser.
            self.make('boundary')

            # Feed with data, and ignore form parser exceptions.
            i = 0
            try:
                i = self.f.write(bytes(fuzz_data))
                self.f.finalize()
            except FormParserError:
                exceptions += 1
            else:
                if i == len(fuzz_data):
                    successes += 1
                else:
                    failures += 1

        print("--------------------------------------------------")
        print("Successes: %d" % (successes,))
        print("Failures: %d" % (failures,))
        print("Exceptions: %d" % (exceptions,))

    @slow_test
    def test_request_body_fuzz_random_data(self):
        """
        This test will fuzz the multipart parser with some number of
        iterations of randomly-generated data.
        """
        iterations = 1000
        successes = 0
        failures = 0
        exceptions = 0

        print("Running %d iterations of fuzz testing:" % (iterations,))
        for i in range(iterations):
            data_size = random.randrange(100, 4096)
            data = os.urandom(data_size)
            print(" Testing with %d random bytes..." % (data_size,))

            # Create form parser.
self.make('boundary') # Feed with data, and ignore form parser exceptions. i = 0 try: i = self.f.write(bytes(data)) self.f.finalize() except FormParserError: exceptions += 1 else: if i == len(data): successes += 1 else: failures += 1 print("--------------------------------------------------") print("Successes: %d" % (successes,)) print("Failures: %d" % (failures,)) print("Exceptions: %d" % (exceptions,)) def test_bad_start_boundary(self): self.make('boundary') data = b'--boundary\rfoobar' with self.assertRaises(MultipartParseError): self.f.write(data) self.make('boundary') data = b'--boundaryfoobar' with self.assertRaises(MultipartParseError): i = self.f.write(data) def test_octet_stream(self): files = [] def on_file(f): files.append(f) on_field = Mock() on_end = Mock() f = FormParser('application/octet-stream', on_field, on_file, on_end=on_end, file_name=b'foo.txt') self.assertTrue(isinstance(f.parser, OctetStreamParser)) f.write(b'test') f.write(b'1234') f.finalize() # Assert that we only received a single file, with the right data, and that we're done. self.assertFalse(on_field.called) self.assertEqual(len(files), 1) self.assert_file_data(files[0], b'test1234') self.assertTrue(on_end.called) def test_querystring(self): fields = [] def on_field(f): fields.append(f) on_file = Mock() on_end = Mock() def simple_test(f): # Reset tracking. del fields[:] on_file.reset_mock() on_end.reset_mock() # Write test data. f.write(b'foo=bar') f.write(b'&test=asdf') f.finalize() # Assert we only received 2 fields... self.assertFalse(on_file.called) self.assertEqual(len(fields), 2) # ...assert that we have the correct data... self.assertEqual(fields[0].field_name, b'foo') self.assertEqual(fields[0].value, b'bar') self.assertEqual(fields[1].field_name, b'test') self.assertEqual(fields[1].value, b'asdf') # ... and assert that we've finished. self.assertTrue(on_end.called) f = FormParser('application/x-www-form-urlencoded', on_field, on_file, on_end=on_end) self.assertTrue(isinstance(f.parser, QuerystringParser)) simple_test(f) f = FormParser('application/x-url-encoded', on_field, on_file, on_end=on_end) self.assertTrue(isinstance(f.parser, QuerystringParser)) simple_test(f) def test_close_methods(self): parser = Mock() f = FormParser('application/x-url-encoded', None, None) f.parser = parser f.finalize() parser.finalize.assert_called_once_with() f.close() parser.close.assert_called_once_with() def test_bad_content_type(self): # We should raise a ValueError for a bad Content-Type with self.assertRaises(ValueError): f = FormParser('application/bad', None, None) def test_no_boundary_given(self): # We should raise a FormParserError when parsing a multipart message # without a boundary. with self.assertRaises(FormParserError): f = FormParser('multipart/form-data', None, None) def test_bad_content_transfer_encoding(self): data = b'----boundary\r\nContent-Disposition: form-data; name="file"; filename="test.txt"\r\nContent-Type: text/plain\r\nContent-Transfer-Encoding: badstuff\r\n\r\nTest\r\n----boundary--\r\n' files = [] def on_file(f): files.append(f) on_field = Mock() on_end = Mock() # Test with erroring. config = {'UPLOAD_ERROR_ON_BAD_CTE': True} f = FormParser('multipart/form-data', on_field, on_file, on_end=on_end, boundary='--boundary', config=config) with self.assertRaises(FormParserError): f.write(data) f.finalize() # Test without erroring. 
config = {'UPLOAD_ERROR_ON_BAD_CTE': False} f = FormParser('multipart/form-data', on_field, on_file, on_end=on_end, boundary='--boundary', config=config) f.write(data) f.finalize() self.assert_file_data(files[0], b'Test') def test_handles_None_fields(self): fields = [] def on_field(f): fields.append(f) on_file = Mock() on_end = Mock() f = FormParser('application/x-www-form-urlencoded', on_field, on_file, on_end=on_end) f.write(b'foo=bar&another&baz=asdf') f.finalize() self.assertEqual(fields[0].field_name, b'foo') self.assertEqual(fields[0].value, b'bar') self.assertEqual(fields[1].field_name, b'another') self.assertEqual(fields[1].value, None) self.assertEqual(fields[2].field_name, b'baz') self.assertEqual(fields[2].value, b'asdf') def test_max_size_multipart(self): # Load test data. test_file = 'single_field_single_file.http' with open(os.path.join(http_tests_dir, test_file), 'rb') as f: test_data = f.read() # Create form parser. self.make('boundary') # Set the maximum length that we can process to be halfway through the # given data. self.f.parser.max_size = len(test_data) / 2 i = self.f.write(test_data) self.f.finalize() # Assert we processed the correct amount. self.assertEqual(i, len(test_data) / 2) def test_max_size_form_parser(self): # Load test data. test_file = 'single_field_single_file.http' with open(os.path.join(http_tests_dir, test_file), 'rb') as f: test_data = f.read() # Create form parser setting the maximum length that we can process to # be halfway through the given data. size = len(test_data) / 2 self.make('boundary', config={'MAX_BODY_SIZE': size}) i = self.f.write(test_data) self.f.finalize() # Assert we processed the correct amount. self.assertEqual(i, len(test_data) / 2) def test_octet_stream_max_size(self): files = [] def on_file(f): files.append(f) on_field = Mock() on_end = Mock() f = FormParser('application/octet-stream', on_field, on_file, on_end=on_end, file_name=b'foo.txt', config={'MAX_BODY_SIZE': 10}) f.write(b'0123456789012345689') f.finalize() self.assert_file_data(files[0], b'0123456789') def test_invalid_max_size_multipart(self): with self.assertRaises(ValueError): q = MultipartParser(b'bound', max_size='foo') class TestHelperFunctions(unittest.TestCase): def test_create_form_parser(self): r = create_form_parser({'Content-Type': 'application/octet-stream'}, None, None) self.assertTrue(isinstance(r, FormParser)) def test_create_form_parser_error(self): headers = {} with self.assertRaises(ValueError): create_form_parser(headers, None, None) def test_parse_form(self): on_field = Mock() on_file = Mock() parse_form( {'Content-Type': 'application/octet-stream', }, BytesIO(b'123456789012345'), on_field, on_file ) assert on_file.call_count == 1 # Assert that the first argument of the call (a File object) has size # 15 - i.e. all data is written. 
self.assertEqual(on_file.call_args[0][0].size, 15) def test_parse_form_content_length(self): files = [] def on_file(file): files.append(file) parse_form( {'Content-Type': 'application/octet-stream', 'Content-Length': '10' }, BytesIO(b'123456789012345'), None, on_file ) self.assertEqual(len(files), 1) self.assertEqual(files[0].size, 10) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestFile)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestParseOptionsHeader)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestBaseParser)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestQuerystringParser)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestOctetStreamParser)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestBase64Decoder)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestQuotedPrintableDecoder)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestFormParser)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestHelperFunctions)) return suite
38,988
Python
28.853752
199
0.553555
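For completeness, a sketch of driving the suite() aggregator above directly; the import path assumes the bundled package layout shown in this corpus.

import unittest

from multipart.tests.test_multipart import suite

# Run every TestCase registered in suite() with the stock text runner.
# NOTE: the pytest 'slow_test' marks are inert under plain unittest, so
# the fuzz tests will run too.
unittest.TextTestRunner(verbosity=2).run(suite())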
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multipart/tests/compat.py
import os import re import sys import types import functools def ensure_in_path(path): """ Ensure that a given path is in the sys.path array """ if not os.path.isdir(path): raise RuntimeError('Tried to add nonexisting path') def _samefile(x, y): try: return os.path.samefile(x, y) except OSError: return False except AttributeError: # Probably on Windows. path1 = os.path.abspath(x).lower() path2 = os.path.abspath(y).lower() return path1 == path2 # Remove existing copies of it. for pth in sys.path: if _samefile(pth, path): sys.path.remove(pth) # Add it at the beginning. sys.path.insert(0, path) # Check if pytest is imported. If so, we use it to create marking decorators. # If not, we just create a function that does nothing. try: import pytest except ImportError: pytest = None if pytest is not None: slow_test = pytest.mark.slow_test xfail = pytest.mark.xfail else: slow_test = lambda x: x def xfail(*args, **kwargs): if len(args) > 0 and isinstance(args[0], types.FunctionType): return args[0] return lambda x: x # We don't use the pytest parametrizing function, since it seems to break # with unittest.TestCase subclasses. def parametrize(field_names, field_values): # If we're not given a list of field names, we make it. if not isinstance(field_names, (tuple, list)): field_names = (field_names,) field_values = [(val,) for val in field_values] # Create a decorator that saves this list of field names and values on the # function for later parametrizing. def decorator(func): func.__dict__['param_names'] = field_names func.__dict__['param_values'] = field_values return func return decorator # This is a metaclass that actually performs the parametrization. class ParametrizingMetaclass(type): IDENTIFIER_RE = re.compile('[^A-Za-z0-9]') def __new__(klass, name, bases, attrs): new_attrs = attrs.copy() for attr_name, attr in attrs.items(): # We only care about functions if not isinstance(attr, types.FunctionType): continue param_names = attr.__dict__.pop('param_names', None) param_values = attr.__dict__.pop('param_values', None) if param_names is None or param_values is None: continue # Create multiple copies of the function. for i, values in enumerate(param_values): assert len(param_names) == len(values) # Get a repr of the values, and fix it to be a valid identifier human = '_'.join( [klass.IDENTIFIER_RE.sub('', repr(x)) for x in values] ) # Create a new name. # new_name = attr.__name__ + "_%d" % i new_name = attr.__name__ + "__" + human # Create a replacement function. def create_new_func(func, names, values): # Create a kwargs dictionary. kwargs = dict(zip(names, values)) @functools.wraps(func) def new_func(self): return func(self, **kwargs) # Manually set the name and return the new function. new_func.__name__ = new_name return new_func # Actually create the new function. new_func = create_new_func(attr, param_names, values) # Save this new function in our attrs dict. new_attrs[new_name] = new_func # Remove the old attribute from our new dictionary. del new_attrs[attr_name] # We create the class as normal, except we use our new attributes. return type.__new__(klass, name, bases, new_attrs) # This is a class decorator that actually applies the above metaclass. def parametrize_class(klass): return ParametrizingMetaclass(klass.__name__, klass.__bases__, klass.__dict__)
4,266
Python
30.843283
79
0.569386
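As a sketch of how the parametrization helpers above combine; the TestSquares class and its data are hypothetical.

import unittest

from multipart.tests.compat import parametrize, parametrize_class

@parametrize_class
class TestSquares(unittest.TestCase):
    # ParametrizingMetaclass expands this into one generated method per
    # value tuple (e.g. test_square__2_4), each binding n and expected
    # as keyword arguments.
    @parametrize(('n', 'expected'), [(2, 4), (3, 9)])
    def test_square(self, n, expected):
        self.assertEqual(n * n, expected)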
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/endpoints.py
import json import typing from starlette import status from starlette._utils import is_async_callable from starlette.concurrency import run_in_threadpool from starlette.exceptions import HTTPException from starlette.requests import Request from starlette.responses import PlainTextResponse, Response from starlette.types import Message, Receive, Scope, Send from starlette.websockets import WebSocket class HTTPEndpoint: def __init__(self, scope: Scope, receive: Receive, send: Send) -> None: assert scope["type"] == "http" self.scope = scope self.receive = receive self.send = send self._allowed_methods = [ method for method in ("GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS") if getattr(self, method.lower(), None) is not None ] def __await__(self) -> typing.Generator: return self.dispatch().__await__() async def dispatch(self) -> None: request = Request(self.scope, receive=self.receive) handler_name = ( "get" if request.method == "HEAD" and not hasattr(self, "head") else request.method.lower() ) handler: typing.Callable[[Request], typing.Any] = getattr( self, handler_name, self.method_not_allowed ) is_async = is_async_callable(handler) if is_async: response = await handler(request) else: response = await run_in_threadpool(handler, request) await response(self.scope, self.receive, self.send) async def method_not_allowed(self, request: Request) -> Response: # If we're running inside a starlette application then raise an # exception, so that the configurable exception handler can deal with # returning the response. For plain ASGI apps, just return the response. headers = {"Allow": ", ".join(self._allowed_methods)} if "app" in self.scope: raise HTTPException(status_code=405, headers=headers) return PlainTextResponse("Method Not Allowed", status_code=405, headers=headers) class WebSocketEndpoint: encoding: typing.Optional[str] = None # May be "text", "bytes", or "json". 
def __init__(self, scope: Scope, receive: Receive, send: Send) -> None: assert scope["type"] == "websocket" self.scope = scope self.receive = receive self.send = send def __await__(self) -> typing.Generator: return self.dispatch().__await__() async def dispatch(self) -> None: websocket = WebSocket(self.scope, receive=self.receive, send=self.send) await self.on_connect(websocket) close_code = status.WS_1000_NORMAL_CLOSURE try: while True: message = await websocket.receive() if message["type"] == "websocket.receive": data = await self.decode(websocket, message) await self.on_receive(websocket, data) elif message["type"] == "websocket.disconnect": close_code = int( message.get("code") or status.WS_1000_NORMAL_CLOSURE ) break except Exception as exc: close_code = status.WS_1011_INTERNAL_ERROR raise exc finally: await self.on_disconnect(websocket, close_code) async def decode(self, websocket: WebSocket, message: Message) -> typing.Any: if self.encoding == "text": if "text" not in message: await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA) raise RuntimeError("Expected text websocket messages, but got bytes") return message["text"] elif self.encoding == "bytes": if "bytes" not in message: await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA) raise RuntimeError("Expected bytes websocket messages, but got text") return message["bytes"] elif self.encoding == "json": if message.get("text") is not None: text = message["text"] else: text = message["bytes"].decode("utf-8") try: return json.loads(text) except json.decoder.JSONDecodeError: await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA) raise RuntimeError("Malformed JSON data received.") assert ( self.encoding is None ), f"Unsupported 'encoding' attribute {self.encoding}" return message["text"] if message.get("text") else message["bytes"] async def on_connect(self, websocket: WebSocket) -> None: """Override to handle an incoming websocket connection""" await websocket.accept() async def on_receive(self, websocket: WebSocket, data: typing.Any) -> None: """Override to handle an incoming websocket message""" async def on_disconnect(self, websocket: WebSocket, close_code: int) -> None: """Override to handle a disconnecting websocket"""
5,145
Python
37.691729
88
0.607191
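A brief sketch wiring the two endpoint classes above into an application; the route paths and class names are illustrative.

from starlette.applications import Starlette
from starlette.endpoints import HTTPEndpoint, WebSocketEndpoint
from starlette.responses import PlainTextResponse
from starlette.routing import Route, WebSocketRoute


class Homepage(HTTPEndpoint):
    async def get(self, request):
        # HEAD is served by this handler too, since no head() is defined;
        # any other method gets a 405 via method_not_allowed().
        return PlainTextResponse("Hello, world!")


class Echo(WebSocketEndpoint):
    encoding = "text"  # decode() rejects non-text frames with close code 1003

    async def on_receive(self, websocket, data):
        await websocket.send_text(f"echo: {data}")


app = Starlette(routes=[
    Route("/", Homepage),
    WebSocketRoute("/ws", Echo),
])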
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/config.py
import os import typing from collections.abc import MutableMapping from pathlib import Path class undefined: pass class EnvironError(Exception): pass class Environ(MutableMapping): def __init__(self, environ: typing.MutableMapping = os.environ): self._environ = environ self._has_been_read: typing.Set[typing.Any] = set() def __getitem__(self, key: typing.Any) -> typing.Any: self._has_been_read.add(key) return self._environ.__getitem__(key) def __setitem__(self, key: typing.Any, value: typing.Any) -> None: if key in self._has_been_read: raise EnvironError( f"Attempting to set environ['{key}'], but the value has already been " "read." ) self._environ.__setitem__(key, value) def __delitem__(self, key: typing.Any) -> None: if key in self._has_been_read: raise EnvironError( f"Attempting to delete environ['{key}'], but the value has already " "been read." ) self._environ.__delitem__(key) def __iter__(self) -> typing.Iterator: return iter(self._environ) def __len__(self) -> int: return len(self._environ) environ = Environ() T = typing.TypeVar("T") class Config: def __init__( self, env_file: typing.Optional[typing.Union[str, Path]] = None, environ: typing.Mapping[str, str] = environ, env_prefix: str = "", ) -> None: self.environ = environ self.env_prefix = env_prefix self.file_values: typing.Dict[str, str] = {} if env_file is not None and os.path.isfile(env_file): self.file_values = self._read_file(env_file) @typing.overload def __call__(self, key: str, *, default: None) -> typing.Optional[str]: ... @typing.overload def __call__(self, key: str, cast: typing.Type[T], default: T = ...) -> T: ... @typing.overload def __call__( self, key: str, cast: typing.Type[str] = ..., default: str = ... ) -> str: ... @typing.overload def __call__( self, key: str, cast: typing.Callable[[typing.Any], T] = ..., default: typing.Any = ..., ) -> T: ... @typing.overload def __call__( self, key: str, cast: typing.Type[str] = ..., default: T = ... ) -> typing.Union[T, str]: ... def __call__( self, key: str, cast: typing.Optional[typing.Callable] = None, default: typing.Any = undefined, ) -> typing.Any: return self.get(key, cast, default) def get( self, key: str, cast: typing.Optional[typing.Callable] = None, default: typing.Any = undefined, ) -> typing.Any: key = self.env_prefix + key if key in self.environ: value = self.environ[key] return self._perform_cast(key, value, cast) if key in self.file_values: value = self.file_values[key] return self._perform_cast(key, value, cast) if default is not undefined: return self._perform_cast(key, default, cast) raise KeyError(f"Config '{key}' is missing, and has no default.") def _read_file(self, file_name: typing.Union[str, Path]) -> typing.Dict[str, str]: file_values: typing.Dict[str, str] = {} with open(file_name) as input_file: for line in input_file.readlines(): line = line.strip() if "=" in line and not line.startswith("#"): key, value = line.split("=", 1) key = key.strip() value = value.strip().strip("\"'") file_values[key] = value return file_values def _perform_cast( self, key: str, value: typing.Any, cast: typing.Optional[typing.Callable] = None ) -> typing.Any: if cast is None or value is None: return value elif cast is bool and isinstance(value, str): mapping = {"true": True, "1": True, "false": False, "0": False} value = value.lower() if value not in mapping: raise ValueError( f"Config '{key}' has value '{value}'. Not a valid bool." ) return mapping[value] try: return cast(value) except (TypeError, ValueError): raise ValueError( f"Config '{key}' has value '{value}'. 
Not a valid {cast.__name__}." )
4,607
Python
29.72
88
0.533536
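A usage sketch for Config; the setting names and the ".env" path are illustrative.

from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings, Secret

# Lookup order is os.environ, then the .env file, then the default;
# once a key has been read through Environ, later writes to it raise.
config = Config(".env")

DEBUG = config("DEBUG", cast=bool, default=False)
DATABASE_URL = config("DATABASE_URL", cast=Secret, default="")
ALLOWED_HOSTS = config("ALLOWED_HOSTS", cast=CommaSeparatedStrings, default="")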
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/exceptions.py
import http import typing import warnings __all__ = ("HTTPException", "WebSocketException") class HTTPException(Exception): def __init__( self, status_code: int, detail: typing.Optional[str] = None, headers: typing.Optional[dict] = None, ) -> None: if detail is None: detail = http.HTTPStatus(status_code).phrase self.status_code = status_code self.detail = detail self.headers = headers def __repr__(self) -> str: class_name = self.__class__.__name__ return f"{class_name}(status_code={self.status_code!r}, detail={self.detail!r})" class WebSocketException(Exception): def __init__(self, code: int, reason: typing.Optional[str] = None) -> None: self.code = code self.reason = reason or "" def __repr__(self) -> str: class_name = self.__class__.__name__ return f"{class_name}(code={self.code!r}, reason={self.reason!r})" __deprecated__ = "ExceptionMiddleware" def __getattr__(name: str) -> typing.Any: # pragma: no cover if name == __deprecated__: from starlette.middleware.exceptions import ExceptionMiddleware warnings.warn( f"{__deprecated__} is deprecated on `starlette.exceptions`. " f"Import it from `starlette.middleware.exceptions` instead.", category=DeprecationWarning, stacklevel=3, ) return ExceptionMiddleware raise AttributeError(f"module '{__name__}' has no attribute '{name}'") def __dir__() -> typing.List[str]: return sorted(list(__all__) + [__deprecated__]) # pragma: no cover
1,648
Python
28.981818
88
0.603155
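A short sketch of raising the two exception types above from application code; the function names are hypothetical.

from starlette.exceptions import HTTPException, WebSocketException

def lookup(items: dict, key: str):
    # detail defaults to the HTTP status phrase ("Not Found") if omitted.
    if key not in items:
        raise HTTPException(status_code=404, detail=f"No such item: {key!r}")
    return items[key]

def reject_unauthorized_socket():
    # 1008 is the standard "policy violation" websocket close code.
    raise WebSocketException(code=1008, reason="not authorized")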
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/datastructures.py
import typing from collections.abc import Sequence from shlex import shlex from urllib.parse import SplitResult, parse_qsl, urlencode, urlsplit from starlette.concurrency import run_in_threadpool from starlette.types import Scope class Address(typing.NamedTuple): host: str port: int _KeyType = typing.TypeVar("_KeyType") # Mapping keys are invariant but their values are covariant since # you can only read them # that is, you can't do `Mapping[str, Animal]()["fido"] = Dog()` _CovariantValueType = typing.TypeVar("_CovariantValueType", covariant=True) class URL: def __init__( self, url: str = "", scope: typing.Optional[Scope] = None, **components: typing.Any, ) -> None: if scope is not None: assert not url, 'Cannot set both "url" and "scope".' assert not components, 'Cannot set both "scope" and "**components".' scheme = scope.get("scheme", "http") server = scope.get("server", None) path = scope.get("root_path", "") + scope["path"] query_string = scope.get("query_string", b"") host_header = None for key, value in scope["headers"]: if key == b"host": host_header = value.decode("latin-1") break if host_header is not None: url = f"{scheme}://{host_header}{path}" elif server is None: url = path else: host, port = server default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme] if port == default_port: url = f"{scheme}://{host}{path}" else: url = f"{scheme}://{host}:{port}{path}" if query_string: url += "?" + query_string.decode() elif components: assert not url, 'Cannot set both "url" and "**components".' url = URL("").replace(**components).components.geturl() self._url = url @property def components(self) -> SplitResult: if not hasattr(self, "_components"): self._components = urlsplit(self._url) return self._components @property def scheme(self) -> str: return self.components.scheme @property def netloc(self) -> str: return self.components.netloc @property def path(self) -> str: return self.components.path @property def query(self) -> str: return self.components.query @property def fragment(self) -> str: return self.components.fragment @property def username(self) -> typing.Union[None, str]: return self.components.username @property def password(self) -> typing.Union[None, str]: return self.components.password @property def hostname(self) -> typing.Union[None, str]: return self.components.hostname @property def port(self) -> typing.Optional[int]: return self.components.port @property def is_secure(self) -> bool: return self.scheme in ("https", "wss") def replace(self, **kwargs: typing.Any) -> "URL": if ( "username" in kwargs or "password" in kwargs or "hostname" in kwargs or "port" in kwargs ): hostname = kwargs.pop("hostname", None) port = kwargs.pop("port", self.port) username = kwargs.pop("username", self.username) password = kwargs.pop("password", self.password) if hostname is None: netloc = self.netloc _, _, hostname = netloc.rpartition("@") if hostname[-1] != "]": hostname = hostname.rsplit(":", 1)[0] netloc = hostname if port is not None: netloc += f":{port}" if username is not None: userpass = username if password is not None: userpass += f":{password}" netloc = f"{userpass}@{netloc}" kwargs["netloc"] = netloc components = self.components._replace(**kwargs) return self.__class__(components.geturl()) def include_query_params(self, **kwargs: typing.Any) -> "URL": params = MultiDict(parse_qsl(self.query, keep_blank_values=True)) params.update({str(key): str(value) for key, value in kwargs.items()}) query = urlencode(params.multi_items()) return self.replace(query=query) def 
replace_query_params(self, **kwargs: typing.Any) -> "URL": query = urlencode([(str(key), str(value)) for key, value in kwargs.items()]) return self.replace(query=query) def remove_query_params( self, keys: typing.Union[str, typing.Sequence[str]] ) -> "URL": if isinstance(keys, str): keys = [keys] params = MultiDict(parse_qsl(self.query, keep_blank_values=True)) for key in keys: params.pop(key, None) query = urlencode(params.multi_items()) return self.replace(query=query) def __eq__(self, other: typing.Any) -> bool: return str(self) == str(other) def __str__(self) -> str: return self._url def __repr__(self) -> str: url = str(self) if self.password: url = str(self.replace(password="********")) return f"{self.__class__.__name__}({repr(url)})" class URLPath(str): """ A URL path string that may also hold an associated protocol and/or host. Used by the routing to return `url_path_for` matches. """ def __new__(cls, path: str, protocol: str = "", host: str = "") -> "URLPath": assert protocol in ("http", "websocket", "") return str.__new__(cls, path) def __init__(self, path: str, protocol: str = "", host: str = "") -> None: self.protocol = protocol self.host = host def make_absolute_url(self, base_url: typing.Union[str, URL]) -> str: if isinstance(base_url, str): base_url = URL(base_url) if self.protocol: scheme = { "http": {True: "https", False: "http"}, "websocket": {True: "wss", False: "ws"}, }[self.protocol][base_url.is_secure] else: scheme = base_url.scheme netloc = self.host or base_url.netloc path = base_url.path.rstrip("/") + str(self) return str(URL(scheme=scheme, netloc=netloc, path=path)) class Secret: """ Holds a string value that should not be revealed in tracebacks etc. You should cast the value to `str` at the point it is required. """ def __init__(self, value: str): self._value = value def __repr__(self) -> str: class_name = self.__class__.__name__ return f"{class_name}('**********')" def __str__(self) -> str: return self._value def __bool__(self) -> bool: return bool(self._value) class CommaSeparatedStrings(Sequence): def __init__(self, value: typing.Union[str, typing.Sequence[str]]): if isinstance(value, str): splitter = shlex(value, posix=True) splitter.whitespace = "," splitter.whitespace_split = True self._items = [item.strip() for item in splitter] else: self._items = list(value) def __len__(self) -> int: return len(self._items) def __getitem__(self, index: typing.Union[int, slice]) -> typing.Any: return self._items[index] def __iter__(self) -> typing.Iterator[str]: return iter(self._items) def __repr__(self) -> str: class_name = self.__class__.__name__ items = [item for item in self] return f"{class_name}({items!r})" def __str__(self) -> str: return ", ".join(repr(item) for item in self) class ImmutableMultiDict(typing.Mapping[_KeyType, _CovariantValueType]): _dict: typing.Dict[_KeyType, _CovariantValueType] def __init__( self, *args: typing.Union[ "ImmutableMultiDict[_KeyType, _CovariantValueType]", typing.Mapping[_KeyType, _CovariantValueType], typing.Iterable[typing.Tuple[_KeyType, _CovariantValueType]], ], **kwargs: typing.Any, ) -> None: assert len(args) < 2, "Too many arguments." 
value: typing.Any = args[0] if args else [] if kwargs: value = ( ImmutableMultiDict(value).multi_items() + ImmutableMultiDict(kwargs).multi_items() # type: ignore[operator] ) if not value: _items: typing.List[typing.Tuple[typing.Any, typing.Any]] = [] elif hasattr(value, "multi_items"): value = typing.cast( ImmutableMultiDict[_KeyType, _CovariantValueType], value ) _items = list(value.multi_items()) elif hasattr(value, "items"): value = typing.cast(typing.Mapping[_KeyType, _CovariantValueType], value) _items = list(value.items()) else: value = typing.cast( typing.List[typing.Tuple[typing.Any, typing.Any]], value ) _items = list(value) self._dict = {k: v for k, v in _items} self._list = _items def getlist(self, key: typing.Any) -> typing.List[_CovariantValueType]: return [item_value for item_key, item_value in self._list if item_key == key] def keys(self) -> typing.KeysView[_KeyType]: return self._dict.keys() def values(self) -> typing.ValuesView[_CovariantValueType]: return self._dict.values() def items(self) -> typing.ItemsView[_KeyType, _CovariantValueType]: return self._dict.items() def multi_items(self) -> typing.List[typing.Tuple[_KeyType, _CovariantValueType]]: return list(self._list) def __getitem__(self, key: _KeyType) -> _CovariantValueType: return self._dict[key] def __contains__(self, key: typing.Any) -> bool: return key in self._dict def __iter__(self) -> typing.Iterator[_KeyType]: return iter(self.keys()) def __len__(self) -> int: return len(self._dict) def __eq__(self, other: typing.Any) -> bool: if not isinstance(other, self.__class__): return False return sorted(self._list) == sorted(other._list) def __repr__(self) -> str: class_name = self.__class__.__name__ items = self.multi_items() return f"{class_name}({items!r})" class MultiDict(ImmutableMultiDict[typing.Any, typing.Any]): def __setitem__(self, key: typing.Any, value: typing.Any) -> None: self.setlist(key, [value]) def __delitem__(self, key: typing.Any) -> None: self._list = [(k, v) for k, v in self._list if k != key] del self._dict[key] def pop(self, key: typing.Any, default: typing.Any = None) -> typing.Any: self._list = [(k, v) for k, v in self._list if k != key] return self._dict.pop(key, default) def popitem(self) -> typing.Tuple: key, value = self._dict.popitem() self._list = [(k, v) for k, v in self._list if k != key] return key, value def poplist(self, key: typing.Any) -> typing.List: values = [v for k, v in self._list if k == key] self.pop(key) return values def clear(self) -> None: self._dict.clear() self._list.clear() def setdefault(self, key: typing.Any, default: typing.Any = None) -> typing.Any: if key not in self: self._dict[key] = default self._list.append((key, default)) return self[key] def setlist(self, key: typing.Any, values: typing.List) -> None: if not values: self.pop(key, None) else: existing_items = [(k, v) for (k, v) in self._list if k != key] self._list = existing_items + [(key, value) for value in values] self._dict[key] = values[-1] def append(self, key: typing.Any, value: typing.Any) -> None: self._list.append((key, value)) self._dict[key] = value def update( self, *args: typing.Union[ "MultiDict", typing.Mapping, typing.List[typing.Tuple[typing.Any, typing.Any]], ], **kwargs: typing.Any, ) -> None: value = MultiDict(*args, **kwargs) existing_items = [(k, v) for (k, v) in self._list if k not in value.keys()] self._list = existing_items + value.multi_items() self._dict.update(value) class QueryParams(ImmutableMultiDict[str, str]): """ An immutable multidict. 
""" def __init__( self, *args: typing.Union[ "ImmutableMultiDict", typing.Mapping, typing.List[typing.Tuple[typing.Any, typing.Any]], str, bytes, ], **kwargs: typing.Any, ) -> None: assert len(args) < 2, "Too many arguments." value = args[0] if args else [] if isinstance(value, str): super().__init__(parse_qsl(value, keep_blank_values=True), **kwargs) elif isinstance(value, bytes): super().__init__( parse_qsl(value.decode("latin-1"), keep_blank_values=True), **kwargs ) else: super().__init__(*args, **kwargs) # type: ignore[arg-type] self._list = [(str(k), str(v)) for k, v in self._list] self._dict = {str(k): str(v) for k, v in self._dict.items()} def __str__(self) -> str: return urlencode(self._list) def __repr__(self) -> str: class_name = self.__class__.__name__ query_string = str(self) return f"{class_name}({query_string!r})" class UploadFile: """ An uploaded file included as part of the request data. """ def __init__( self, file: typing.BinaryIO, *, size: typing.Optional[int] = None, filename: typing.Optional[str] = None, headers: "typing.Optional[Headers]" = None, ) -> None: self.filename = filename self.file = file self.size = size self.headers = headers or Headers() @property def content_type(self) -> typing.Optional[str]: return self.headers.get("content-type", None) @property def _in_memory(self) -> bool: # check for SpooledTemporaryFile._rolled rolled_to_disk = getattr(self.file, "_rolled", True) return not rolled_to_disk async def write(self, data: bytes) -> None: if self.size is not None: self.size += len(data) if self._in_memory: self.file.write(data) else: await run_in_threadpool(self.file.write, data) async def read(self, size: int = -1) -> bytes: if self._in_memory: return self.file.read(size) return await run_in_threadpool(self.file.read, size) async def seek(self, offset: int) -> None: if self._in_memory: self.file.seek(offset) else: await run_in_threadpool(self.file.seek, offset) async def close(self) -> None: if self._in_memory: self.file.close() else: await run_in_threadpool(self.file.close) class FormData(ImmutableMultiDict[str, typing.Union[UploadFile, str]]): """ An immutable multidict, containing both file uploads and text input. """ def __init__( self, *args: typing.Union[ "FormData", typing.Mapping[str, typing.Union[str, UploadFile]], typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]], ], **kwargs: typing.Union[str, UploadFile], ) -> None: super().__init__(*args, **kwargs) async def close(self) -> None: for key, value in self.multi_items(): if isinstance(value, UploadFile): await value.close() class Headers(typing.Mapping[str, str]): """ An immutable, case-insensitive multidict. """ def __init__( self, headers: typing.Optional[typing.Mapping[str, str]] = None, raw: typing.Optional[typing.List[typing.Tuple[bytes, bytes]]] = None, scope: typing.Optional[typing.MutableMapping[str, typing.Any]] = None, ) -> None: self._list: typing.List[typing.Tuple[bytes, bytes]] = [] if headers is not None: assert raw is None, 'Cannot set both "headers" and "raw".' assert scope is None, 'Cannot set both "headers" and "scope".' self._list = [ (key.lower().encode("latin-1"), value.encode("latin-1")) for key, value in headers.items() ] elif raw is not None: assert scope is None, 'Cannot set both "raw" and "scope".' 
self._list = raw elif scope is not None: # scope["headers"] isn't necessarily a list # it might be a tuple or other iterable self._list = scope["headers"] = list(scope["headers"]) @property def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]: return list(self._list) def keys(self) -> typing.List[str]: # type: ignore[override] return [key.decode("latin-1") for key, value in self._list] def values(self) -> typing.List[str]: # type: ignore[override] return [value.decode("latin-1") for key, value in self._list] def items(self) -> typing.List[typing.Tuple[str, str]]: # type: ignore[override] return [ (key.decode("latin-1"), value.decode("latin-1")) for key, value in self._list ] def getlist(self, key: str) -> typing.List[str]: get_header_key = key.lower().encode("latin-1") return [ item_value.decode("latin-1") for item_key, item_value in self._list if item_key == get_header_key ] def mutablecopy(self) -> "MutableHeaders": return MutableHeaders(raw=self._list[:]) def __getitem__(self, key: str) -> str: get_header_key = key.lower().encode("latin-1") for header_key, header_value in self._list: if header_key == get_header_key: return header_value.decode("latin-1") raise KeyError(key) def __contains__(self, key: typing.Any) -> bool: get_header_key = key.lower().encode("latin-1") for header_key, header_value in self._list: if header_key == get_header_key: return True return False def __iter__(self) -> typing.Iterator[typing.Any]: return iter(self.keys()) def __len__(self) -> int: return len(self._list) def __eq__(self, other: typing.Any) -> bool: if not isinstance(other, Headers): return False return sorted(self._list) == sorted(other._list) def __repr__(self) -> str: class_name = self.__class__.__name__ as_dict = dict(self.items()) if len(as_dict) == len(self): return f"{class_name}({as_dict!r})" return f"{class_name}(raw={self.raw!r})" class MutableHeaders(Headers): def __setitem__(self, key: str, value: str) -> None: """ Set the header `key` to `value`, removing any duplicate entries. Retains insertion order. """ set_key = key.lower().encode("latin-1") set_value = value.encode("latin-1") found_indexes: "typing.List[int]" = [] for idx, (item_key, item_value) in enumerate(self._list): if item_key == set_key: found_indexes.append(idx) for idx in reversed(found_indexes[1:]): del self._list[idx] if found_indexes: idx = found_indexes[0] self._list[idx] = (set_key, set_value) else: self._list.append((set_key, set_value)) def __delitem__(self, key: str) -> None: """ Remove the header `key`. """ del_key = key.lower().encode("latin-1") pop_indexes: "typing.List[int]" = [] for idx, (item_key, item_value) in enumerate(self._list): if item_key == del_key: pop_indexes.append(idx) for idx in reversed(pop_indexes): del self._list[idx] def __ior__(self, other: typing.Mapping[str, str]) -> "MutableHeaders": if not isinstance(other, typing.Mapping): raise TypeError(f"Expected a mapping but got {other.__class__.__name__}") self.update(other) return self def __or__(self, other: typing.Mapping[str, str]) -> "MutableHeaders": if not isinstance(other, typing.Mapping): raise TypeError(f"Expected a mapping but got {other.__class__.__name__}") new = self.mutablecopy() new.update(other) return new @property def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]: return self._list def setdefault(self, key: str, value: str) -> str: """ If the header `key` does not exist, then set it to `value`. Returns the header value. 
""" set_key = key.lower().encode("latin-1") set_value = value.encode("latin-1") for idx, (item_key, item_value) in enumerate(self._list): if item_key == set_key: return item_value.decode("latin-1") self._list.append((set_key, set_value)) return value def update(self, other: typing.Mapping[str, str]) -> None: for key, val in other.items(): self[key] = val def append(self, key: str, value: str) -> None: """ Append a header, preserving any duplicate entries. """ append_key = key.lower().encode("latin-1") append_value = value.encode("latin-1") self._list.append((append_key, append_value)) def add_vary_header(self, vary: str) -> None: existing = self.get("vary") if existing is not None: vary = ", ".join([existing, vary]) self["vary"] = vary class State: """ An object that can be used to store arbitrary state. Used for `request.state` and `app.state`. """ _state: typing.Dict[str, typing.Any] def __init__(self, state: typing.Optional[typing.Dict[str, typing.Any]] = None): if state is None: state = {} super().__setattr__("_state", state) def __setattr__(self, key: typing.Any, value: typing.Any) -> None: self._state[key] = value def __getattr__(self, key: typing.Any) -> typing.Any: try: return self._state[key] except KeyError: message = "'{}' object has no attribute '{}'" raise AttributeError(message.format(self.__class__.__name__, key)) def __delattr__(self, key: typing.Any) -> None: del self._state[key]
23,092
Python
31.571227
87
0.554867
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/responses.py
import http.cookies import json import os import stat import sys import typing from datetime import datetime from email.utils import format_datetime, formatdate from functools import partial from mimetypes import guess_type as mimetypes_guess_type from urllib.parse import quote import anyio from starlette._compat import md5_hexdigest from starlette.background import BackgroundTask from starlette.concurrency import iterate_in_threadpool from starlette.datastructures import URL, MutableHeaders from starlette.types import Receive, Scope, Send if sys.version_info >= (3, 8): # pragma: no cover from typing import Literal else: # pragma: no cover from typing_extensions import Literal # Workaround for adding samesite support to pre 3.8 python http.cookies.Morsel._reserved["samesite"] = "SameSite" # type: ignore[attr-defined] # Compatibility wrapper for `mimetypes.guess_type` to support `os.PathLike` on <py3.8 def guess_type( url: typing.Union[str, "os.PathLike[str]"], strict: bool = True ) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]: if sys.version_info < (3, 8): # pragma: no cover url = os.fspath(url) return mimetypes_guess_type(url, strict) class Response: media_type = None charset = "utf-8" def __init__( self, content: typing.Any = None, status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, ) -> None: self.status_code = status_code if media_type is not None: self.media_type = media_type self.background = background self.body = self.render(content) self.init_headers(headers) def render(self, content: typing.Any) -> bytes: if content is None: return b"" if isinstance(content, bytes): return content return content.encode(self.charset) def init_headers( self, headers: typing.Optional[typing.Mapping[str, str]] = None ) -> None: if headers is None: raw_headers: typing.List[typing.Tuple[bytes, bytes]] = [] populate_content_length = True populate_content_type = True else: raw_headers = [ (k.lower().encode("latin-1"), v.encode("latin-1")) for k, v in headers.items() ] keys = [h[0] for h in raw_headers] populate_content_length = b"content-length" not in keys populate_content_type = b"content-type" not in keys body = getattr(self, "body", None) if ( body is not None and populate_content_length and not (self.status_code < 200 or self.status_code in (204, 304)) ): content_length = str(len(body)) raw_headers.append((b"content-length", content_length.encode("latin-1"))) content_type = self.media_type if content_type is not None and populate_content_type: if content_type.startswith("text/"): content_type += "; charset=" + self.charset raw_headers.append((b"content-type", content_type.encode("latin-1"))) self.raw_headers = raw_headers @property def headers(self) -> MutableHeaders: if not hasattr(self, "_headers"): self._headers = MutableHeaders(raw=self.raw_headers) return self._headers def set_cookie( self, key: str, value: str = "", max_age: typing.Optional[int] = None, expires: typing.Optional[typing.Union[datetime, str, int]] = None, path: str = "/", domain: typing.Optional[str] = None, secure: bool = False, httponly: bool = False, samesite: typing.Optional[Literal["lax", "strict", "none"]] = "lax", ) -> None: cookie: "http.cookies.BaseCookie[str]" = http.cookies.SimpleCookie() cookie[key] = value if max_age is not None: cookie[key]["max-age"] = max_age if expires is not None: if isinstance(expires, datetime): cookie[key]["expires"] = format_datetime(expires, usegmt=True) 
else: cookie[key]["expires"] = expires if path is not None: cookie[key]["path"] = path if domain is not None: cookie[key]["domain"] = domain if secure: cookie[key]["secure"] = True if httponly: cookie[key]["httponly"] = True if samesite is not None: assert samesite.lower() in [ "strict", "lax", "none", ], "samesite must be either 'strict', 'lax' or 'none'" cookie[key]["samesite"] = samesite cookie_val = cookie.output(header="").strip() self.raw_headers.append((b"set-cookie", cookie_val.encode("latin-1"))) def delete_cookie( self, key: str, path: str = "/", domain: typing.Optional[str] = None, secure: bool = False, httponly: bool = False, samesite: typing.Optional[Literal["lax", "strict", "none"]] = "lax", ) -> None: self.set_cookie( key, max_age=0, expires=0, path=path, domain=domain, secure=secure, httponly=httponly, samesite=samesite, ) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: await send( { "type": "http.response.start", "status": self.status_code, "headers": self.raw_headers, } ) await send({"type": "http.response.body", "body": self.body}) if self.background is not None: await self.background() class HTMLResponse(Response): media_type = "text/html" class PlainTextResponse(Response): media_type = "text/plain" class JSONResponse(Response): media_type = "application/json" def __init__( self, content: typing.Any, status_code: int = 200, headers: typing.Optional[typing.Dict[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, ) -> None: super().__init__(content, status_code, headers, media_type, background) def render(self, content: typing.Any) -> bytes: return json.dumps( content, ensure_ascii=False, allow_nan=False, indent=None, separators=(",", ":"), ).encode("utf-8") class RedirectResponse(Response): def __init__( self, url: typing.Union[str, URL], status_code: int = 307, headers: typing.Optional[typing.Mapping[str, str]] = None, background: typing.Optional[BackgroundTask] = None, ) -> None: super().__init__( content=b"", status_code=status_code, headers=headers, background=background ) self.headers["location"] = quote(str(url), safe=":/%#?=@[]!$&'()*+,;") Content = typing.Union[str, bytes] SyncContentStream = typing.Iterator[Content] AsyncContentStream = typing.AsyncIterable[Content] ContentStream = typing.Union[AsyncContentStream, SyncContentStream] class StreamingResponse(Response): body_iterator: AsyncContentStream def __init__( self, content: ContentStream, status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, ) -> None: if isinstance(content, typing.AsyncIterable): self.body_iterator = content else: self.body_iterator = iterate_in_threadpool(content) self.status_code = status_code self.media_type = self.media_type if media_type is None else media_type self.background = background self.init_headers(headers) async def listen_for_disconnect(self, receive: Receive) -> None: while True: message = await receive() if message["type"] == "http.disconnect": break async def stream_response(self, send: Send) -> None: await send( { "type": "http.response.start", "status": self.status_code, "headers": self.raw_headers, } ) async for chunk in self.body_iterator: if not isinstance(chunk, bytes): chunk = chunk.encode(self.charset) await send({"type": "http.response.body", "body": chunk, "more_body": True}) await send({"type": "http.response.body", "body": b"", "more_body": False}) 
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: async with anyio.create_task_group() as task_group: async def wrap(func: "typing.Callable[[], typing.Awaitable[None]]") -> None: await func() task_group.cancel_scope.cancel() task_group.start_soon(wrap, partial(self.stream_response, send)) await wrap(partial(self.listen_for_disconnect, receive)) if self.background is not None: await self.background() class FileResponse(Response): chunk_size = 64 * 1024 def __init__( self, path: typing.Union[str, "os.PathLike[str]"], status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, filename: typing.Optional[str] = None, stat_result: typing.Optional[os.stat_result] = None, method: typing.Optional[str] = None, content_disposition_type: str = "attachment", ) -> None: self.path = path self.status_code = status_code self.filename = filename self.send_header_only = method is not None and method.upper() == "HEAD" if media_type is None: media_type = guess_type(filename or path)[0] or "text/plain" self.media_type = media_type self.background = background self.init_headers(headers) if self.filename is not None: content_disposition_filename = quote(self.filename) if content_disposition_filename != self.filename: content_disposition = "{}; filename*=utf-8''{}".format( content_disposition_type, content_disposition_filename ) else: content_disposition = '{}; filename="{}"'.format( content_disposition_type, self.filename ) self.headers.setdefault("content-disposition", content_disposition) self.stat_result = stat_result if stat_result is not None: self.set_stat_headers(stat_result) def set_stat_headers(self, stat_result: os.stat_result) -> None: content_length = str(stat_result.st_size) last_modified = formatdate(stat_result.st_mtime, usegmt=True) etag_base = str(stat_result.st_mtime) + "-" + str(stat_result.st_size) etag = md5_hexdigest(etag_base.encode(), usedforsecurity=False) self.headers.setdefault("content-length", content_length) self.headers.setdefault("last-modified", last_modified) self.headers.setdefault("etag", etag) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if self.stat_result is None: try: stat_result = await anyio.to_thread.run_sync(os.stat, self.path) self.set_stat_headers(stat_result) except FileNotFoundError: raise RuntimeError(f"File at path {self.path} does not exist.") else: mode = stat_result.st_mode if not stat.S_ISREG(mode): raise RuntimeError(f"File at path {self.path} is not a file.") await send( { "type": "http.response.start", "status": self.status_code, "headers": self.raw_headers, } ) if self.send_header_only: await send({"type": "http.response.body", "body": b"", "more_body": False}) else: async with await anyio.open_file(self.path, mode="rb") as file: more_body = True while more_body: chunk = await file.read(self.chunk_size) more_body = len(chunk) == self.chunk_size await send( { "type": "http.response.body", "body": chunk, "more_body": more_body, } ) if self.background is not None: await self.background()
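

# A minimal sketch of composing the response classes above inside a raw ASGI
# callable; the route logic and cookie values are illustrative assumptions.
async def _example_app(scope: Scope, receive: Receive, send: Send) -> None:
    if scope["path"] == "/":
        response: Response = JSONResponse({"hello": "world"})
        response.set_cookie("session", "abc123", httponly=True, samesite="lax")
    else:
        response = RedirectResponse(url="/")  # 307 Temporary Redirect by default
    await response(scope, receive, send)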
13,147
Python
34.825613
88
0.573135
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/background.py
import sys import typing if sys.version_info >= (3, 10): # pragma: no cover from typing import ParamSpec else: # pragma: no cover from typing_extensions import ParamSpec from starlette._utils import is_async_callable from starlette.concurrency import run_in_threadpool P = ParamSpec("P") class BackgroundTask: def __init__( self, func: typing.Callable[P, typing.Any], *args: P.args, **kwargs: P.kwargs ) -> None: self.func = func self.args = args self.kwargs = kwargs self.is_async = is_async_callable(func) async def __call__(self) -> None: if self.is_async: await self.func(*self.args, **self.kwargs) else: await run_in_threadpool(self.func, *self.args, **self.kwargs) class BackgroundTasks(BackgroundTask): def __init__(self, tasks: typing.Optional[typing.Sequence[BackgroundTask]] = None): self.tasks = list(tasks) if tasks else [] def add_task( self, func: typing.Callable[P, typing.Any], *args: P.args, **kwargs: P.kwargs ) -> None: task = BackgroundTask(func, *args, **kwargs) self.tasks.append(task) async def __call__(self) -> None: for task in self.tasks: await task()
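

# A short sketch of queueing work to run after a response is sent; the task
# functions below are illustrative placeholders.
async def _send_email(to: str) -> None:
    print(f"emailing {to}")


def _write_log(message: str) -> None:
    print(message)


async def _example_background() -> None:
    tasks = BackgroundTasks()
    tasks.add_task(_send_email, "user@example.com")  # async, awaited directly
    tasks.add_task(_write_log, message="sent")  # sync, run in a threadpool
    await tasks()  # executes the tasks in insertion order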
1,259
Python
27.636363
87
0.622716
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/_compat.py
import hashlib

# Compat wrapper to always include the `usedforsecurity=...` parameter,
# which is only added from Python 3.9 onwards.
# We use this flag to indicate that we use `md5` hashes only for non-security
# cases (our ETag checksums).
# If we don't indicate that we're using MD5 for non-security related reasons,
# then attempting to use this function will raise an error when used in
# environments which enable a strict "FIPS mode".
#
# See issue: https://github.com/encode/starlette/issues/1365
try:
    # check if the Python version supports the parameter
    # using usedforsecurity=False to avoid an exception on FIPS systems
    # that reject usedforsecurity=True
    hashlib.md5(b"data", usedforsecurity=False)  # type: ignore[call-arg]

    def md5_hexdigest(
        data: bytes, *, usedforsecurity: bool = True
    ) -> str:  # pragma: no cover
        return hashlib.md5(  # type: ignore[call-arg]
            data, usedforsecurity=usedforsecurity
        ).hexdigest()

except TypeError:  # pragma: no cover

    def md5_hexdigest(data: bytes, *, usedforsecurity: bool = True) -> str:
        return hashlib.md5(data).hexdigest()
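

# A quick sketch of the wrapper above, hashing ETag-style input without
# tripping FIPS-mode restrictions; the input bytes are illustrative.
def _example_etag() -> str:
    return md5_hexdigest(b"1700000000.0-1024", usedforsecurity=False)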
1,149
Python
37.333332
77
0.706701
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/templating.py
import typing from os import PathLike from starlette.background import BackgroundTask from starlette.requests import Request from starlette.responses import Response from starlette.types import Receive, Scope, Send try: import jinja2 # @contextfunction was renamed to @pass_context in Jinja 3.0, and was removed in 3.1 # hence we try to get pass_context (most installs will be >=3.1) # and fall back to contextfunction, # adding a type ignore for mypy to let us access an attribute that may not exist if hasattr(jinja2, "pass_context"): pass_context = jinja2.pass_context else: # pragma: nocover pass_context = jinja2.contextfunction # type: ignore[attr-defined] except ImportError: # pragma: nocover jinja2 = None # type: ignore[assignment] class _TemplateResponse(Response): media_type = "text/html" def __init__( self, template: typing.Any, context: dict, status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, ): self.template = template self.context = context content = template.render(context) super().__init__(content, status_code, headers, media_type, background) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: request = self.context.get("request", {}) extensions = request.get("extensions", {}) if "http.response.debug" in extensions: await send( { "type": "http.response.debug", "info": { "template": self.template, "context": self.context, }, } ) await super().__call__(scope, receive, send) class Jinja2Templates: """ templates = Jinja2Templates("templates") return templates.TemplateResponse("index.html", {"request": request}) """ def __init__( self, directory: typing.Union[str, PathLike], context_processors: typing.Optional[ typing.List[typing.Callable[[Request], typing.Dict[str, typing.Any]]] ] = None, **env_options: typing.Any ) -> None: assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates" self.env = self._create_env(directory, **env_options) self.context_processors = context_processors or [] def _create_env( self, directory: typing.Union[str, PathLike], **env_options: typing.Any ) -> "jinja2.Environment": @pass_context def url_for(context: dict, name: str, **path_params: typing.Any) -> str: request = context["request"] return request.url_for(name, **path_params) loader = jinja2.FileSystemLoader(directory) env_options.setdefault("loader", loader) env_options.setdefault("autoescape", True) env = jinja2.Environment(**env_options) env.globals["url_for"] = url_for return env def get_template(self, name: str) -> "jinja2.Template": return self.env.get_template(name) def TemplateResponse( self, name: str, context: dict, status_code: int = 200, headers: typing.Optional[typing.Mapping[str, str]] = None, media_type: typing.Optional[str] = None, background: typing.Optional[BackgroundTask] = None, ) -> _TemplateResponse: if "request" not in context: raise ValueError('context must include a "request" key') request = typing.cast(Request, context["request"]) for context_processor in self.context_processors: context.update(context_processor(request)) template = self.get_template(name) return _TemplateResponse( template, context, status_code=status_code, headers=headers, media_type=media_type, background=background, )
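

# A minimal wiring sketch for Jinja2Templates; the "templates" directory and
# "index.html" template are assumptions made for illustration.
def _example_templates() -> "Jinja2Templates":
    templates = Jinja2Templates(directory="templates")

    async def homepage(request: Request) -> _TemplateResponse:
        return templates.TemplateResponse("index.html", {"request": request})

    return templates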
4,120
Python
33.341666
88
0.606311
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/__init__.py
__version__ = "0.25.0"
23
Python
10.999995
22
0.478261
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/websockets.py
import enum import json import typing from starlette.requests import HTTPConnection from starlette.types import Message, Receive, Scope, Send class WebSocketState(enum.Enum): CONNECTING = 0 CONNECTED = 1 DISCONNECTED = 2 class WebSocketDisconnect(Exception): def __init__(self, code: int = 1000, reason: typing.Optional[str] = None) -> None: self.code = code self.reason = reason or "" class WebSocket(HTTPConnection): def __init__(self, scope: Scope, receive: Receive, send: Send) -> None: super().__init__(scope) assert scope["type"] == "websocket" self._receive = receive self._send = send self.client_state = WebSocketState.CONNECTING self.application_state = WebSocketState.CONNECTING async def receive(self) -> Message: """ Receive ASGI websocket messages, ensuring valid state transitions. """ if self.client_state == WebSocketState.CONNECTING: message = await self._receive() message_type = message["type"] if message_type != "websocket.connect": raise RuntimeError( 'Expected ASGI message "websocket.connect", ' f"but got {message_type!r}" ) self.client_state = WebSocketState.CONNECTED return message elif self.client_state == WebSocketState.CONNECTED: message = await self._receive() message_type = message["type"] if message_type not in {"websocket.receive", "websocket.disconnect"}: raise RuntimeError( 'Expected ASGI message "websocket.receive" or ' f'"websocket.disconnect", but got {message_type!r}' ) if message_type == "websocket.disconnect": self.client_state = WebSocketState.DISCONNECTED return message else: raise RuntimeError( 'Cannot call "receive" once a disconnect message has been received.' ) async def send(self, message: Message) -> None: """ Send ASGI websocket messages, ensuring valid state transitions. """ if self.application_state == WebSocketState.CONNECTING: message_type = message["type"] if message_type not in {"websocket.accept", "websocket.close"}: raise RuntimeError( 'Expected ASGI message "websocket.connect", ' f"but got {message_type!r}" ) if message_type == "websocket.close": self.application_state = WebSocketState.DISCONNECTED else: self.application_state = WebSocketState.CONNECTED await self._send(message) elif self.application_state == WebSocketState.CONNECTED: message_type = message["type"] if message_type not in {"websocket.send", "websocket.close"}: raise RuntimeError( 'Expected ASGI message "websocket.send" or "websocket.close", ' f"but got {message_type!r}" ) if message_type == "websocket.close": self.application_state = WebSocketState.DISCONNECTED await self._send(message) else: raise RuntimeError('Cannot call "send" once a close message has been sent.') async def accept( self, subprotocol: typing.Optional[str] = None, headers: typing.Optional[typing.Iterable[typing.Tuple[bytes, bytes]]] = None, ) -> None: headers = headers or [] if self.client_state == WebSocketState.CONNECTING: # If we haven't yet seen the 'connect' message, then wait for it first. await self.receive() await self.send( {"type": "websocket.accept", "subprotocol": subprotocol, "headers": headers} ) def _raise_on_disconnect(self, message: Message) -> None: if message["type"] == "websocket.disconnect": raise WebSocketDisconnect(message["code"]) async def receive_text(self) -> str: if self.application_state != WebSocketState.CONNECTED: raise RuntimeError( 'WebSocket is not connected. Need to call "accept" first.' 
) message = await self.receive() self._raise_on_disconnect(message) return message["text"] async def receive_bytes(self) -> bytes: if self.application_state != WebSocketState.CONNECTED: raise RuntimeError( 'WebSocket is not connected. Need to call "accept" first.' ) message = await self.receive() self._raise_on_disconnect(message) return message["bytes"] async def receive_json(self, mode: str = "text") -> typing.Any: if mode not in {"text", "binary"}: raise RuntimeError('The "mode" argument should be "text" or "binary".') if self.application_state != WebSocketState.CONNECTED: raise RuntimeError( 'WebSocket is not connected. Need to call "accept" first.' ) message = await self.receive() self._raise_on_disconnect(message) if mode == "text": text = message["text"] else: text = message["bytes"].decode("utf-8") return json.loads(text) async def iter_text(self) -> typing.AsyncIterator[str]: try: while True: yield await self.receive_text() except WebSocketDisconnect: pass async def iter_bytes(self) -> typing.AsyncIterator[bytes]: try: while True: yield await self.receive_bytes() except WebSocketDisconnect: pass async def iter_json(self) -> typing.AsyncIterator[typing.Any]: try: while True: yield await self.receive_json() except WebSocketDisconnect: pass async def send_text(self, data: str) -> None: await self.send({"type": "websocket.send", "text": data}) async def send_bytes(self, data: bytes) -> None: await self.send({"type": "websocket.send", "bytes": data}) async def send_json(self, data: typing.Any, mode: str = "text") -> None: if mode not in {"text", "binary"}: raise RuntimeError('The "mode" argument should be "text" or "binary".') text = json.dumps(data) if mode == "text": await self.send({"type": "websocket.send", "text": text}) else: await self.send({"type": "websocket.send", "bytes": text.encode("utf-8")}) async def close( self, code: int = 1000, reason: typing.Optional[str] = None ) -> None: await self.send( {"type": "websocket.close", "code": code, "reason": reason or ""} ) class WebSocketClose: def __init__(self, code: int = 1000, reason: typing.Optional[str] = None) -> None: self.code = code self.reason = reason or "" async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: await send( {"type": "websocket.close", "code": self.code, "reason": self.reason} )
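

# A minimal echo endpoint sketch built on the WebSocket class above; the
# surrounding ASGI server wiring is assumed.
async def _example_echo(scope: Scope, receive: Receive, send: Send) -> None:
    websocket = WebSocket(scope, receive=receive, send=send)
    await websocket.accept()
    async for text in websocket.iter_text():  # exits cleanly on disconnect
        await websocket.send_text(f"echo: {text}")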
7,317
Python
36.721649
88
0.574962
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/authentication.py
import functools import inspect import typing from urllib.parse import urlencode from starlette._utils import is_async_callable from starlette.exceptions import HTTPException from starlette.requests import HTTPConnection, Request from starlette.responses import RedirectResponse, Response from starlette.websockets import WebSocket _CallableType = typing.TypeVar("_CallableType", bound=typing.Callable) def has_required_scope(conn: HTTPConnection, scopes: typing.Sequence[str]) -> bool: for scope in scopes: if scope not in conn.auth.scopes: return False return True def requires( scopes: typing.Union[str, typing.Sequence[str]], status_code: int = 403, redirect: typing.Optional[str] = None, ) -> typing.Callable[[_CallableType], _CallableType]: scopes_list = [scopes] if isinstance(scopes, str) else list(scopes) def decorator(func: typing.Callable) -> typing.Callable: sig = inspect.signature(func) for idx, parameter in enumerate(sig.parameters.values()): if parameter.name == "request" or parameter.name == "websocket": type_ = parameter.name break else: raise Exception( f'No "request" or "websocket" argument on function "{func}"' ) if type_ == "websocket": # Handle websocket functions. (Always async) @functools.wraps(func) async def websocket_wrapper( *args: typing.Any, **kwargs: typing.Any ) -> None: websocket = kwargs.get( "websocket", args[idx] if idx < len(args) else None ) assert isinstance(websocket, WebSocket) if not has_required_scope(websocket, scopes_list): await websocket.close() else: await func(*args, **kwargs) return websocket_wrapper elif is_async_callable(func): # Handle async request/response functions. @functools.wraps(func) async def async_wrapper( *args: typing.Any, **kwargs: typing.Any ) -> Response: request = kwargs.get("request", args[idx] if idx < len(args) else None) assert isinstance(request, Request) if not has_required_scope(request, scopes_list): if redirect is not None: orig_request_qparam = urlencode({"next": str(request.url)}) next_url = "{redirect_path}?{orig_request}".format( redirect_path=request.url_for(redirect), orig_request=orig_request_qparam, ) return RedirectResponse(url=next_url, status_code=303) raise HTTPException(status_code=status_code) return await func(*args, **kwargs) return async_wrapper else: # Handle sync request/response functions. 
@functools.wraps(func) def sync_wrapper(*args: typing.Any, **kwargs: typing.Any) -> Response: request = kwargs.get("request", args[idx] if idx < len(args) else None) assert isinstance(request, Request) if not has_required_scope(request, scopes_list): if redirect is not None: orig_request_qparam = urlencode({"next": str(request.url)}) next_url = "{redirect_path}?{orig_request}".format( redirect_path=request.url_for(redirect), orig_request=orig_request_qparam, ) return RedirectResponse(url=next_url, status_code=303) raise HTTPException(status_code=status_code) return func(*args, **kwargs) return sync_wrapper return decorator # type: ignore[return-value] class AuthenticationError(Exception): pass class AuthenticationBackend: async def authenticate( self, conn: HTTPConnection ) -> typing.Optional[typing.Tuple["AuthCredentials", "BaseUser"]]: raise NotImplementedError() # pragma: no cover class AuthCredentials: def __init__(self, scopes: typing.Optional[typing.Sequence[str]] = None): self.scopes = [] if scopes is None else list(scopes) class BaseUser: @property def is_authenticated(self) -> bool: raise NotImplementedError() # pragma: no cover @property def display_name(self) -> str: raise NotImplementedError() # pragma: no cover @property def identity(self) -> str: raise NotImplementedError() # pragma: no cover class SimpleUser(BaseUser): def __init__(self, username: str) -> None: self.username = username @property def is_authenticated(self) -> bool: return True @property def display_name(self) -> str: return self.username class UnauthenticatedUser(BaseUser): @property def is_authenticated(self) -> bool: return False @property def display_name(self) -> str: return ""
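

# A small sketch of a bearer-token backend built on the base classes above;
# the header format and hard-coded token are illustrative assumptions.
class _ExampleTokenBackend(AuthenticationBackend):
    async def authenticate(
        self, conn: HTTPConnection
    ) -> typing.Optional[typing.Tuple[AuthCredentials, BaseUser]]:
        if conn.headers.get("Authorization") != "Bearer example-token":
            return None  # unauthenticated; middleware falls back to defaults
        return AuthCredentials(["authenticated"]), SimpleUser("alice")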
5,246
Python
33.071428
87
0.585208
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/formparsers.py
import typing from dataclasses import dataclass, field from enum import Enum from tempfile import SpooledTemporaryFile from urllib.parse import unquote_plus from starlette.datastructures import FormData, Headers, UploadFile try: import multipart from multipart.multipart import parse_options_header except ImportError: # pragma: nocover parse_options_header = None multipart = None class FormMessage(Enum): FIELD_START = 1 FIELD_NAME = 2 FIELD_DATA = 3 FIELD_END = 4 END = 5 @dataclass class MultipartPart: content_disposition: typing.Optional[bytes] = None field_name: str = "" data: bytes = b"" file: typing.Optional[UploadFile] = None item_headers: typing.List[typing.Tuple[bytes, bytes]] = field(default_factory=list) def _user_safe_decode(src: bytes, codec: str) -> str: try: return src.decode(codec) except (UnicodeDecodeError, LookupError): return src.decode("latin-1") class MultiPartException(Exception): def __init__(self, message: str) -> None: self.message = message class FormParser: def __init__( self, headers: Headers, stream: typing.AsyncGenerator[bytes, None] ) -> None: assert ( multipart is not None ), "The `python-multipart` library must be installed to use form parsing." self.headers = headers self.stream = stream self.messages: typing.List[typing.Tuple[FormMessage, bytes]] = [] def on_field_start(self) -> None: message = (FormMessage.FIELD_START, b"") self.messages.append(message) def on_field_name(self, data: bytes, start: int, end: int) -> None: message = (FormMessage.FIELD_NAME, data[start:end]) self.messages.append(message) def on_field_data(self, data: bytes, start: int, end: int) -> None: message = (FormMessage.FIELD_DATA, data[start:end]) self.messages.append(message) def on_field_end(self) -> None: message = (FormMessage.FIELD_END, b"") self.messages.append(message) def on_end(self) -> None: message = (FormMessage.END, b"") self.messages.append(message) async def parse(self) -> FormData: # Callbacks dictionary. callbacks = { "on_field_start": self.on_field_start, "on_field_name": self.on_field_name, "on_field_data": self.on_field_data, "on_field_end": self.on_field_end, "on_end": self.on_end, } # Create the parser. parser = multipart.QuerystringParser(callbacks) field_name = b"" field_value = b"" items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = [] # Feed the parser with data from the request. async for chunk in self.stream: if chunk: parser.write(chunk) else: parser.finalize() messages = list(self.messages) self.messages.clear() for message_type, message_bytes in messages: if message_type == FormMessage.FIELD_START: field_name = b"" field_value = b"" elif message_type == FormMessage.FIELD_NAME: field_name += message_bytes elif message_type == FormMessage.FIELD_DATA: field_value += message_bytes elif message_type == FormMessage.FIELD_END: name = unquote_plus(field_name.decode("latin-1")) value = unquote_plus(field_value.decode("latin-1")) items.append((name, value)) return FormData(items) class MultiPartParser: max_file_size = 1024 * 1024 def __init__( self, headers: Headers, stream: typing.AsyncGenerator[bytes, None], *, max_files: typing.Union[int, float] = 1000, max_fields: typing.Union[int, float] = 1000, ) -> None: assert ( multipart is not None ), "The `python-multipart` library must be installed to use form parsing." 
self.headers = headers self.stream = stream self.max_files = max_files self.max_fields = max_fields self.items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = [] self._current_files = 0 self._current_fields = 0 self._current_partial_header_name: bytes = b"" self._current_partial_header_value: bytes = b"" self._current_part = MultipartPart() self._charset = "" self._file_parts_to_write: typing.List[typing.Tuple[MultipartPart, bytes]] = [] self._file_parts_to_finish: typing.List[MultipartPart] = [] self._files_to_close_on_error: typing.List[SpooledTemporaryFile] = [] def on_part_begin(self) -> None: self._current_part = MultipartPart() def on_part_data(self, data: bytes, start: int, end: int) -> None: message_bytes = data[start:end] if self._current_part.file is None: self._current_part.data += message_bytes else: self._file_parts_to_write.append((self._current_part, message_bytes)) def on_part_end(self) -> None: if self._current_part.file is None: self.items.append( ( self._current_part.field_name, _user_safe_decode(self._current_part.data, self._charset), ) ) else: self._file_parts_to_finish.append(self._current_part) # The file can be added to the items right now even though it's not # finished yet, because it will be finished in the `parse()` method, before # self.items is used in the return value. self.items.append((self._current_part.field_name, self._current_part.file)) def on_header_field(self, data: bytes, start: int, end: int) -> None: self._current_partial_header_name += data[start:end] def on_header_value(self, data: bytes, start: int, end: int) -> None: self._current_partial_header_value += data[start:end] def on_header_end(self) -> None: field = self._current_partial_header_name.lower() if field == b"content-disposition": self._current_part.content_disposition = self._current_partial_header_value self._current_part.item_headers.append( (field, self._current_partial_header_value) ) self._current_partial_header_name = b"" self._current_partial_header_value = b"" def on_headers_finished(self) -> None: disposition, options = parse_options_header( self._current_part.content_disposition ) try: self._current_part.field_name = _user_safe_decode( options[b"name"], self._charset ) except KeyError: raise MultiPartException( 'The Content-Disposition header field "name" must be ' "provided." ) if b"filename" in options: self._current_files += 1 if self._current_files > self.max_files: raise MultiPartException( f"Too many files. Maximum number of files is {self.max_files}." ) filename = _user_safe_decode(options[b"filename"], self._charset) tempfile = SpooledTemporaryFile(max_size=self.max_file_size) self._files_to_close_on_error.append(tempfile) self._current_part.file = UploadFile( file=tempfile, # type: ignore[arg-type] size=0, filename=filename, headers=Headers(raw=self._current_part.item_headers), ) else: self._current_fields += 1 if self._current_fields > self.max_fields: raise MultiPartException( f"Too many fields. Maximum number of fields is {self.max_fields}." ) self._current_part.file = None def on_end(self) -> None: pass async def parse(self) -> FormData: # Parse the Content-Type header to get the multipart boundary. _, params = parse_options_header(self.headers["Content-Type"]) charset = params.get(b"charset", "utf-8") if type(charset) == bytes: charset = charset.decode("latin-1") self._charset = charset try: boundary = params[b"boundary"] except KeyError: raise MultiPartException("Missing boundary in multipart.") # Callbacks dictionary. 
callbacks = { "on_part_begin": self.on_part_begin, "on_part_data": self.on_part_data, "on_part_end": self.on_part_end, "on_header_field": self.on_header_field, "on_header_value": self.on_header_value, "on_header_end": self.on_header_end, "on_headers_finished": self.on_headers_finished, "on_end": self.on_end, } # Create the parser. parser = multipart.MultipartParser(boundary, callbacks) try: # Feed the parser with data from the request. async for chunk in self.stream: parser.write(chunk) # Write file data, it needs to use await with the UploadFile methods # that call the corresponding file methods *in a threadpool*, # otherwise, if they were called directly in the callback methods above # (regular, non-async functions), that would block the event loop in # the main thread. for part, data in self._file_parts_to_write: assert part.file # for type checkers await part.file.write(data) for part in self._file_parts_to_finish: assert part.file # for type checkers await part.file.seek(0) self._file_parts_to_write.clear() self._file_parts_to_finish.clear() except MultiPartException as exc: # Close all the files if there was an error. for file in self._files_to_close_on_error: file.close() raise exc parser.finalize() return FormData(self.items)
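

# A rough sketch of driving MultiPartParser directly; in practice Starlette
# invokes it from Request.form(). The boundary and body bytes are illustrative.
async def _example_multipart() -> FormData:
    headers = Headers({"content-type": "multipart/form-data; boundary=abc"})

    async def stream() -> typing.AsyncGenerator[bytes, None]:
        yield (
            b"--abc\r\n"
            b'Content-Disposition: form-data; name="field"\r\n'
            b"\r\n"
            b"value\r\n"
            b"--abc--\r\n"
        )
        yield b""

    return await MultiPartParser(headers, stream()).parse()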
10,415
Python
36.602888
87
0.579741
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/testclient.py
import contextlib import inspect import io import json import math import queue import sys import typing import warnings from concurrent.futures import Future from types import GeneratorType from urllib.parse import unquote, urljoin import anyio import anyio.from_thread import httpx from anyio.streams.stapled import StapledObjectStream from starlette._utils import is_async_callable from starlette.types import ASGIApp, Message, Receive, Scope, Send from starlette.websockets import WebSocketDisconnect if sys.version_info >= (3, 8): # pragma: no cover from typing import TypedDict else: # pragma: no cover from typing_extensions import TypedDict _PortalFactoryType = typing.Callable[ [], typing.ContextManager[anyio.abc.BlockingPortal] ] ASGIInstance = typing.Callable[[Receive, Send], typing.Awaitable[None]] ASGI2App = typing.Callable[[Scope], ASGIInstance] ASGI3App = typing.Callable[[Scope, Receive, Send], typing.Awaitable[None]] _RequestData = typing.Mapping[str, typing.Union[str, typing.Iterable[str]]] def _is_asgi3(app: typing.Union[ASGI2App, ASGI3App]) -> bool: if inspect.isclass(app): return hasattr(app, "__await__") return is_async_callable(app) class _WrapASGI2: """ Provide an ASGI3 interface onto an ASGI2 app. """ def __init__(self, app: ASGI2App) -> None: self.app = app async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: instance = self.app(scope) await instance(receive, send) class _AsyncBackend(TypedDict): backend: str backend_options: typing.Dict[str, typing.Any] class _Upgrade(Exception): def __init__(self, session: "WebSocketTestSession") -> None: self.session = session class WebSocketTestSession: def __init__( self, app: ASGI3App, scope: Scope, portal_factory: _PortalFactoryType, ) -> None: self.app = app self.scope = scope self.accepted_subprotocol = None self.portal_factory = portal_factory self._receive_queue: "queue.Queue[typing.Any]" = queue.Queue() self._send_queue: "queue.Queue[typing.Any]" = queue.Queue() self.extra_headers = None def __enter__(self) -> "WebSocketTestSession": self.exit_stack = contextlib.ExitStack() self.portal = self.exit_stack.enter_context(self.portal_factory()) try: _: "Future[None]" = self.portal.start_task_soon(self._run) self.send({"type": "websocket.connect"}) message = self.receive() self._raise_on_close(message) except Exception: self.exit_stack.close() raise self.accepted_subprotocol = message.get("subprotocol", None) self.extra_headers = message.get("headers", None) return self def __exit__(self, *args: typing.Any) -> None: try: self.close(1000) finally: self.exit_stack.close() while not self._send_queue.empty(): message = self._send_queue.get() if isinstance(message, BaseException): raise message async def _run(self) -> None: """ The sub-thread in which the websocket session runs. 
""" scope = self.scope receive = self._asgi_receive send = self._asgi_send try: await self.app(scope, receive, send) except BaseException as exc: self._send_queue.put(exc) raise async def _asgi_receive(self) -> Message: while self._receive_queue.empty(): await anyio.sleep(0) return self._receive_queue.get() async def _asgi_send(self, message: Message) -> None: self._send_queue.put(message) def _raise_on_close(self, message: Message) -> None: if message["type"] == "websocket.close": raise WebSocketDisconnect( message.get("code", 1000), message.get("reason", "") ) def send(self, message: Message) -> None: self._receive_queue.put(message) def send_text(self, data: str) -> None: self.send({"type": "websocket.receive", "text": data}) def send_bytes(self, data: bytes) -> None: self.send({"type": "websocket.receive", "bytes": data}) def send_json(self, data: typing.Any, mode: str = "text") -> None: assert mode in ["text", "binary"] text = json.dumps(data) if mode == "text": self.send({"type": "websocket.receive", "text": text}) else: self.send({"type": "websocket.receive", "bytes": text.encode("utf-8")}) def close(self, code: int = 1000) -> None: self.send({"type": "websocket.disconnect", "code": code}) def receive(self) -> Message: message = self._send_queue.get() if isinstance(message, BaseException): raise message return message def receive_text(self) -> str: message = self.receive() self._raise_on_close(message) return message["text"] def receive_bytes(self) -> bytes: message = self.receive() self._raise_on_close(message) return message["bytes"] def receive_json(self, mode: str = "text") -> typing.Any: assert mode in ["text", "binary"] message = self.receive() self._raise_on_close(message) if mode == "text": text = message["text"] else: text = message["bytes"].decode("utf-8") return json.loads(text) class _TestClientTransport(httpx.BaseTransport): def __init__( self, app: ASGI3App, portal_factory: _PortalFactoryType, raise_server_exceptions: bool = True, root_path: str = "", ) -> None: self.app = app self.raise_server_exceptions = raise_server_exceptions self.root_path = root_path self.portal_factory = portal_factory def handle_request(self, request: httpx.Request) -> httpx.Response: scheme = request.url.scheme netloc = request.url.netloc.decode(encoding="ascii") path = request.url.path raw_path = request.url.raw_path query = request.url.query.decode(encoding="ascii") default_port = {"http": 80, "ws": 80, "https": 443, "wss": 443}[scheme] if ":" in netloc: host, port_string = netloc.split(":", 1) port = int(port_string) else: host = netloc port = default_port # Include the 'host' header. if "host" in request.headers: headers: typing.List[typing.Tuple[bytes, bytes]] = [] elif port == default_port: # pragma: no cover headers = [(b"host", host.encode())] else: # pragma: no cover headers = [(b"host", (f"{host}:{port}").encode())] # Include other request headers. 
headers += [ (key.lower().encode(), value.encode()) for key, value in request.headers.items() ] scope: typing.Dict[str, typing.Any] if scheme in {"ws", "wss"}: subprotocol = request.headers.get("sec-websocket-protocol", None) if subprotocol is None: subprotocols: typing.Sequence[str] = [] else: subprotocols = [value.strip() for value in subprotocol.split(",")] scope = { "type": "websocket", "path": unquote(path), "raw_path": raw_path, "root_path": self.root_path, "scheme": scheme, "query_string": query.encode(), "headers": headers, "client": ["testclient", 50000], "server": [host, port], "subprotocols": subprotocols, } session = WebSocketTestSession(self.app, scope, self.portal_factory) raise _Upgrade(session) scope = { "type": "http", "http_version": "1.1", "method": request.method, "path": unquote(path), "raw_path": raw_path, "root_path": self.root_path, "scheme": scheme, "query_string": query.encode(), "headers": headers, "client": ["testclient", 50000], "server": [host, port], "extensions": {"http.response.debug": {}}, } request_complete = False response_started = False response_complete: anyio.Event raw_kwargs: typing.Dict[str, typing.Any] = {"stream": io.BytesIO()} template = None context = None async def receive() -> Message: nonlocal request_complete if request_complete: if not response_complete.is_set(): await response_complete.wait() return {"type": "http.disconnect"} body = request.read() if isinstance(body, str): body_bytes: bytes = body.encode("utf-8") # pragma: no cover elif body is None: body_bytes = b"" # pragma: no cover elif isinstance(body, GeneratorType): try: # pragma: no cover chunk = body.send(None) if isinstance(chunk, str): chunk = chunk.encode("utf-8") return {"type": "http.request", "body": chunk, "more_body": True} except StopIteration: # pragma: no cover request_complete = True return {"type": "http.request", "body": b""} else: body_bytes = body request_complete = True return {"type": "http.request", "body": body_bytes} async def send(message: Message) -> None: nonlocal raw_kwargs, response_started, template, context if message["type"] == "http.response.start": assert ( not response_started ), 'Received multiple "http.response.start" messages.' raw_kwargs["status_code"] = message["status"] raw_kwargs["headers"] = [ (key.decode(), value.decode()) for key, value in message.get("headers", []) ] response_started = True elif message["type"] == "http.response.body": assert ( response_started ), 'Received "http.response.body" without "http.response.start".' assert ( not response_complete.is_set() ), 'Received "http.response.body" after response completed.' body = message.get("body", b"") more_body = message.get("more_body", False) if request.method != "HEAD": raw_kwargs["stream"].write(body) if not more_body: raw_kwargs["stream"].seek(0) response_complete.set() elif message["type"] == "http.response.debug": template = message["info"]["template"] context = message["info"]["context"] try: with self.portal_factory() as portal: response_complete = portal.call(anyio.Event) portal.call(self.app, scope, receive, send) except BaseException as exc: if self.raise_server_exceptions: raise exc if self.raise_server_exceptions: assert response_started, "TestClient did not receive any response." 
elif not response_started: raw_kwargs = { "status_code": 500, "headers": [], "stream": io.BytesIO(), } raw_kwargs["stream"] = httpx.ByteStream(raw_kwargs["stream"].read()) response = httpx.Response(**raw_kwargs, request=request) if template is not None: response.template = template # type: ignore[attr-defined] response.context = context # type: ignore[attr-defined] return response class TestClient(httpx.Client): __test__ = False task: "Future[None]" portal: typing.Optional[anyio.abc.BlockingPortal] = None def __init__( self, app: ASGIApp, base_url: str = "http://testserver", raise_server_exceptions: bool = True, root_path: str = "", backend: str = "asyncio", backend_options: typing.Optional[typing.Dict[str, typing.Any]] = None, cookies: httpx._client.CookieTypes = None, headers: typing.Dict[str, str] = None, ) -> None: self.async_backend = _AsyncBackend( backend=backend, backend_options=backend_options or {} ) if _is_asgi3(app): app = typing.cast(ASGI3App, app) asgi_app = app else: app = typing.cast(ASGI2App, app) # type: ignore[assignment] asgi_app = _WrapASGI2(app) # type: ignore[arg-type] self.app = asgi_app transport = _TestClientTransport( self.app, portal_factory=self._portal_factory, raise_server_exceptions=raise_server_exceptions, root_path=root_path, ) if headers is None: headers = {} headers.setdefault("user-agent", "testclient") super().__init__( app=self.app, base_url=base_url, headers=headers, transport=transport, follow_redirects=True, cookies=cookies, ) @contextlib.contextmanager def _portal_factory(self) -> typing.Generator[anyio.abc.BlockingPortal, None, None]: if self.portal is not None: yield self.portal else: with anyio.from_thread.start_blocking_portal( **self.async_backend ) as portal: yield portal def _choose_redirect_arg( self, follow_redirects: typing.Optional[bool], allow_redirects: typing.Optional[bool], ) -> typing.Union[bool, httpx._client.UseClientDefault]: redirect: typing.Union[ bool, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT if allow_redirects is not None: message = ( "The `allow_redirects` argument is deprecated. " "Use `follow_redirects` instead." ) warnings.warn(message, DeprecationWarning) redirect = allow_redirects if follow_redirects is not None: redirect = follow_redirects elif allow_redirects is not None and follow_redirects is not None: raise RuntimeError( # pragma: no cover "Cannot use both `allow_redirects` and `follow_redirects`." 
) return redirect def request( # type: ignore[override] self, method: str, url: httpx._types.URLTypes, *, content: typing.Optional[httpx._types.RequestContent] = None, data: typing.Optional[_RequestData] = None, files: typing.Optional[httpx._types.RequestFiles] = None, json: typing.Any = None, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: url = self.base_url.join(url) redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().request( method, url, content=content, data=data, # type: ignore[arg-type] files=files, json=json, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def get( # type: ignore[override] self, url: httpx._types.URLTypes, *, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().get( url, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def options( # type: ignore[override] self, url: httpx._types.URLTypes, *, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().options( url, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def head( # type: ignore[override] self, url: httpx._types.URLTypes, *, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, 
httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().head( url, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def post( # type: ignore[override] self, url: httpx._types.URLTypes, *, content: typing.Optional[httpx._types.RequestContent] = None, data: typing.Optional[_RequestData] = None, files: typing.Optional[httpx._types.RequestFiles] = None, json: typing.Any = None, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().post( url, content=content, data=data, # type: ignore[arg-type] files=files, json=json, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def put( # type: ignore[override] self, url: httpx._types.URLTypes, *, content: typing.Optional[httpx._types.RequestContent] = None, data: typing.Optional[_RequestData] = None, files: typing.Optional[httpx._types.RequestFiles] = None, json: typing.Any = None, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().put( url, content=content, data=data, # type: ignore[arg-type] files=files, json=json, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def patch( # type: ignore[override] self, url: httpx._types.URLTypes, *, content: typing.Optional[httpx._types.RequestContent] = None, data: typing.Optional[_RequestData] = None, files: typing.Optional[httpx._types.RequestFiles] = None, json: typing.Any = None, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> 
httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().patch( url, content=content, data=data, # type: ignore[arg-type] files=files, json=json, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def delete( # type: ignore[override] self, url: httpx._types.URLTypes, *, params: typing.Optional[httpx._types.QueryParamTypes] = None, headers: typing.Optional[httpx._types.HeaderTypes] = None, cookies: typing.Optional[httpx._types.CookieTypes] = None, auth: typing.Union[ httpx._types.AuthTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, follow_redirects: typing.Optional[bool] = None, allow_redirects: typing.Optional[bool] = None, timeout: typing.Union[ httpx._client.TimeoutTypes, httpx._client.UseClientDefault ] = httpx._client.USE_CLIENT_DEFAULT, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> httpx.Response: redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) return super().delete( url, params=params, headers=headers, cookies=cookies, auth=auth, follow_redirects=redirect, timeout=timeout, extensions=extensions, ) def websocket_connect( self, url: str, subprotocols: typing.Sequence[str] = None, **kwargs: typing.Any ) -> typing.Any: url = urljoin("ws://testserver", url) headers = kwargs.get("headers", {}) headers.setdefault("connection", "upgrade") headers.setdefault("sec-websocket-key", "testserver==") headers.setdefault("sec-websocket-version", "13") if subprotocols is not None: headers.setdefault("sec-websocket-protocol", ", ".join(subprotocols)) kwargs["headers"] = headers try: super().request("GET", url, **kwargs) except _Upgrade as exc: session = exc.session else: raise RuntimeError("Expected WebSocket upgrade") # pragma: no cover return session def __enter__(self) -> "TestClient": with contextlib.ExitStack() as stack: self.portal = portal = stack.enter_context( anyio.from_thread.start_blocking_portal(**self.async_backend) ) @stack.callback def reset_portal() -> None: self.portal = None self.stream_send = StapledObjectStream( *anyio.create_memory_object_stream(math.inf) ) self.stream_receive = StapledObjectStream( *anyio.create_memory_object_stream(math.inf) ) self.task = portal.start_task_soon(self.lifespan) portal.call(self.wait_startup) @stack.callback def wait_shutdown() -> None: portal.call(self.wait_shutdown) self.exit_stack = stack.pop_all() return self def __exit__(self, *args: typing.Any) -> None: self.exit_stack.close() async def lifespan(self) -> None: scope = {"type": "lifespan"} try: await self.app(scope, self.stream_receive.receive, self.stream_send.send) finally: await self.stream_send.send(None) async def wait_startup(self) -> None: await self.stream_receive.send({"type": "lifespan.startup"}) async def receive() -> typing.Any: message = await self.stream_send.receive() if message is None: self.task.result() return message message = await receive() assert message["type"] in ( "lifespan.startup.complete", "lifespan.startup.failed", ) if message["type"] == "lifespan.startup.failed": await receive() async def wait_shutdown(self) -> None: async def receive() -> typing.Any: message = await self.stream_send.receive() if message is None: self.task.result() return message async with self.stream_send: await self.stream_receive.send({"type": "lifespan.shutdown"}) message = await receive() assert message["type"] in ( "lifespan.shutdown.complete", "lifespan.shutdown.failed", ) if message["type"] 
== "lifespan.shutdown.failed": await receive()
28,877
Python
35.508217
88
0.570939
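The file above vendors Starlette's TestClient. The sketch below shows typical usage, driving the client as a context manager so that lifespan startup and shutdown run; the app, route, and handler are invented here purely for illustration.

from starlette.applications import Starlette
from starlette.responses import PlainTextResponse
from starlette.routing import Route
from starlette.testclient import TestClient


async def homepage(request):
    return PlainTextResponse("hello")


app = Starlette(routes=[Route("/", homepage)])

# Entering the context manager starts the blocking portal and the
# app's lifespan; requests then run against the ASGI app in-process.
with TestClient(app) as client:
    response = client.get("/")
    assert response.status_code == 200
    assert response.text == "hello"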
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/concurrency.py
import functools import sys import typing import warnings import anyio if sys.version_info >= (3, 10): # pragma: no cover from typing import ParamSpec else: # pragma: no cover from typing_extensions import ParamSpec T = typing.TypeVar("T") P = ParamSpec("P") async def run_until_first_complete(*args: typing.Tuple[typing.Callable, dict]) -> None: warnings.warn( "run_until_first_complete is deprecated " "and will be removed in a future version.", DeprecationWarning, ) async with anyio.create_task_group() as task_group: async def run(func: typing.Callable[[], typing.Coroutine]) -> None: await func() task_group.cancel_scope.cancel() for func, kwargs in args: task_group.start_soon(run, functools.partial(func, **kwargs)) async def run_in_threadpool( func: typing.Callable[P, T], *args: P.args, **kwargs: P.kwargs ) -> T: if kwargs: # pragma: no cover # run_sync doesn't accept 'kwargs', so bind them in here func = functools.partial(func, **kwargs) return await anyio.to_thread.run_sync(func, *args) class _StopIteration(Exception): pass def _next(iterator: typing.Iterator[T]) -> T: # We can't raise `StopIteration` from within the threadpool iterator # and catch it outside that context, so we coerce them into a different # exception type. try: return next(iterator) except StopIteration: raise _StopIteration async def iterate_in_threadpool( iterator: typing.Iterator[T], ) -> typing.AsyncIterator[T]: while True: try: yield await anyio.to_thread.run_sync(_next, iterator) except _StopIteration: break
1,741
Python
25.393939
87
0.651924
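A minimal sketch of the two public helpers above: run_in_threadpool offloads a synchronous callable to a worker thread, and iterate_in_threadpool drains a plain iterator without blocking the event loop. The blocking_sum function is a stand-in invented for illustration.

import asyncio

from starlette.concurrency import iterate_in_threadpool, run_in_threadpool


def blocking_sum(a: int, b: int) -> int:
    # Stands in for any synchronous, potentially blocking call.
    return a + b


async def main() -> None:
    # Runs in an anyio worker thread; the event loop stays responsive.
    result = await run_in_threadpool(blocking_sum, 1, 2)
    assert result == 3

    # Each next() call on the iterator happens in a worker thread.
    values = [v async for v in iterate_in_threadpool(iter([1, 2, 3]))]
    assert values == [1, 2, 3]


asyncio.run(main())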
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/routing.py
import contextlib import functools import inspect import re import traceback import types import typing import warnings from contextlib import asynccontextmanager from enum import Enum from starlette._utils import is_async_callable from starlette.concurrency import run_in_threadpool from starlette.convertors import CONVERTOR_TYPES, Convertor from starlette.datastructures import URL, Headers, URLPath from starlette.exceptions import HTTPException from starlette.middleware import Middleware from starlette.requests import Request from starlette.responses import PlainTextResponse, RedirectResponse from starlette.types import ASGIApp, Receive, Scope, Send from starlette.websockets import WebSocket, WebSocketClose class NoMatchFound(Exception): """ Raised by `.url_for(name, **path_params)` and `.url_path_for(name, **path_params)` if no matching route exists. """ def __init__(self, name: str, path_params: typing.Dict[str, typing.Any]) -> None: params = ", ".join(list(path_params.keys())) super().__init__(f'No route exists for name "{name}" and params "{params}".') class Match(Enum): NONE = 0 PARTIAL = 1 FULL = 2 def iscoroutinefunction_or_partial(obj: typing.Any) -> bool: # pragma: no cover """ Correctly determines if an object is a coroutine function, including those wrapped in functools.partial objects. """ warnings.warn( "iscoroutinefunction_or_partial is deprecated, " "and will be removed in a future release.", DeprecationWarning, ) while isinstance(obj, functools.partial): obj = obj.func return inspect.iscoroutinefunction(obj) def request_response(func: typing.Callable) -> ASGIApp: """ Takes a function or coroutine `func(request) -> response`, and returns an ASGI application. """ is_coroutine = is_async_callable(func) async def app(scope: Scope, receive: Receive, send: Send) -> None: request = Request(scope, receive=receive, send=send) if is_coroutine: response = await func(request) else: response = await run_in_threadpool(func, request) await response(scope, receive, send) return app def websocket_session(func: typing.Callable) -> ASGIApp: """ Takes a coroutine `func(session)`, and returns an ASGI application. """ # assert asyncio.iscoroutinefunction(func), "WebSocket endpoints must be async" async def app(scope: Scope, receive: Receive, send: Send) -> None: session = WebSocket(scope, receive=receive, send=send) await func(session) return app def get_name(endpoint: typing.Callable) -> str: if inspect.isroutine(endpoint) or inspect.isclass(endpoint): return endpoint.__name__ return endpoint.__class__.__name__ def replace_params( path: str, param_convertors: typing.Dict[str, Convertor], path_params: typing.Dict[str, str], ) -> typing.Tuple[str, dict]: for key, value in list(path_params.items()): if "{" + key + "}" in path: convertor = param_convertors[key] value = convertor.to_string(value) path = path.replace("{" + key + "}", value) path_params.pop(key) return path, path_params # Match parameters in URL paths, eg. '{param}', and '{param:int}' PARAM_REGEX = re.compile("{([a-zA-Z_][a-zA-Z0-9_]*)(:[a-zA-Z_][a-zA-Z0-9_]*)?}") def compile_path( path: str, ) -> typing.Tuple[typing.Pattern, str, typing.Dict[str, Convertor]]: """ Given a path string, like: "/{username:str}", or a host string, like: "{subdomain}.mydomain.org", return a three-tuple of (regex, format, {param_name:convertor}). 
regex: "/(?P<username>[^/]+)" format: "/{username}" convertors: {"username": StringConvertor()} """ is_host = not path.startswith("/") path_regex = "^" path_format = "" duplicated_params = set() idx = 0 param_convertors = {} for match in PARAM_REGEX.finditer(path): param_name, convertor_type = match.groups("str") convertor_type = convertor_type.lstrip(":") assert ( convertor_type in CONVERTOR_TYPES ), f"Unknown path convertor '{convertor_type}'" convertor = CONVERTOR_TYPES[convertor_type] path_regex += re.escape(path[idx : match.start()]) path_regex += f"(?P<{param_name}>{convertor.regex})" path_format += path[idx : match.start()] path_format += "{%s}" % param_name if param_name in param_convertors: duplicated_params.add(param_name) param_convertors[param_name] = convertor idx = match.end() if duplicated_params: names = ", ".join(sorted(duplicated_params)) ending = "s" if len(duplicated_params) > 1 else "" raise ValueError(f"Duplicated param name{ending} {names} at path {path}") if is_host: # Align with `Host.matches()` behavior, which ignores port. hostname = path[idx:].split(":")[0] path_regex += re.escape(hostname) + "$" else: path_regex += re.escape(path[idx:]) + "$" path_format += path[idx:] return re.compile(path_regex), path_format, param_convertors class BaseRoute: def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]: raise NotImplementedError() # pragma: no cover def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: raise NotImplementedError() # pragma: no cover async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: raise NotImplementedError() # pragma: no cover async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: """ A route may be used in isolation as a stand-alone ASGI app. This is a somewhat contrived case, as they'll almost always be used within a Router, but could be useful for some tooling and minimal apps. """ match, child_scope = self.matches(scope) if match == Match.NONE: if scope["type"] == "http": response = PlainTextResponse("Not Found", status_code=404) await response(scope, receive, send) elif scope["type"] == "websocket": websocket_close = WebSocketClose() await websocket_close(scope, receive, send) return scope.update(child_scope) await self.handle(scope, receive, send) class Route(BaseRoute): def __init__( self, path: str, endpoint: typing.Callable, *, methods: typing.Optional[typing.List[str]] = None, name: typing.Optional[str] = None, include_in_schema: bool = True, ) -> None: assert path.startswith("/"), "Routed paths must start with '/'" self.path = path self.endpoint = endpoint self.name = get_name(endpoint) if name is None else name self.include_in_schema = include_in_schema endpoint_handler = endpoint while isinstance(endpoint_handler, functools.partial): endpoint_handler = endpoint_handler.func if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler): # Endpoint is function or method. Treat it as `func(request) -> response`. self.app = request_response(endpoint) if methods is None: methods = ["GET"] else: # Endpoint is a class. Treat it as ASGI. 
self.app = endpoint if methods is None: self.methods = None else: self.methods = {method.upper() for method in methods} if "GET" in self.methods: self.methods.add("HEAD") self.path_regex, self.path_format, self.param_convertors = compile_path(path) def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]: if scope["type"] == "http": match = self.path_regex.match(scope["path"]) if match: matched_params = match.groupdict() for key, value in matched_params.items(): matched_params[key] = self.param_convertors[key].convert(value) path_params = dict(scope.get("path_params", {})) path_params.update(matched_params) child_scope = {"endpoint": self.endpoint, "path_params": path_params} if self.methods and scope["method"] not in self.methods: return Match.PARTIAL, child_scope else: return Match.FULL, child_scope return Match.NONE, {} def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: seen_params = set(path_params.keys()) expected_params = set(self.param_convertors.keys()) if name != self.name or seen_params != expected_params: raise NoMatchFound(name, path_params) path, remaining_params = replace_params( self.path_format, self.param_convertors, path_params ) assert not remaining_params return URLPath(path=path, protocol="http") async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: if self.methods and scope["method"] not in self.methods: headers = {"Allow": ", ".join(self.methods)} if "app" in scope: raise HTTPException(status_code=405, headers=headers) else: response = PlainTextResponse( "Method Not Allowed", status_code=405, headers=headers ) await response(scope, receive, send) else: await self.app(scope, receive, send) def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, Route) and self.path == other.path and self.endpoint == other.endpoint and self.methods == other.methods ) def __repr__(self) -> str: class_name = self.__class__.__name__ methods = sorted(self.methods or []) path, name = self.path, self.name return f"{class_name}(path={path!r}, name={name!r}, methods={methods!r})" class WebSocketRoute(BaseRoute): def __init__( self, path: str, endpoint: typing.Callable, *, name: typing.Optional[str] = None ) -> None: assert path.startswith("/"), "Routed paths must start with '/'" self.path = path self.endpoint = endpoint self.name = get_name(endpoint) if name is None else name endpoint_handler = endpoint while isinstance(endpoint_handler, functools.partial): endpoint_handler = endpoint_handler.func if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler): # Endpoint is function or method. Treat it as `func(websocket)`. self.app = websocket_session(endpoint) else: # Endpoint is a class. Treat it as ASGI. 
self.app = endpoint self.path_regex, self.path_format, self.param_convertors = compile_path(path) def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]: if scope["type"] == "websocket": match = self.path_regex.match(scope["path"]) if match: matched_params = match.groupdict() for key, value in matched_params.items(): matched_params[key] = self.param_convertors[key].convert(value) path_params = dict(scope.get("path_params", {})) path_params.update(matched_params) child_scope = {"endpoint": self.endpoint, "path_params": path_params} return Match.FULL, child_scope return Match.NONE, {} def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: seen_params = set(path_params.keys()) expected_params = set(self.param_convertors.keys()) if name != self.name or seen_params != expected_params: raise NoMatchFound(name, path_params) path, remaining_params = replace_params( self.path_format, self.param_convertors, path_params ) assert not remaining_params return URLPath(path=path, protocol="websocket") async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: await self.app(scope, receive, send) def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, WebSocketRoute) and self.path == other.path and self.endpoint == other.endpoint ) def __repr__(self) -> str: return f"{self.__class__.__name__}(path={self.path!r}, name={self.name!r})" class Mount(BaseRoute): def __init__( self, path: str, app: typing.Optional[ASGIApp] = None, routes: typing.Optional[typing.Sequence[BaseRoute]] = None, name: typing.Optional[str] = None, *, middleware: typing.Optional[typing.Sequence[Middleware]] = None, ) -> None: assert path == "" or path.startswith("/"), "Routed paths must start with '/'" assert ( app is not None or routes is not None ), "Either 'app=...', or 'routes=' must be specified" self.path = path.rstrip("/") if app is not None: self._base_app: ASGIApp = app else: self._base_app = Router(routes=routes) self.app = self._base_app if middleware is not None: for cls, options in reversed(middleware): self.app = cls(app=self.app, **options) self.name = name self.path_regex, self.path_format, self.param_convertors = compile_path( self.path + "/{path:path}" ) @property def routes(self) -> typing.List[BaseRoute]: return getattr(self._base_app, "routes", []) def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]: if scope["type"] in ("http", "websocket"): path = scope["path"] match = self.path_regex.match(path) if match: matched_params = match.groupdict() for key, value in matched_params.items(): matched_params[key] = self.param_convertors[key].convert(value) remaining_path = "/" + matched_params.pop("path") matched_path = path[: -len(remaining_path)] path_params = dict(scope.get("path_params", {})) path_params.update(matched_params) root_path = scope.get("root_path", "") child_scope = { "path_params": path_params, "app_root_path": scope.get("app_root_path", root_path), "root_path": root_path + matched_path, "path": remaining_path, "endpoint": self.app, } return Match.FULL, child_scope return Match.NONE, {} def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: if self.name is not None and name == self.name and "path" in path_params: # 'name' matches "<mount_name>". 
path_params["path"] = path_params["path"].lstrip("/") path, remaining_params = replace_params( self.path_format, self.param_convertors, path_params ) if not remaining_params: return URLPath(path=path) elif self.name is None or name.startswith(self.name + ":"): if self.name is None: # No mount name. remaining_name = name else: # 'name' matches "<mount_name>:<child_name>". remaining_name = name[len(self.name) + 1 :] path_kwarg = path_params.get("path") path_params["path"] = "" path_prefix, remaining_params = replace_params( self.path_format, self.param_convertors, path_params ) if path_kwarg is not None: remaining_params["path"] = path_kwarg for route in self.routes or []: try: url = route.url_path_for(remaining_name, **remaining_params) return URLPath( path=path_prefix.rstrip("/") + str(url), protocol=url.protocol ) except NoMatchFound: pass raise NoMatchFound(name, path_params) async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: await self.app(scope, receive, send) def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, Mount) and self.path == other.path and self.app == other.app ) def __repr__(self) -> str: class_name = self.__class__.__name__ name = self.name or "" return f"{class_name}(path={self.path!r}, name={name!r}, app={self.app!r})" class Host(BaseRoute): def __init__( self, host: str, app: ASGIApp, name: typing.Optional[str] = None ) -> None: assert not host.startswith("/"), "Host must not start with '/'" self.host = host self.app = app self.name = name self.host_regex, self.host_format, self.param_convertors = compile_path(host) @property def routes(self) -> typing.List[BaseRoute]: return getattr(self.app, "routes", []) def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]: if scope["type"] in ("http", "websocket"): headers = Headers(scope=scope) host = headers.get("host", "").split(":")[0] match = self.host_regex.match(host) if match: matched_params = match.groupdict() for key, value in matched_params.items(): matched_params[key] = self.param_convertors[key].convert(value) path_params = dict(scope.get("path_params", {})) path_params.update(matched_params) child_scope = {"path_params": path_params, "endpoint": self.app} return Match.FULL, child_scope return Match.NONE, {} def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: if self.name is not None and name == self.name and "path" in path_params: # 'name' matches "<mount_name>". path = path_params.pop("path") host, remaining_params = replace_params( self.host_format, self.param_convertors, path_params ) if not remaining_params: return URLPath(path=path, host=host) elif self.name is None or name.startswith(self.name + ":"): if self.name is None: # No mount name. remaining_name = name else: # 'name' matches "<mount_name>:<child_name>". 
remaining_name = name[len(self.name) + 1 :] host, remaining_params = replace_params( self.host_format, self.param_convertors, path_params ) for route in self.routes or []: try: url = route.url_path_for(remaining_name, **remaining_params) return URLPath(path=str(url), protocol=url.protocol, host=host) except NoMatchFound: pass raise NoMatchFound(name, path_params) async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: await self.app(scope, receive, send) def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, Host) and self.host == other.host and self.app == other.app ) def __repr__(self) -> str: class_name = self.__class__.__name__ name = self.name or "" return f"{class_name}(host={self.host!r}, name={name!r}, app={self.app!r})" _T = typing.TypeVar("_T") class _AsyncLiftContextManager(typing.AsyncContextManager[_T]): def __init__(self, cm: typing.ContextManager[_T]): self._cm = cm async def __aenter__(self) -> _T: return self._cm.__enter__() async def __aexit__( self, exc_type: typing.Optional[typing.Type[BaseException]], exc_value: typing.Optional[BaseException], traceback: typing.Optional[types.TracebackType], ) -> typing.Optional[bool]: return self._cm.__exit__(exc_type, exc_value, traceback) def _wrap_gen_lifespan_context( lifespan_context: typing.Callable[[typing.Any], typing.Generator] ) -> typing.Callable[[typing.Any], typing.AsyncContextManager]: cmgr = contextlib.contextmanager(lifespan_context) @functools.wraps(cmgr) def wrapper(app: typing.Any) -> _AsyncLiftContextManager: return _AsyncLiftContextManager(cmgr(app)) return wrapper class _DefaultLifespan: def __init__(self, router: "Router"): self._router = router async def __aenter__(self) -> None: await self._router.startup() async def __aexit__(self, *exc_info: object) -> None: await self._router.shutdown() def __call__(self: _T, app: object) -> _T: return self class Router: def __init__( self, routes: typing.Optional[typing.Sequence[BaseRoute]] = None, redirect_slashes: bool = True, default: typing.Optional[ASGIApp] = None, on_startup: typing.Optional[typing.Sequence[typing.Callable]] = None, on_shutdown: typing.Optional[typing.Sequence[typing.Callable]] = None, lifespan: typing.Optional[ typing.Callable[[typing.Any], typing.AsyncContextManager] ] = None, ) -> None: self.routes = [] if routes is None else list(routes) self.redirect_slashes = redirect_slashes self.default = self.not_found if default is None else default self.on_startup = [] if on_startup is None else list(on_startup) self.on_shutdown = [] if on_shutdown is None else list(on_shutdown) if lifespan is None: self.lifespan_context: typing.Callable[ [typing.Any], typing.AsyncContextManager ] = _DefaultLifespan(self) elif inspect.isasyncgenfunction(lifespan): warnings.warn( "async generator function lifespans are deprecated, " "use an @contextlib.asynccontextmanager function instead", DeprecationWarning, ) self.lifespan_context = asynccontextmanager( lifespan, # type: ignore[arg-type] ) elif inspect.isgeneratorfunction(lifespan): warnings.warn( "generator function lifespans are deprecated, " "use an @contextlib.asynccontextmanager function instead", DeprecationWarning, ) self.lifespan_context = _wrap_gen_lifespan_context( lifespan, # type: ignore[arg-type] ) else: self.lifespan_context = lifespan async def not_found(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] == "websocket": websocket_close = WebSocketClose() await websocket_close(scope, receive, send) return # If we're running inside a starlette 
application then raise an # exception, so that the configurable exception handler can deal with # returning the response. For plain ASGI apps, just return the response. if "app" in scope: raise HTTPException(status_code=404) else: response = PlainTextResponse("Not Found", status_code=404) await response(scope, receive, send) def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath: for route in self.routes: try: return route.url_path_for(name, **path_params) except NoMatchFound: pass raise NoMatchFound(name, path_params) async def startup(self) -> None: """ Run any `.on_startup` event handlers. """ for handler in self.on_startup: if is_async_callable(handler): await handler() else: handler() async def shutdown(self) -> None: """ Run any `.on_shutdown` event handlers. """ for handler in self.on_shutdown: if is_async_callable(handler): await handler() else: handler() async def lifespan(self, scope: Scope, receive: Receive, send: Send) -> None: """ Handle ASGI lifespan messages, which allows us to manage application startup and shutdown events. """ started = False app = scope.get("app") await receive() try: async with self.lifespan_context(app): await send({"type": "lifespan.startup.complete"}) started = True await receive() except BaseException: exc_text = traceback.format_exc() if started: await send({"type": "lifespan.shutdown.failed", "message": exc_text}) else: await send({"type": "lifespan.startup.failed", "message": exc_text}) raise else: await send({"type": "lifespan.shutdown.complete"}) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: """ The main entry point to the Router class. """ assert scope["type"] in ("http", "websocket", "lifespan") if "router" not in scope: scope["router"] = self if scope["type"] == "lifespan": await self.lifespan(scope, receive, send) return partial = None for route in self.routes: # Determine if any route matches the incoming scope, # and hand over to the matching route if found. match, child_scope = route.matches(scope) if match == Match.FULL: scope.update(child_scope) await route.handle(scope, receive, send) return elif match == Match.PARTIAL and partial is None: partial = route partial_scope = child_scope if partial is not None: #  Handle partial matches. These are cases where an endpoint is # able to handle the request, but is not a preferred option. # We use this in particular to deal with "405 Method Not Allowed". 
scope.update(partial_scope) await partial.handle(scope, receive, send) return if scope["type"] == "http" and self.redirect_slashes and scope["path"] != "/": redirect_scope = dict(scope) if scope["path"].endswith("/"): redirect_scope["path"] = redirect_scope["path"].rstrip("/") else: redirect_scope["path"] = redirect_scope["path"] + "/" for route in self.routes: match, child_scope = route.matches(redirect_scope) if match != Match.NONE: redirect_url = URL(scope=redirect_scope) response = RedirectResponse(url=str(redirect_url)) await response(scope, receive, send) return await self.default(scope, receive, send) def __eq__(self, other: typing.Any) -> bool: return isinstance(other, Router) and self.routes == other.routes def mount( self, path: str, app: ASGIApp, name: typing.Optional[str] = None ) -> None: # pragma: nocover route = Mount(path, app=app, name=name) self.routes.append(route) def host( self, host: str, app: ASGIApp, name: typing.Optional[str] = None ) -> None: # pragma: no cover route = Host(host, app=app, name=name) self.routes.append(route) def add_route( self, path: str, endpoint: typing.Callable, methods: typing.Optional[typing.List[str]] = None, name: typing.Optional[str] = None, include_in_schema: bool = True, ) -> None: # pragma: nocover route = Route( path, endpoint=endpoint, methods=methods, name=name, include_in_schema=include_in_schema, ) self.routes.append(route) def add_websocket_route( self, path: str, endpoint: typing.Callable, name: typing.Optional[str] = None ) -> None: # pragma: no cover route = WebSocketRoute(path, endpoint=endpoint, name=name) self.routes.append(route) def route( self, path: str, methods: typing.Optional[typing.List[str]] = None, name: typing.Optional[str] = None, include_in_schema: bool = True, ) -> typing.Callable: """ We no longer document this decorator style API, and its usage is discouraged. Instead you should use the following approach: >>> routes = [Route(path, endpoint=...), ...] >>> app = Starlette(routes=routes) """ warnings.warn( "The `route` decorator is deprecated, and will be removed in version 1.0.0." "Refer to https://www.starlette.io/routing/#http-routing for the recommended approach.", # noqa: E501 DeprecationWarning, ) def decorator(func: typing.Callable) -> typing.Callable: self.add_route( path, func, methods=methods, name=name, include_in_schema=include_in_schema, ) return func return decorator def websocket_route( self, path: str, name: typing.Optional[str] = None ) -> typing.Callable: """ We no longer document this decorator style API, and its usage is discouraged. Instead you should use the following approach: >>> routes = [WebSocketRoute(path, endpoint=...), ...] >>> app = Starlette(routes=routes) """ warnings.warn( "The `websocket_route` decorator is deprecated, and will be removed in version 1.0.0. Refer to " # noqa: E501 "https://www.starlette.io/routing/#websocket-routing for the recommended approach.", # noqa: E501 DeprecationWarning, ) def decorator(func: typing.Callable) -> typing.Callable: self.add_websocket_route(path, func, name=name) return func return decorator def add_event_handler( self, event_type: str, func: typing.Callable ) -> None: # pragma: no cover assert event_type in ("startup", "shutdown") if event_type == "startup": self.on_startup.append(func) else: self.on_shutdown.append(func) def on_event(self, event_type: str) -> typing.Callable: warnings.warn( "The `on_event` decorator is deprecated, and will be removed in version 1.0.0. 
" # noqa: E501 "Refer to https://www.starlette.io/events/#registering-events for recommended approach.", # noqa: E501 DeprecationWarning, ) def decorator(func: typing.Callable) -> typing.Callable: self.add_event_handler(event_type, func) return func return decorator
31,710
Python
36.26322
122
0.577483
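The routing module above composes Route, Mount, and Router, and supports reverse lookup through url_path_for against the compiled path format. A small sketch, with handlers and names invented for illustration:

from starlette.responses import JSONResponse, PlainTextResponse
from starlette.routing import Mount, Route, Router


async def homepage(request):
    return PlainTextResponse("home")


async def user_detail(request):
    # The `{username:str}` segment is converted and placed in path_params.
    return JSONResponse({"user": request.path_params["username"]})


api = Router(
    routes=[Route("/users/{username:str}", user_detail, name="user_detail")]
)

app = Router(
    routes=[
        Route("/", homepage),
        Mount("/api", app=api, name="api"),
    ]
)

# Mounted routes are addressed as "<mount_name>:<child_name>".
assert app.url_path_for("api:user_detail", username="ada") == "/api/users/ada"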
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/types.py
import typing Scope = typing.MutableMapping[str, typing.Any] Message = typing.MutableMapping[str, typing.Any] Receive = typing.Callable[[], typing.Awaitable[Message]] Send = typing.Callable[[Message], typing.Awaitable[None]] ASGIApp = typing.Callable[[Scope, Receive, Send], typing.Awaitable[None]]
302
Python
29.299997
73
0.764901
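These aliases describe the ASGI calling convention itself, so the smallest possible consumer is a bare ASGI app typed with them; no Starlette machinery is required. A sketch:

from starlette.types import Receive, Scope, Send


async def app(scope: Scope, receive: Receive, send: Send) -> None:
    # Handles only plain HTTP scopes; lifespan/websocket are out of scope here.
    assert scope["type"] == "http"
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [(b"content-type", b"text/plain")],
        }
    )
    await send({"type": "http.response.body", "body": b"ok"})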
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/_utils.py
import asyncio import functools import sys import typing from types import TracebackType if sys.version_info < (3, 8): # pragma: no cover from typing_extensions import Protocol else: # pragma: no cover from typing import Protocol def is_async_callable(obj: typing.Any) -> bool: while isinstance(obj, functools.partial): obj = obj.func return asyncio.iscoroutinefunction(obj) or ( callable(obj) and asyncio.iscoroutinefunction(obj.__call__) ) T_co = typing.TypeVar("T_co", covariant=True) # TODO: once 3.8 is the minimum supported version (27 Jun 2023) # this can just become # class AwaitableOrContextManager( # typing.Awaitable[T_co], # typing.AsyncContextManager[T_co], # typing.Protocol[T_co], # ): # pass class AwaitableOrContextManager(Protocol[T_co]): def __await__(self) -> typing.Generator[typing.Any, None, T_co]: ... # pragma: no cover async def __aenter__(self) -> T_co: ... # pragma: no cover async def __aexit__( self, __exc_type: typing.Optional[typing.Type[BaseException]], __exc_value: typing.Optional[BaseException], __traceback: typing.Optional[TracebackType], ) -> typing.Union[bool, None]: ... # pragma: no cover class SupportsAsyncClose(Protocol): async def close(self) -> None: ... # pragma: no cover SupportsAsyncCloseType = typing.TypeVar( "SupportsAsyncCloseType", bound=SupportsAsyncClose, covariant=False ) class AwaitableOrContextManagerWrapper(typing.Generic[SupportsAsyncCloseType]): __slots__ = ("aw", "entered") def __init__(self, aw: typing.Awaitable[SupportsAsyncCloseType]) -> None: self.aw = aw def __await__(self) -> typing.Generator[typing.Any, None, SupportsAsyncCloseType]: return self.aw.__await__() async def __aenter__(self) -> SupportsAsyncCloseType: self.entered = await self.aw return self.entered async def __aexit__(self, *args: typing.Any) -> typing.Union[None, bool]: await self.entered.close() return None
2,091
Python
26.893333
86
0.659015
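is_async_callable above unwraps functools.partial layers and also recognizes objects whose __call__ is a coroutine function. A quick sketch (note this is a private module, so importing it outside Starlette is an assumption):

import functools

from starlette._utils import is_async_callable


async def coro() -> None:
    ...


class AsyncCallable:
    async def __call__(self) -> None:
        ...


def sync_fn() -> None:
    ...


# Coroutine functions, partials over them, and async-callable objects
# are all detected; a plain function is not.
assert is_async_callable(coro)
assert is_async_callable(functools.partial(coro))
assert is_async_callable(AsyncCallable())
assert not is_async_callable(sync_fn)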
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/staticfiles.py
import importlib.util import os import stat import typing from email.utils import parsedate import anyio from starlette.datastructures import URL, Headers from starlette.exceptions import HTTPException from starlette.responses import FileResponse, RedirectResponse, Response from starlette.types import Receive, Scope, Send PathLike = typing.Union[str, "os.PathLike[str]"] class NotModifiedResponse(Response): NOT_MODIFIED_HEADERS = ( "cache-control", "content-location", "date", "etag", "expires", "vary", ) def __init__(self, headers: Headers): super().__init__( status_code=304, headers={ name: value for name, value in headers.items() if name in self.NOT_MODIFIED_HEADERS }, ) class StaticFiles: def __init__( self, *, directory: typing.Optional[PathLike] = None, packages: typing.Optional[ typing.List[typing.Union[str, typing.Tuple[str, str]]] ] = None, html: bool = False, check_dir: bool = True, follow_symlink: bool = False, ) -> None: self.directory = directory self.packages = packages self.all_directories = self.get_directories(directory, packages) self.html = html self.config_checked = False self.follow_symlink = follow_symlink if check_dir and directory is not None and not os.path.isdir(directory): raise RuntimeError(f"Directory '{directory}' does not exist") def get_directories( self, directory: typing.Optional[PathLike] = None, packages: typing.Optional[ typing.List[typing.Union[str, typing.Tuple[str, str]]] ] = None, ) -> typing.List[PathLike]: """ Given `directory` and `packages` arguments, return a list of all the directories that should be used for serving static files from. """ directories = [] if directory is not None: directories.append(directory) for package in packages or []: if isinstance(package, tuple): package, statics_dir = package else: statics_dir = "statics" spec = importlib.util.find_spec(package) assert spec is not None, f"Package {package!r} could not be found." assert spec.origin is not None, f"Package {package!r} could not be found." package_directory = os.path.normpath( os.path.join(spec.origin, "..", statics_dir) ) assert os.path.isdir( package_directory ), f"Directory '{statics_dir!r}' in package {package!r} could not be found." directories.append(package_directory) return directories async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: """ The ASGI entry point. """ assert scope["type"] == "http" if not self.config_checked: await self.check_config() self.config_checked = True path = self.get_path(scope) response = await self.get_response(path, scope) await response(scope, receive, send) def get_path(self, scope: Scope) -> str: """ Given the ASGI scope, return the `path` string to serve up, with OS specific path separators, and any '..', '.' components removed. """ return os.path.normpath(os.path.join(*scope["path"].split("/"))) async def get_response(self, path: str, scope: Scope) -> Response: """ Returns an HTTP response, given the incoming path, method and request headers. """ if scope["method"] not in ("GET", "HEAD"): raise HTTPException(status_code=405) try: full_path, stat_result = await anyio.to_thread.run_sync( self.lookup_path, path ) except PermissionError: raise HTTPException(status_code=401) except OSError: raise if stat_result and stat.S_ISREG(stat_result.st_mode): # We have a static file to serve. return self.file_response(full_path, stat_result, scope) elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html: # We're in HTML mode, and have got a directory URL. # Check if we have 'index.html' file to serve. 
index_path = os.path.join(path, "index.html") full_path, stat_result = await anyio.to_thread.run_sync( self.lookup_path, index_path ) if stat_result is not None and stat.S_ISREG(stat_result.st_mode): if not scope["path"].endswith("/"): # Directory URLs should redirect to always end in "/". url = URL(scope=scope) url = url.replace(path=url.path + "/") return RedirectResponse(url=url) return self.file_response(full_path, stat_result, scope) if self.html: # Check for '404.html' if we're in HTML mode. full_path, stat_result = await anyio.to_thread.run_sync( self.lookup_path, "404.html" ) if stat_result and stat.S_ISREG(stat_result.st_mode): return FileResponse( full_path, stat_result=stat_result, method=scope["method"], status_code=404, ) raise HTTPException(status_code=404) def lookup_path( self, path: str ) -> typing.Tuple[str, typing.Optional[os.stat_result]]: for directory in self.all_directories: joined_path = os.path.join(directory, path) if self.follow_symlink: full_path = os.path.abspath(joined_path) else: full_path = os.path.realpath(joined_path) directory = os.path.realpath(directory) if os.path.commonprefix([full_path, directory]) != directory: # Don't allow misbehaving clients to break out of the static files # directory. continue try: return full_path, os.stat(full_path) except (FileNotFoundError, NotADirectoryError): continue return "", None def file_response( self, full_path: PathLike, stat_result: os.stat_result, scope: Scope, status_code: int = 200, ) -> Response: method = scope["method"] request_headers = Headers(scope=scope) response = FileResponse( full_path, status_code=status_code, stat_result=stat_result, method=method ) if self.is_not_modified(response.headers, request_headers): return NotModifiedResponse(response.headers) return response async def check_config(self) -> None: """ Perform a one-off configuration check that StaticFiles is actually pointed at a directory, so that we can raise loud errors rather than just returning 404 responses. """ if self.directory is None: return try: stat_result = await anyio.to_thread.run_sync(os.stat, self.directory) except FileNotFoundError: raise RuntimeError( f"StaticFiles directory '{self.directory}' does not exist." ) if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)): raise RuntimeError( f"StaticFiles path '{self.directory}' is not a directory." ) def is_not_modified( self, response_headers: Headers, request_headers: Headers ) -> bool: """ Given the request and response headers, return `True` if an HTTP "Not Modified" response could be returned instead. """ try: if_none_match = request_headers["if-none-match"] etag = response_headers["etag"] if if_none_match == etag: return True except KeyError: pass try: if_modified_since = parsedate(request_headers["if-modified-since"]) last_modified = parsedate(response_headers["last-modified"]) if ( if_modified_since is not None and last_modified is not None and if_modified_since >= last_modified ): return True except KeyError: pass return False
8,735
Python
34.368421
88
0.565197
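StaticFiles is normally mounted under a path prefix. A minimal sketch, assuming a local "static" directory exists (check_dir raises at construction time otherwise):

from starlette.applications import Starlette
from starlette.routing import Mount
from starlette.staticfiles import StaticFiles

app = Starlette(
    routes=[
        # html=True additionally serves index.html for directory URLs
        # and 404.html for misses, as implemented in get_response above.
        Mount(
            "/static",
            app=StaticFiles(directory="static", html=True),
            name="static",
        ),
    ]
)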
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/starlette/requests.py
import json import typing from http import cookies as http_cookies import anyio from starlette._utils import AwaitableOrContextManager, AwaitableOrContextManagerWrapper from starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State from starlette.exceptions import HTTPException from starlette.formparsers import FormParser, MultiPartException, MultiPartParser from starlette.types import Message, Receive, Scope, Send try: from multipart.multipart import parse_options_header except ImportError: # pragma: nocover parse_options_header = None if typing.TYPE_CHECKING: from starlette.routing import Router SERVER_PUSH_HEADERS_TO_COPY = { "accept", "accept-encoding", "accept-language", "cache-control", "user-agent", } def cookie_parser(cookie_string: str) -> typing.Dict[str, str]: """ This function parses a ``Cookie`` HTTP header into a dict of key/value pairs. It attempts to mimic browser cookie parsing behavior: browsers and web servers frequently disregard the spec (RFC 6265) when setting and reading cookies, so we attempt to suit the common scenarios here. This function has been adapted from Django 3.1.0. Note: we are explicitly _NOT_ using `SimpleCookie.load` because it is based on an outdated spec and will fail on lots of input we want to support """ cookie_dict: typing.Dict[str, str] = {} for chunk in cookie_string.split(";"): if "=" in chunk: key, val = chunk.split("=", 1) else: # Assume an empty name per # https://bugzilla.mozilla.org/show_bug.cgi?id=169091 key, val = "", chunk key, val = key.strip(), val.strip() if key or val: # unquote using Python's algorithm. cookie_dict[key] = http_cookies._unquote(val) return cookie_dict class ClientDisconnect(Exception): pass class HTTPConnection(typing.Mapping[str, typing.Any]): """ A base class for incoming HTTP connections, that is used to provide any functionality that is common to both `Request` and `WebSocket`. """ def __init__(self, scope: Scope, receive: typing.Optional[Receive] = None) -> None: assert scope["type"] in ("http", "websocket") self.scope = scope def __getitem__(self, key: str) -> typing.Any: return self.scope[key] def __iter__(self) -> typing.Iterator[str]: return iter(self.scope) def __len__(self) -> int: return len(self.scope) # Don't use the `abc.Mapping.__eq__` implementation. # Connection instances should never be considered equal # unless `self is other`. 
__eq__ = object.__eq__ __hash__ = object.__hash__ @property def app(self) -> typing.Any: return self.scope["app"] @property def url(self) -> URL: if not hasattr(self, "_url"): self._url = URL(scope=self.scope) return self._url @property def base_url(self) -> URL: if not hasattr(self, "_base_url"): base_url_scope = dict(self.scope) base_url_scope["path"] = "/" base_url_scope["query_string"] = b"" base_url_scope["root_path"] = base_url_scope.get( "app_root_path", base_url_scope.get("root_path", "") ) self._base_url = URL(scope=base_url_scope) return self._base_url @property def headers(self) -> Headers: if not hasattr(self, "_headers"): self._headers = Headers(scope=self.scope) return self._headers @property def query_params(self) -> QueryParams: if not hasattr(self, "_query_params"): self._query_params = QueryParams(self.scope["query_string"]) return self._query_params @property def path_params(self) -> typing.Dict[str, typing.Any]: return self.scope.get("path_params", {}) @property def cookies(self) -> typing.Dict[str, str]: if not hasattr(self, "_cookies"): cookies: typing.Dict[str, str] = {} cookie_header = self.headers.get("cookie") if cookie_header: cookies = cookie_parser(cookie_header) self._cookies = cookies return self._cookies @property def client(self) -> typing.Optional[Address]: # client is a 2 item tuple of (host, port), None or missing host_port = self.scope.get("client") if host_port is not None: return Address(*host_port) return None @property def session(self) -> typing.Dict[str, typing.Any]: assert ( "session" in self.scope ), "SessionMiddleware must be installed to access request.session" return self.scope["session"] @property def auth(self) -> typing.Any: assert ( "auth" in self.scope ), "AuthenticationMiddleware must be installed to access request.auth" return self.scope["auth"] @property def user(self) -> typing.Any: assert ( "user" in self.scope ), "AuthenticationMiddleware must be installed to access request.user" return self.scope["user"] @property def state(self) -> State: if not hasattr(self, "_state"): # Ensure 'state' has an empty dict if it's not already populated. 
self.scope.setdefault("state", {}) # Create a state instance with a reference to the dict in which it should # store info self._state = State(self.scope["state"]) return self._state def url_for(self, name: str, **path_params: typing.Any) -> str: router: Router = self.scope["router"] url_path = router.url_path_for(name, **path_params) return url_path.make_absolute_url(base_url=self.base_url) async def empty_receive() -> typing.NoReturn: raise RuntimeError("Receive channel has not been made available") async def empty_send(message: Message) -> typing.NoReturn: raise RuntimeError("Send channel has not been made available") class Request(HTTPConnection): _form: typing.Optional[FormData] def __init__( self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send ): super().__init__(scope) assert scope["type"] == "http" self._receive = receive self._send = send self._stream_consumed = False self._is_disconnected = False self._form = None @property def method(self) -> str: return self.scope["method"] @property def receive(self) -> Receive: return self._receive async def stream(self) -> typing.AsyncGenerator[bytes, None]: if hasattr(self, "_body"): yield self._body yield b"" return if self._stream_consumed: raise RuntimeError("Stream consumed") self._stream_consumed = True while True: message = await self._receive() if message["type"] == "http.request": body = message.get("body", b"") if body: yield body if not message.get("more_body", False): break elif message["type"] == "http.disconnect": self._is_disconnected = True raise ClientDisconnect() yield b"" async def body(self) -> bytes: if not hasattr(self, "_body"): chunks: "typing.List[bytes]" = [] async for chunk in self.stream(): chunks.append(chunk) self._body = b"".join(chunks) return self._body async def json(self) -> typing.Any: if not hasattr(self, "_json"): body = await self.body() self._json = json.loads(body) return self._json async def _get_form( self, *, max_files: typing.Union[int, float] = 1000, max_fields: typing.Union[int, float] = 1000, ) -> FormData: if self._form is None: assert ( parse_options_header is not None ), "The `python-multipart` library must be installed to use form parsing." 
content_type_header = self.headers.get("Content-Type") content_type: bytes content_type, _ = parse_options_header(content_type_header) if content_type == b"multipart/form-data": try: multipart_parser = MultiPartParser( self.headers, self.stream(), max_files=max_files, max_fields=max_fields, ) self._form = await multipart_parser.parse() except MultiPartException as exc: if "app" in self.scope: raise HTTPException(status_code=400, detail=exc.message) raise exc elif content_type == b"application/x-www-form-urlencoded": form_parser = FormParser(self.headers, self.stream()) self._form = await form_parser.parse() else: self._form = FormData() return self._form def form( self, *, max_files: typing.Union[int, float] = 1000, max_fields: typing.Union[int, float] = 1000, ) -> AwaitableOrContextManager[FormData]: return AwaitableOrContextManagerWrapper( self._get_form(max_files=max_files, max_fields=max_fields) ) async def close(self) -> None: if self._form is not None: await self._form.close() async def is_disconnected(self) -> bool: if not self._is_disconnected: message: Message = {} # If message isn't immediately available, move on with anyio.CancelScope() as cs: cs.cancel() message = await self._receive() if message.get("type") == "http.disconnect": self._is_disconnected = True return self._is_disconnected async def send_push_promise(self, path: str) -> None: if "http.response.push" in self.scope.get("extensions", {}): raw_headers: "typing.List[typing.Tuple[bytes, bytes]]" = [] for name in SERVER_PUSH_HEADERS_TO_COPY: for value in self.headers.getlist(name): raw_headers.append( (name.encode("latin-1"), value.encode("latin-1")) ) await self._send( {"type": "http.response.push", "path": path, "headers": raw_headers} )
10,745
Python
32.68652
88
0.578595
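The Request class above exposes the ASGI scope and body through lazily computed, cached properties. A usage sketch; the endpoint and route are invented for illustration:

from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Route


async def echo(request: Request) -> JSONResponse:
    # json() reads and caches the body, so repeated awaits are cheap.
    payload = await request.json()
    return JSONResponse(
        {
            "method": request.method,
            "path": request.url.path,
            "query": dict(request.query_params),
            "body": payload,
        }
    )


app = Starlette(routes=[Route("/echo", echo, methods=["POST"])])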