def main() -> None:
    """Command-line wrapper to re-run a script whenever its source changes.

    Scripts may be specified by filename or module name::

        python -m tornado.autoreload -m tornado.test.runtests
        python -m tornado.autoreload tornado/test/runtests.py

    Running a script with this wrapper is similar to calling
    `tornado.autoreload.wait` at the end of the script, but this wrapper
    can catch import-time problems like syntax errors that would otherwise
    prevent the script from reaching its call to `wait`.
    """
    # Remember that we were launched with autoreload as main.
    # The main module can be tricky; set the variables both in our globals
    # (which may be __main__) and the real importable version.
    #
    # We use optparse instead of the newer argparse because we want to
    # mimic the python command-line interface which requires stopping
    # parsing at the first positional argument.  optparse supports
    # this but as far as I can tell argparse does not.
    import optparse

    import tornado.autoreload

    global _autoreload_is_main
    global _original_argv, _original_spec
    tornado.autoreload._autoreload_is_main = _autoreload_is_main = True
    original_argv = sys.argv
    tornado.autoreload._original_argv = _original_argv = original_argv
    original_spec = getattr(sys.modules["__main__"], "__spec__", None)
    tornado.autoreload._original_spec = _original_spec = original_spec

    parser = optparse.OptionParser(
        prog="python -m tornado.autoreload",
        usage=_USAGE,
        epilog="Either -m or a path must be specified, but not both",
    )
    parser.disable_interspersed_args()
    parser.add_option("-m", dest="module", metavar="module", help="module to run")
    parser.add_option(
        "--until-success",
        action="store_true",
        help="stop reloading after the program exits successfully (status code 0)",
    )
    opts, rest = parser.parse_args()
    if opts.module is None:
        if not rest:
            print("Either -m or a path must be specified", file=sys.stderr)
            sys.exit(1)
        path = rest[0]
        sys.argv = rest[:]
    else:
        path = None
        sys.argv = [sys.argv[0]] + rest

    # SystemExit.code is typed funny: https://github.com/python/typeshed/issues/8513
    # All we care about is truthiness
    exit_status: Union[int, str, None] = 1
    try:
        import runpy

        if opts.module is not None:
            runpy.run_module(opts.module, run_name="__main__", alter_sys=True)
        else:
            assert path is not None
            runpy.run_path(path, run_name="__main__")
    except SystemExit as e:
        exit_status = e.code
        gen_log.info("Script exited with status %s", e.code)
    except Exception as e:
        gen_log.warning("Script exited with uncaught exception", exc_info=True)
        # If an exception occurred at import time, the file with the error
        # never made it into sys.modules and so we won't know to watch it.
        # Just to make sure we've covered everything, walk the stack trace
        # from the exception and watch every file.
        for filename, lineno, name, line in traceback.extract_tb(sys.exc_info()[2]):
            watch(filename)
        if isinstance(e, SyntaxError):
            # SyntaxErrors are special: their innermost stack frame is fake
            # so extract_tb won't see it and we have to get the filename
            # from the exception object.
            if e.filename is not None:
                watch(e.filename)
    else:
        exit_status = 0
        gen_log.info("Script exited normally")
    # restore sys.argv so subsequent executions will include autoreload
    sys.argv = original_argv

    if opts.module is not None:
        # runpy did a fake import of the module as __main__, but now it's
        # no longer in sys.modules.  Figure out where it is and watch it.
        loader = pkgutil.get_loader(opts.module)
        if loader is not None and isinstance(loader, importlib.abc.FileLoader):
            watch(loader.get_filename())
    if opts.until_success and not exit_status:
        return
    wait()
def run_on_executor(*args: Any, **kwargs: Any) -> Callable:
    """Decorator to run a synchronous method asynchronously on an executor.

    Returns a future.

    The executor to be used is determined by the ``executor``
    attribute of ``self``. To use a different attribute name, pass a
    keyword argument to the decorator::

        @run_on_executor(executor='_thread_pool')
        def foo(self):
            pass

    This decorator should not be confused with the similarly-named
    `.IOLoop.run_in_executor`. In general, using ``run_in_executor``
    when *calling* a blocking method is recommended instead of using
    this decorator when *defining* a method. If compatibility with older
    versions of Tornado is required, consider defining an executor
    and using ``executor.submit()`` at the call site.

    .. versionchanged:: 4.2
       Added keyword arguments to use alternative attributes.

    .. versionchanged:: 5.0
       Always uses the current IOLoop instead of ``self.io_loop``.

    .. versionchanged:: 5.1
       Returns a `.Future` compatible with ``await`` instead of a
       `concurrent.futures.Future`.

    .. deprecated:: 5.1
       The ``callback`` argument is deprecated and will be removed in
       6.0. The decorator itself is discouraged in new code but will
       not be removed in 6.0.

    .. versionchanged:: 6.0
       The ``callback`` argument was removed.
    """
    # Fully type-checking decorators is tricky, and this one is
    # discouraged anyway so it doesn't have all the generic magic.
    def run_on_executor_decorator(fn: Callable) -> Callable[..., Future]:
        executor = kwargs.get("executor", "executor")

        @functools.wraps(fn)
        def wrapper(self: Any, *args: Any, **kwargs: Any) -> Future:
            async_future = Future()  # type: Future
            conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs)
            chain_future(conc_future, async_future)
            return async_future

        return wrapper

    if args and kwargs:
        raise ValueError("cannot combine positional and keyword args")
    if len(args) == 1:
        return run_on_executor_decorator(args[0])
    elif len(args) != 0:
        raise ValueError("expected 1 argument, got %d" % len(args))
    return run_on_executor_decorator
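A minimal usage sketch (the ``ThreadPoolExecutor`` setup and the ``Hasher`` class are illustrative assumptions, not part of the source above)::

    from concurrent.futures import ThreadPoolExecutor

    class Hasher:
        # run_on_executor looks for an attribute named "executor" by default.
        executor = ThreadPoolExecutor(max_workers=4)

        @run_on_executor
        def compute(self, x):
            return x * x  # blocking work runs on the thread pool

    # Inside a coroutine with a running IOLoop:
    #     result = await Hasher().compute(3)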
def chain_future(a: "Future[_T]", b: "Future[_T]") -> None:
    """Chain two futures together so that when one completes, so does the other.

    The result (success or failure) of ``a`` will be copied to ``b``, unless
    ``b`` has already been completed or cancelled by the time ``a`` finishes.

    .. versionchanged:: 5.0

       Now accepts both Tornado/asyncio `Future` objects and
       `concurrent.futures.Future`.

    """

    def copy(a: "Future[_T]") -> None:
        if b.done():
            return
        if hasattr(a, "exc_info") and a.exc_info() is not None:  # type: ignore
            future_set_exc_info(b, a.exc_info())  # type: ignore
        else:
            a_exc = a.exception()
            if a_exc is not None:
                b.set_exception(a_exc)
            else:
                b.set_result(a.result())

    if isinstance(a, Future):
        future_add_done_callback(a, copy)
    else:
        # concurrent.futures.Future
        from tornado.ioloop import IOLoop

        IOLoop.current().add_future(a, copy)
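A short sketch of bridging a thread-pool result into coroutine code with ``chain_future``; the ``bridge`` helper is hypothetical and must run while an IOLoop is current, since concurrent futures are routed through ``IOLoop.current()``::

    from concurrent.futures import ThreadPoolExecutor
    from tornado.concurrent import Future, chain_future

    async def bridge(pool: ThreadPoolExecutor) -> int:
        conc = pool.submit(sum, [1, 2, 3])  # concurrent.futures.Future
        fut: Future = Future()              # awaitable Tornado/asyncio Future
        chain_future(conc, fut)             # result or exception copied over
        return await fut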
def future_set_result_unless_cancelled(
    future: "Union[futures.Future[_T], Future[_T]]", value: _T
) -> None:
    """Set the given ``value`` as the `Future`'s result, if not cancelled.

    Avoids ``asyncio.InvalidStateError`` when calling ``set_result()`` on
    a cancelled `asyncio.Future`.

    .. versionadded:: 5.0
    """
    if not future.cancelled():
        future.set_result(value)
def future_set_exception_unless_cancelled(
    future: "Union[futures.Future[_T], Future[_T]]", exc: BaseException
) -> None:
    """Set the given ``exc`` as the `Future`'s exception.

    If the Future is already canceled, logs the exception instead. If
    this logging is not desired, the caller should explicitly check
    the state of the Future and call ``Future.set_exception`` instead of
    this wrapper.

    Avoids ``asyncio.InvalidStateError`` when calling ``set_exception()`` on
    a cancelled `asyncio.Future`.

    .. versionadded:: 6.0
    """
    if not future.cancelled():
        future.set_exception(exc)
    else:
        app_log.error("Exception after Future was cancelled", exc_info=exc)
def future_set_exc_info(
    future: "Union[futures.Future[_T], Future[_T]]",
    exc_info: Tuple[
        Optional[type], Optional[BaseException], Optional[types.TracebackType]
    ],
) -> None:
    """Set the given ``exc_info`` as the `Future`'s exception.

    Understands both `asyncio.Future` and the extensions in older
    versions of Tornado to enable better tracebacks on Python 2.

    .. versionadded:: 5.0

    .. versionchanged:: 6.0

       If the future is already cancelled, this function is a no-op.
       (previously ``asyncio.InvalidStateError`` would be raised)

    """
    if exc_info[1] is None:
        raise Exception("future_set_exc_info called with no exception")
    future_set_exception_unless_cancelled(future, exc_info[1])
def future_add_done_callback(  # noqa: F811
    future: "Union[futures.Future[_T], Future[_T]]", callback: Callable[..., None]
) -> None:
    """Arrange to call ``callback`` when ``future`` is complete.

    ``callback`` is invoked with one argument, the ``future``.

    If ``future`` is already done, ``callback`` is invoked immediately.
    This may differ from the behavior of ``Future.add_done_callback``,
    which makes no such guarantee.

    .. versionadded:: 5.0
    """
    if future.done():
        callback(future)
    else:
        future.add_done_callback(callback)
def xhtml_escape(value: Union[str, bytes]) -> str:
    """Escapes a string so it is valid within HTML or XML.

    Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
    When used in attribute values the escaped strings must be enclosed
    in quotes.

    Equivalent to `html.escape` except that this function always returns
    type `str` while `html.escape` returns `bytes` if its input is `bytes`.

    .. versionchanged:: 3.2

       Added the single quote to the list of escaped characters.

    .. versionchanged:: 6.4

       Now simply wraps `html.escape`. This is equivalent to the old behavior
       except that single quotes are now escaped as ``&#x27;`` instead of
       ``&#39;`` and performance may be different.
    """
    return html.escape(to_unicode(value))
def xhtml_unescape(value: Union[str, bytes]) -> str:
    """Un-escapes an XML-escaped string.

    Equivalent to `html.unescape` except that this function always returns
    type `str` while `html.unescape` returns `bytes` if its input is `bytes`.

    .. versionchanged:: 6.4

       Now simply wraps `html.unescape`. This changes behavior for some
       inputs as required by the HTML 5 specification
       https://html.spec.whatwg.org/multipage/parsing.html#numeric-character-reference-end-state

       Some invalid inputs such as surrogates now raise an error, and numeric
       references to certain ISO-8859-1 characters are now handled correctly.
    """
    return html.unescape(to_unicode(value))
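A quick round trip illustrating both functions (the escaped output follows `html.escape` semantics)::

    s = '<b>"quoted" & \'single\'</b>'
    escaped = xhtml_escape(s)
    # '&lt;b&gt;&quot;quoted&quot; &amp; &#x27;single&#x27;&lt;/b&gt;'
    assert xhtml_unescape(escaped) == s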
def json_encode(value: Any) -> str:
    """JSON-encodes the given Python object.

    Equivalent to `json.dumps` with the additional guarantee that the output
    will never contain the character sequence ``</`` which can be problematic
    when JSON is embedded in an HTML ``<script>`` tag.
    """
    # JSON permits but does not require forward slashes to be escaped.
    # This is useful when json data is emitted in a <script> tag
    # in HTML, as it prevents </script> tags from prematurely terminating
    # the JavaScript.  Some json libraries do this escaping by default,
    # although python's standard library does not, so we do it here.
    # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
    return json.dumps(value).replace("</", "<\\/")
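A small demonstration of the ``</`` guarantee (the payload here is illustrative)::

    import json

    payload = {"html": "</script><script>alert(1)</script>"}
    safe = json_encode(payload)
    assert "</" not in safe             # emitted as the equivalent "<\/"
    assert json.loads(safe) == payload  # still parses as the same JSON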
def json_decode(value: Union[str, bytes]) -> Any:
    """Returns Python objects for the given JSON string.

    Supports both `str` and `bytes` inputs. Equivalent to `json.loads`.
    """
    return json.loads(value)
def squeeze(value: str) -> str:
    """Replace all sequences of whitespace chars with a single space."""
    return re.sub(r"[\x00-\x20]+", " ", value).strip()
def url_escape(value: Union[str, bytes], plus: bool = True) -> str:
    """Returns a URL-encoded version of the given value.

    Equivalent to either `urllib.parse.quote_plus` or `urllib.parse.quote`
    depending on the ``plus`` argument.

    If ``plus`` is true (the default), spaces will be represented as ``+``
    and slashes will be represented as ``%2F``.  This is appropriate for
    query strings. If ``plus`` is false, spaces will be represented as
    ``%20`` and slashes are left as-is. This is appropriate for the path
    component of a URL. Note that the default of ``plus=True`` is effectively
    the reverse of Python's urllib module.

    .. versionadded:: 3.1
       The ``plus`` argument
    """
    quote = urllib.parse.quote_plus if plus else urllib.parse.quote
    return quote(value)
def url_unescape(
    value: Union[str, bytes], encoding: Optional[str] = "utf-8", plus: bool = True
) -> Union[str, bytes]:
    """Decodes the given value from a URL.

    The argument may be either a byte or unicode string.

    If encoding is None, the result will be a byte string and this function
    is equivalent to `urllib.parse.unquote_to_bytes` if ``plus=False``.
    Otherwise, the result is a unicode string in the specified encoding and
    this function is equivalent to either `urllib.parse.unquote_plus` or
    `urllib.parse.unquote` except that this function also accepts `bytes`
    as input.

    If ``plus`` is true (the default), plus signs will be interpreted as
    spaces (literal plus signs must be represented as "%2B").  This is
    appropriate for query strings and form-encoded values but not for the
    path component of a URL.  Note that this default is the reverse of
    Python's urllib module.

    .. versionadded:: 3.1
       The ``plus`` argument
    """
    if encoding is None:
        if plus:
            # unquote_to_bytes doesn't have a _plus variant
            value = to_basestring(value).replace("+", " ")
        return urllib.parse.unquote_to_bytes(value)
    else:
        unquote = urllib.parse.unquote_plus if plus else urllib.parse.unquote
        return unquote(to_basestring(value), encoding=encoding)
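A sketch of how the ``plus`` flag changes both directions (results follow the stdlib quote/unquote behavior described above)::

    url_escape("a b/c")               # 'a+b%2Fc'  (query-string style)
    url_escape("a b/c", plus=False)   # 'a%20b/c'  (path style)
    url_unescape("a+b%2Fc")           # 'a b/c'
    url_unescape("a+b", plus=False)   # 'a+b'  (plus sign left alone)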
def parse_qs_bytes(
    qs: Union[str, bytes], keep_blank_values: bool = False, strict_parsing: bool = False
) -> Dict[str, List[bytes]]:
    """Parses a query string like urlparse.parse_qs,
    but takes bytes and returns the values as byte strings.

    Keys still become type str (interpreted as latin1 in python3!)
    because it's too painful to keep them as byte strings in
    python3 and in practice they're nearly always ascii anyway.
    """
    # This is gross, but python3 doesn't give us another way.
    # Latin1 is the universal donor of character encodings.
    if isinstance(qs, bytes):
        qs = qs.decode("latin1")
    result = urllib.parse.parse_qs(
        qs, keep_blank_values, strict_parsing, encoding="latin1", errors="strict"
    )
    encoded = {}
    for k, v in result.items():
        encoded[k] = [i.encode("latin1") for i in v]
    return encoded
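An illustrative call showing the str keys and bytes values::

    parse_qs_bytes(b"a=1&a=2&b=%ff")
    # {'a': [b'1', b'2'], 'b': [b'\xff']}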
def utf8(value: Union[None, str, bytes]) -> Optional[bytes]:
    """Converts a string argument to a byte string.

    If the argument is already a byte string or None, it is returned
    unchanged.  Otherwise it must be a unicode string and is encoded as utf8.
    """
    if isinstance(value, _UTF8_TYPES):
        return value
    if not isinstance(value, unicode_type):
        raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
    return value.encode("utf-8")
def to_unicode(value: Union[None, str, bytes]) -> Optional[str]:
    """Converts a string argument to a unicode string.

    If the argument is already a unicode string or None, it is returned
    unchanged.  Otherwise it must be a byte string and is decoded as utf8.
    """
    if isinstance(value, _TO_UNICODE_TYPES):
        return value
    if not isinstance(value, bytes):
        raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
    return value.decode("utf-8")
def recursive_unicode(obj: Any) -> Any:
    """Walks a simple data structure, converting byte strings to unicode.

    Supports lists, tuples, and dictionaries.
    """
    if isinstance(obj, dict):
        return dict(
            (recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items()
        )
    elif isinstance(obj, list):
        return list(recursive_unicode(i) for i in obj)
    elif isinstance(obj, tuple):
        return tuple(recursive_unicode(i) for i in obj)
    elif isinstance(obj, bytes):
        return to_unicode(obj)
    else:
        return obj
def linkify(
    text: Union[str, bytes],
    shorten: bool = False,
    extra_params: Union[str, Callable[[str], str]] = "",
    require_protocol: bool = False,
    permitted_protocols: List[str] = ["http", "https"],
) -> str:
    """Converts plain text into HTML with links.

    For example: ``linkify("Hello http://tornadoweb.org!")`` would return
    ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``

    Parameters:

    * ``shorten``: Long urls will be shortened for display.

    * ``extra_params``: Extra text to include in the link tag, or a callable
      taking the link as an argument and returning the extra text
      e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
      or::

          def extra_params_cb(url):
              if url.startswith("http://example.com"):
                  return 'class="internal"'
              else:
                  return 'class="external" rel="nofollow"'
          linkify(text, extra_params=extra_params_cb)

    * ``require_protocol``: Only linkify urls which include a protocol. If
      this is False, urls such as www.facebook.com will also be linkified.

    * ``permitted_protocols``: List (or set) of protocols which should be
      linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
      "mailto"])``. It is very unsafe to include protocols such as
      ``javascript``.
    """
    if extra_params and not callable(extra_params):
        extra_params = " " + extra_params.strip()

    def make_link(m: typing.Match) -> str:
        url = m.group(1)
        proto = m.group(2)
        if require_protocol and not proto:
            return url  # no protocol, no linkify

        if proto and proto not in permitted_protocols:
            return url  # bad protocol, no linkify

        href = m.group(1)
        if not proto:
            href = "http://" + href  # no proto specified, use http

        if callable(extra_params):
            params = " " + extra_params(href).strip()
        else:
            params = extra_params

        # clip long urls. max_len is just an approximation
        max_len = 30
        if shorten and len(url) > max_len:
            before_clip = url
            if proto:
                proto_len = len(proto) + 1 + len(m.group(3) or "")  # +1 for :
            else:
                proto_len = 0

            parts = url[proto_len:].split("/")
            if len(parts) > 1:
                # Grab the whole host part plus the first bit of the path
                # The path is usually not that interesting once shortened
                # (no more slug, etc), so it really just provides a little
                # extra indication of shortening.
                url = (
                    url[:proto_len]
                    + parts[0]
                    + "/"
                    + parts[1][:8].split("?")[0].split(".")[0]
                )

            if len(url) > max_len * 1.5:  # still too long
                url = url[:max_len]

            if url != before_clip:
                amp = url.rfind("&")
                # avoid splitting html char entities
                if amp > max_len - 5:
                    url = url[:amp]
                url += "..."

                if len(url) >= len(before_clip):
                    url = before_clip
                else:
                    # full url is visible on mouse-over (for those who don't
                    # have a status bar, such as Safari by default)
                    params += ' title="%s"' % href

        return '<a href="%s"%s>%s</a>' % (href, params, url)

    # First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entities other than &amp; so
    # that we won't pick up &quot;, etc.
    text = _unicode(xhtml_escape(text))
    return _URL_RE.sub(make_link, text)
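A usage sketch; the second call shows the effect of ``require_protocol`` (output shown approximately)::

    linkify("Docs at http://www.tornadoweb.org now")
    # 'Docs at <a href="http://www.tornadoweb.org">http://www.tornadoweb.org</a> now'

    # With require_protocol=True, bare hosts like "www.example.com" stay
    # plain text instead of being linked with an implied http:// prefix.
    linkify("see www.example.com", require_protocol=True)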
def coroutine(
    func: Union[Callable[..., "Generator[Any, Any, _T]"], Callable[..., _T]]
) -> Callable[..., "Future[_T]"]:
    """Decorator for asynchronous generators.

    For compatibility with older versions of Python, coroutines may
    also "return" by raising the special exception `Return(value)
    <Return>`.

    Functions with this decorator return a `.Future`.

    .. warning::

       When exceptions occur inside a coroutine, the exception
       information will be stored in the `.Future` object. You must
       examine the result of the `.Future` object, or the exception
       may go unnoticed by your code. This means yielding the function
       if called from another coroutine, using something like
       `.IOLoop.run_sync` for top-level calls, or passing the `.Future`
       to `.IOLoop.add_future`.

    .. versionchanged:: 6.0

       The ``callback`` argument was removed. Use the returned
       awaitable object instead.

    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # type: (*Any, **Any) -> Future[_T]
        # This function is type-annotated with a comment to work around
        # https://bitbucket.org/pypy/pypy/issues/2868/segfault-with-args-type-annotation-in
        future = _create_future()
        if contextvars is not None:
            ctx_run = contextvars.copy_context().run  # type: Callable
        else:
            ctx_run = _fake_ctx_run
        try:
            result = ctx_run(func, *args, **kwargs)
        except (Return, StopIteration) as e:
            result = _value_from_stopiteration(e)
        except Exception:
            future_set_exc_info(future, sys.exc_info())
            try:
                return future
            finally:
                # Avoid circular references
                future = None  # type: ignore
        else:
            if isinstance(result, Generator):
                # Inline the first iteration of Runner.run.  This lets us
                # avoid the cost of creating a Runner when the coroutine
                # never actually yields, which in turn allows us to
                # use "optional" coroutines in critical path code without
                # performance penalty for the synchronous case.
                try:
                    yielded = ctx_run(next, result)
                except (StopIteration, Return) as e:
                    future_set_result_unless_cancelled(
                        future, _value_from_stopiteration(e)
                    )
                except Exception:
                    future_set_exc_info(future, sys.exc_info())
                else:
                    # Provide strong references to Runner objects as long
                    # as their result future objects also have strong
                    # references (typically from the parent coroutine's
                    # Runner).  This keeps the coroutine's Runner alive.
                    # We do this by exploiting the public API
                    # add_done_callback() instead of putting a private
                    # attribute on the Future.
                    # (GitHub issues #1769, #2229).
                    runner = Runner(ctx_run, result, future, yielded)
                    future.add_done_callback(lambda _: runner)
                yielded = None
                try:
                    return future
                finally:
                    # Subtle memory optimization: if next() raised an exception,
                    # the future's exc_info contains a traceback which
                    # includes this stack frame.  This creates a cycle,
                    # which will be collected at the next full GC but has
                    # been shown to greatly increase memory usage of
                    # benchmarks (relative to the refcount-based scheme
                    # used in the absence of cycles).  We can avoid the
                    # cycle by clearing the local variable after we return it.
                    future = None  # type: ignore
            future_set_result_unless_cancelled(future, result)
            return future

    wrapper.__wrapped__ = func  # type: ignore
    wrapper.__tornado_coroutine__ = True  # type: ignore
    return wrapper
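A decorator-style coroutine sketch; it uses `tornado.httpclient.AsyncHTTPClient`, which is assumed here purely for illustration::

    from tornado import gen
    from tornado.httpclient import AsyncHTTPClient

    @gen.coroutine
    def fetch_body(url):
        client = AsyncHTTPClient()
        response = yield client.fetch(url)  # yield a Future, get its result
        raise gen.Return(response.body)     # "return" a value, pre-3.3 style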
def is_coroutine_function(func: Any) -> bool:
    """Return whether *func* is a coroutine function, i.e. a function
    wrapped with `~.gen.coroutine`.

    .. versionadded:: 4.5
    """
    return getattr(func, "__tornado_coroutine__", False)
def multi(
    children: Union[List[_Yieldable], Dict[Any, _Yieldable]],
    quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
) -> "Union[Future[List], Future[Dict]]":
    """Runs multiple asynchronous operations in parallel.

    ``children`` may either be a list or a dict whose values are
    yieldable objects. ``multi()`` returns a new yieldable
    object that resolves to a parallel structure containing their
    results. If ``children`` is a list, the result is a list of
    results in the same order; if it is a dict, the result is a dict
    with the same keys.

    That is, ``results = yield multi(list_of_futures)`` is equivalent
    to::

        results = []
        for future in list_of_futures:
            results.append(yield future)

    If any children raise exceptions, ``multi()`` will raise the first
    one. All others will be logged, unless they are of types
    contained in the ``quiet_exceptions`` argument.

    In a ``yield``-based coroutine, it is not normally necessary to
    call this function directly, since the coroutine runner will
    do it automatically when a list or dict is yielded. However,
    it is necessary in ``await``-based coroutines, or to pass
    the ``quiet_exceptions`` argument.

    This function is available under the names ``multi()`` and
    ``Multi()`` for historical reasons.

    Cancelling a `.Future` returned by ``multi()`` does not cancel its
    children. `asyncio.gather` is similar to ``multi()``, but it does
    cancel its children.

    .. versionchanged:: 4.2
       If multiple yieldables fail, any exceptions after the first
       (which is raised) will be logged. Added the ``quiet_exceptions``
       argument to suppress this logging for selected exception types.

    .. versionchanged:: 4.3
       Replaced the class ``Multi`` and the function ``multi_future``
       with a unified function ``multi``. Added support for yieldables
       other than ``YieldPoint`` and `.Future`.

    """
    return multi_future(children, quiet_exceptions=quiet_exceptions)
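A sketch of the dict form in an ``await``-based coroutine; the ``client`` argument is a hypothetical object with an async ``fetch`` method::

    async def fetch_all(client, urls):
        # A dict of yieldables resolves to a dict of results, same keys.
        responses = await multi({u: client.fetch(u) for u in urls})
        return responses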
def multi_future(
    children: Union[List[_Yieldable], Dict[Any, _Yieldable]],
    quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
) -> "Union[Future[List], Future[Dict]]":
    """Wait for multiple asynchronous futures in parallel.

    Since Tornado 6.0, this function is exactly the same as `multi`.

    .. versionadded:: 4.0

    .. versionchanged:: 4.2
       If multiple ``Futures`` fail, any exceptions after the first (which is
       raised) will be logged. Added the ``quiet_exceptions``
       argument to suppress this logging for selected exception types.

    .. deprecated:: 4.3
       Use `multi` instead.
    """
    if isinstance(children, dict):
        keys = list(children.keys())  # type: Optional[List]
        children_seq = children.values()  # type: Iterable
    else:
        keys = None
        children_seq = children
    children_futs = list(map(convert_yielded, children_seq))
    assert all(is_future(i) or isinstance(i, _NullFuture) for i in children_futs)
    unfinished_children = set(children_futs)

    future = _create_future()
    if not children_futs:
        future_set_result_unless_cancelled(future, {} if keys is not None else [])

    def callback(fut: Future) -> None:
        unfinished_children.remove(fut)
        if not unfinished_children:
            result_list = []
            for f in children_futs:
                try:
                    result_list.append(f.result())
                except Exception as e:
                    if future.done():
                        if not isinstance(e, quiet_exceptions):
                            app_log.error(
                                "Multiple exceptions in yield list", exc_info=True
                            )
                    else:
                        future_set_exc_info(future, sys.exc_info())
            if not future.done():
                if keys is not None:
                    future_set_result_unless_cancelled(
                        future, dict(zip(keys, result_list))
                    )
                else:
                    future_set_result_unless_cancelled(future, result_list)

    listening = set()  # type: Set[Future]
    for f in children_futs:
        if f not in listening:
            listening.add(f)
            future_add_done_callback(f, callback)
    return future
def maybe_future(x: Any) -> Future:
    """Converts ``x`` into a `.Future`.

    If ``x`` is already a `.Future`, it is simply returned; otherwise
    it is wrapped in a new `.Future`.  This is suitable for use as
    ``result = yield gen.maybe_future(f())`` when you don't know whether
    ``f()`` returns a `.Future` or not.

    .. deprecated:: 4.3
       This function only handles ``Futures``, not other yieldable objects.
       Instead of `maybe_future`, check for the non-future result types
       you expect (often just ``None``), and ``yield`` anything unknown.
    """
    if is_future(x):
        return x
    else:
        fut = _create_future()
        fut.set_result(x)
        return fut
def with_timeout(
    timeout: Union[float, datetime.timedelta],
    future: _Yieldable,
    quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
) -> Future:
    """Wraps a `.Future` (or other yieldable object) in a timeout.

    Raises `tornado.util.TimeoutError` if the input future does not
    complete before ``timeout``, which may be specified in any form
    allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
    an absolute time relative to `.IOLoop.time`)

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is either of a type contained in
    ``quiet_exceptions`` (which may be an exception type or a sequence of
    types), or an ``asyncio.CancelledError``.

    The wrapped `.Future` is not canceled when the timeout expires,
    permitting it to be reused. `asyncio.wait_for` is similar to this
    function but it does cancel the wrapped `.Future` on timeout.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.

    .. versionchanged:: 4.4
       Added support for yieldable objects other than `.Future`.

    .. versionchanged:: 6.0.3
       ``asyncio.CancelledError`` is now always considered "quiet".

    .. versionchanged:: 6.2
       ``tornado.util.TimeoutError`` is now an alias to ``asyncio.TimeoutError``.

    """
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    future_converted = convert_yielded(future)
    result = _create_future()
    chain_future(future_converted, result)
    io_loop = IOLoop.current()

    def error_callback(future: Future) -> None:
        try:
            future.result()
        except asyncio.CancelledError:
            pass
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error(
                    "Exception in Future %r after timeout", future, exc_info=True
                )

    def timeout_callback() -> None:
        if not result.done():
            result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future_add_done_callback(future_converted, error_callback)

    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future_converted, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
        future_add_done_callback(
            future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
        )
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
        )
    return result
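A usage sketch under the assumption of a hypothetical async ``client.fetch``; note the wrapped operation keeps running after the timeout::

    import datetime

    from tornado import gen
    from tornado.util import TimeoutError

    async def fetch_with_deadline(client, url):
        try:
            return await gen.with_timeout(
                datetime.timedelta(seconds=5), client.fetch(url)
            )
        except TimeoutError:
            return None  # the fetch is not cancelled; it may still complete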
def sleep(duration: float) -> "Future[None]":
    """Return a `.Future` that resolves after the given number of seconds.

    When used with ``yield`` in a coroutine, this is a non-blocking
    analogue to `time.sleep` (which should not be used in coroutines
    because it is blocking)::

        yield gen.sleep(0.5)

    Note that calling this function on its own does nothing; you must
    wait on the `.Future` it returns (usually by yielding it).

    .. versionadded:: 4.1
    """
    f = _create_future()
    IOLoop.current().call_later(
        duration, lambda: future_set_result_unless_cancelled(f, None)
    )
    return f
def convert_yielded(yielded: _Yieldable) -> Future:
    """Convert a yielded object into a `.Future`.

    The default implementation accepts lists, dictionaries, and
    Futures. This has the side effect of starting any coroutines that
    did not start themselves, similar to `asyncio.ensure_future`.

    If the `~functools.singledispatch` library is available, this function
    may be extended to support additional types. For example::

        @convert_yielded.register(asyncio.Future)
        def _(asyncio_future):
            return tornado.platform.asyncio.to_tornado_future(asyncio_future)

    .. versionadded:: 4.1

    """
    if yielded is None or yielded is moment:
        return moment
    elif yielded is _null_future:
        return _null_future
    elif isinstance(yielded, (list, dict)):
        return multi(yielded)  # type: ignore
    elif is_future(yielded):
        return typing.cast(Future, yielded)
    elif isawaitable(yielded):
        return _wrap_awaitable(yielded)  # type: ignore
    else:
        raise BadYieldError("yielded unknown object %r" % (yielded,))
def parse_int(s: str) -> int:
    """Parse a non-negative integer from a string."""
    if DIGITS.fullmatch(s) is None:
        raise ValueError("not an integer: %r" % s)
    return int(s)
def parse_hex_int(s: str) -> int:
    """Parse a non-negative hexadecimal integer from a string."""
    if HEXDIGITS.fullmatch(s) is None:
        raise ValueError("not a hexadecimal integer: %r" % s)
    return int(s, 16)
def _normalize_header(name: str) -> str:
    """Map a header name to Http-Header-Case.

    >>> _normalize_header("coNtent-TYPE")
    'Content-Type'
    """
    return "-".join([w.capitalize() for w in name.split("-")])
def url_concat(
    url: str,
    args: Union[
        None, Dict[str, str], List[Tuple[str, str]], Tuple[Tuple[str, str], ...]
    ],
) -> str:
    """Concatenate url and arguments regardless of whether
    url has existing query parameters.

    ``args`` may be either a dictionary or a list of key-value pairs
    (the latter allows for multiple values with the same key).

    >>> url_concat("http://example.com/foo", dict(c="d"))
    'http://example.com/foo?c=d'
    >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
    'http://example.com/foo?a=b&c=d'
    >>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")])
    'http://example.com/foo?a=b&c=d&c=d2'
    """
    if args is None:
        return url
    parsed_url = urlparse(url)
    if isinstance(args, dict):
        parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
        parsed_query.extend(args.items())
    elif isinstance(args, list) or isinstance(args, tuple):
        parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
        parsed_query.extend(args)
    else:
        err = "'args' parameter should be dict, list or tuple. Not {0}".format(
            type(args)
        )
        raise TypeError(err)
    final_query = urlencode(parsed_query)
    url = urlunparse(
        (
            parsed_url[0],
            parsed_url[1],
            parsed_url[2],
            parsed_url[3],
            final_query,
            parsed_url[5],
        )
    )
    return url
def _parse_request_range(
    range_header: str,
) -> Optional[Tuple[Optional[int], Optional[int]]]:
    """Parses a Range header.

    Returns either ``None`` or tuple ``(start, end)``.
    Note that while the HTTP headers use inclusive byte positions,
    this method returns indexes suitable for use in slices.

    >>> start, end = _parse_request_range("bytes=1-2")
    >>> start, end
    (1, 3)
    >>> [0, 1, 2, 3, 4][start:end]
    [1, 2]
    >>> _parse_request_range("bytes=6-")
    (6, None)
    >>> _parse_request_range("bytes=-6")
    (-6, None)
    >>> _parse_request_range("bytes=-0")
    (None, 0)
    >>> _parse_request_range("bytes=")
    (None, None)
    >>> _parse_request_range("foo=42")
    >>> _parse_request_range("bytes=1-2,6-10")

    Note: only supports one range (ex, ``bytes=1-2,6-10`` is not allowed).

    See [0] for the details of the range header.

    [0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
    """
    unit, _, value = range_header.partition("=")
    unit, value = unit.strip(), value.strip()
    if unit != "bytes":
        return None
    start_b, _, end_b = value.partition("-")
    try:
        start = _int_or_none(start_b)
        end = _int_or_none(end_b)
    except ValueError:
        return None
    if end is not None:
        if start is None:
            if end != 0:
                start = -end
                end = None
        else:
            end += 1
    return (start, end)
def _get_content_range(start: Optional[int], end: Optional[int], total: int) -> str:
    """Returns a suitable Content-Range header:

    >>> print(_get_content_range(None, 1, 4))
    bytes 0-0/4
    >>> print(_get_content_range(1, 3, 4))
    bytes 1-2/4
    >>> print(_get_content_range(None, None, 4))
    bytes 0-3/4
    """
    start = start or 0
    end = (end or total) - 1
    return "bytes %s-%s/%s" % (start, end, total)
def parse_body_arguments(
    content_type: str,
    body: bytes,
    arguments: Dict[str, List[bytes]],
    files: Dict[str, List[HTTPFile]],
    headers: Optional[HTTPHeaders] = None,
) -> None:
    """Parses a form request body.

    Supports ``application/x-www-form-urlencoded`` and
    ``multipart/form-data``.  The ``content_type`` parameter should be
    a string and ``body`` should be a byte string.  The ``arguments``
    and ``files`` parameters are dictionaries that will be updated
    with the parsed contents.
    """
    if content_type.startswith("application/x-www-form-urlencoded"):
        if headers and "Content-Encoding" in headers:
            gen_log.warning(
                "Unsupported Content-Encoding: %s", headers["Content-Encoding"]
            )
            return
        try:
            # real charset decoding will happen in RequestHandler.decode_argument()
            uri_arguments = parse_qs_bytes(body, keep_blank_values=True)
        except Exception as e:
            gen_log.warning("Invalid x-www-form-urlencoded body: %s", e)
            uri_arguments = {}
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        if headers and "Content-Encoding" in headers:
            gen_log.warning(
                "Unsupported Content-Encoding: %s", headers["Content-Encoding"]
            )
            return
        try:
            fields = content_type.split(";")
            for field in fields:
                k, sep, v = field.strip().partition("=")
                if k == "boundary" and v:
                    parse_multipart_form_data(utf8(v), body, arguments, files)
                    break
            else:
                raise ValueError("multipart boundary not found")
        except Exception as e:
            gen_log.warning("Invalid multipart/form-data: %s", e)
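An illustrative call with a urlencoded body (the dictionaries are updated in place)::

    arguments = {}
    files = {}
    parse_body_arguments(
        "application/x-www-form-urlencoded", b"a=1&a=2&b=x", arguments, files
    )
    # arguments == {"a": [b"1", b"2"], "b": [b"x"]}; files is left untouched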
def parse_multipart_form_data(
    boundary: bytes,
    data: bytes,
    arguments: Dict[str, List[bytes]],
    files: Dict[str, List[HTTPFile]],
) -> None:
    """Parses a ``multipart/form-data`` body.

    The ``boundary`` and ``data`` parameters are both byte strings.
    The dictionaries given in the arguments and files parameters
    will be updated with the contents of the body.

    .. versionchanged:: 5.1

       Now recognizes non-ASCII filenames in RFC 2231/5987
       (``filename*=``) format.
    """
    # The standard allows for the boundary to be quoted in the header,
    # although it's rare (it happens at least for google app engine
    # xmpp).  I think we're also supposed to handle backslash-escapes
    # here but I'll save that until we see a client that uses them
    # in the wild.
    if boundary.startswith(b'"') and boundary.endswith(b'"'):
        boundary = boundary[1:-1]
    final_boundary_index = data.rfind(b"--" + boundary + b"--")
    if final_boundary_index == -1:
        gen_log.warning("Invalid multipart/form-data: no final boundary")
        return
    parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
    for part in parts:
        if not part:
            continue
        eoh = part.find(b"\r\n\r\n")
        if eoh == -1:
            gen_log.warning("multipart/form-data missing headers")
            continue
        headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
        disp_header = headers.get("Content-Disposition", "")
        disposition, disp_params = _parse_header(disp_header)
        if disposition != "form-data" or not part.endswith(b"\r\n"):
            gen_log.warning("Invalid multipart/form-data")
            continue
        value = part[eoh + 4 : -2]
        if not disp_params.get("name"):
            gen_log.warning("multipart/form-data value missing name")
            continue
        name = disp_params["name"]
        if disp_params.get("filename"):
            ctype = headers.get("Content-Type", "application/unknown")
            files.setdefault(name, []).append(
                HTTPFile(
                    filename=disp_params["filename"], body=value, content_type=ctype
                )
            )
        else:
            arguments.setdefault(name, []).append(value)
def format_timestamp(
    ts: Union[int, float, tuple, time.struct_time, datetime.datetime]
) -> str:
    """Formats a timestamp in the format used by HTTP.

    The argument may be a numeric timestamp as returned by `time.time`,
    a time tuple as returned by `time.gmtime`, or a `datetime.datetime`
    object. Naive `datetime.datetime` objects are assumed to represent
    UTC; aware objects are converted to UTC before formatting.

    >>> format_timestamp(1359312200)
    'Sun, 27 Jan 2013 18:43:20 GMT'
    """
    if isinstance(ts, (int, float)):
        time_num = ts
    elif isinstance(ts, (tuple, time.struct_time)):
        time_num = calendar.timegm(ts)
    elif isinstance(ts, datetime.datetime):
        time_num = calendar.timegm(ts.utctimetuple())
    else:
        raise TypeError("unknown timestamp type: %r" % ts)
    return email.utils.formatdate(time_num, usegmt=True)
def parse_request_start_line(line: str) -> RequestStartLine:
    """Returns a (method, path, version) tuple for an HTTP 1.x request line.

    The response is a `collections.namedtuple`.

    >>> parse_request_start_line("GET /foo HTTP/1.1")
    RequestStartLine(method='GET', path='/foo', version='HTTP/1.1')
    """
    try:
        method, path, version = line.split(" ")
    except ValueError:
        # https://tools.ietf.org/html/rfc7230#section-3.1.1
        # invalid request-line SHOULD respond with a 400 (Bad Request)
        raise HTTPInputError("Malformed HTTP request line")
    if not _http_version_re.match(version):
        raise HTTPInputError(
            "Malformed HTTP version in HTTP Request-Line: %r" % version
        )
    return RequestStartLine(method, path, version)
def parse_response_start_line(line: str) -> ResponseStartLine:
    """Returns a (version, code, reason) tuple for an HTTP 1.x response line.

    The response is a `collections.namedtuple`.

    >>> parse_response_start_line("HTTP/1.1 200 OK")
    ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
    """
    line = native_str(line)
    match = _http_response_line_re.match(line)
    if not match:
        raise HTTPInputError("Error parsing response start line")
    return ResponseStartLine(match.group(1), int(match.group(2)), match.group(3))
def _parse_header(line: str) -> Tuple[str, Dict[str, str]]:
    r"""Parse a Content-type like header.

    Return the main content-type and a dictionary of options.

    >>> d = "form-data; foo=\"b\\\\a\\\"r\"; file*=utf-8''T%C3%A4st"
    >>> ct, d = _parse_header(d)
    >>> ct
    'form-data'
    >>> d['file'] == r'T\u00e4st'.encode('ascii').decode('unicode_escape')
    True
    >>> d['foo']
    'b\\a"r'
    """
    parts = _parseparam(";" + line)
    key = next(parts)
    # decode_params treats first argument special, but we already stripped key
    params = [("Dummy", "value")]
    for p in parts:
        i = p.find("=")
        if i >= 0:
            name = p[:i].strip().lower()
            value = p[i + 1 :].strip()
            params.append((name, native_str(value)))
    decoded_params = email.utils.decode_params(params)
    decoded_params.pop(0)  # get rid of the dummy again
    pdict = {}
    for name, decoded_value in decoded_params:
        value = email.utils.collapse_rfc2231_value(decoded_value)
        if len(value) >= 2 and value[0] == '"' and value[-1] == '"':
            value = value[1:-1]
        pdict[name] = value
    return key, pdict
Inverse of _parse_header. >>> _encode_header('permessage-deflate', ... {'client_max_window_bits': 15, 'client_no_context_takeover': None}) 'permessage-deflate; client_max_window_bits=15; client_no_context_takeover'
def _encode_header(key: str, pdict: Dict[str, str]) -> str: """Inverse of _parse_header. >>> _encode_header('permessage-deflate', ... {'client_max_window_bits': 15, 'client_no_context_takeover': None}) 'permessage-deflate; client_max_window_bits=15; client_no_context_takeover' """ if not pdict: return key out = [key] # Sort the parameters just to make it easy to test. for k, v in sorted(pdict.items()): if v is None: out.append(k) else: # TODO: quote if necessary. out.append("%s=%s" % (k, v)) return "; ".join(out)
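A round-trip sketch of the two helpers (both are private to ``tornado.httputil``, though importable). Note the asymmetry: ``_parse_header`` drops valueless parameters such as ``client_no_context_takeover``::

    from tornado.httputil import _encode_header, _parse_header

    header = _encode_header(
        "permessage-deflate",
        {"client_max_window_bits": 15, "client_no_context_takeover": None},
    )
    # 'permessage-deflate; client_max_window_bits=15; client_no_context_takeover'

    key, params = _parse_header(header)
    assert key == "permessage-deflate"
    assert params == {"client_max_window_bits": "15"}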
Encodes a username/password pair in the format used by HTTP auth. The return value is a byte string in the form ``username:password``. .. versionadded:: 5.1
def encode_username_password( username: Union[str, bytes], password: Union[str, bytes] ) -> bytes: """Encodes a username/password pair in the format used by HTTP auth. The return value is a byte string in the form ``username:password``. .. versionadded:: 5.1 """ if isinstance(username, unicode_type): username = unicodedata.normalize("NFC", username) if isinstance(password, unicode_type): password = unicodedata.normalize("NFC", password) return utf8(username) + b":" + utf8(password)
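The result is typically base64-encoded into a Basic ``Authorization`` header; a brief sketch::

    import base64

    from tornado.httputil import encode_username_password

    credentials = encode_username_password("alice", "s3cret")
    assert credentials == b"alice:s3cret"

    # HTTP Basic auth header value built from the encoded pair.
    auth_header = "Basic " + base64.b64encode(credentials).decode("ascii")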
Returns ``(host, port)`` tuple from ``netloc``. Returned ``port`` will be ``None`` if not present. .. versionadded:: 4.1
def split_host_and_port(netloc: str) -> Tuple[str, Optional[int]]: """Returns ``(host, port)`` tuple from ``netloc``. Returned ``port`` will be ``None`` if not present. .. versionadded:: 4.1 """ match = _netloc_re.match(netloc) if match: host = match.group(1) port = int(match.group(2)) # type: Optional[int] else: host = netloc port = None return (host, port)
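For example (note that the regex-based split does not strip brackets from IPv6 literals)::

    from tornado.httputil import split_host_and_port

    assert split_host_and_port("example.com:8080") == ("example.com", 8080)
    assert split_host_and_port("example.com") == ("example.com", None)
    assert split_host_and_port("[::1]:8080") == ("[::1]", 8080)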
Generator converting a result of ``parse_qs`` back to name-value pairs. .. versionadded:: 5.0
def qs_to_qsl(qs: Dict[str, List[AnyStr]]) -> Iterable[Tuple[str, AnyStr]]: """Generator converting a result of ``parse_qs`` back to name-value pairs. .. versionadded:: 5.0 """ for k, vs in qs.items(): for v in vs: yield (k, v)
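A quick sketch pairing it with the standard library's ``parse_qs``::

    from urllib.parse import parse_qs

    from tornado.httputil import qs_to_qsl

    qs = parse_qs("a=1&a=2&b=3")
    assert sorted(qs_to_qsl(qs)) == [("a", "1"), ("a", "2"), ("b", "3")]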
Handle double quotes and escaping in cookie values. This method is copied verbatim from the Python 3.5 standard library (http.cookies._unquote) so we don't have to depend on non-public interfaces.
def _unquote_cookie(s: str) -> str: """Handle double quotes and escaping in cookie values. This method is copied verbatim from the Python 3.5 standard library (http.cookies._unquote) so we don't have to depend on non-public interfaces. """ # If there aren't any doublequotes, # then there can't be any special characters. See RFC 2109. if s is None or len(s) < 2: return s if s[0] != '"' or s[-1] != '"': return s # We have to assume that we must decode this string. # Down to work. # Remove the "s s = s[1:-1] # Check for special sequences. Examples: # \012 --> \n # \" --> " # i = 0 n = len(s) res = [] while 0 <= i < n: o_match = _OctalPatt.search(s, i) q_match = _QuotePatt.search(s, i) if not o_match and not q_match: # Neither matched res.append(s[i:]) break # else: j = k = -1 if o_match: j = o_match.start(0) if q_match: k = q_match.start(0) if q_match and (not o_match or k < j): # QuotePatt matched res.append(s[i:k]) res.append(s[k + 1]) i = k + 2 else: # OctalPatt matched res.append(s[i:j]) res.append(chr(int(s[j + 1 : j + 4], 8))) i = j + 4 return _nulljoin(res)
Parse a ``Cookie`` HTTP header into a dict of name/value pairs. This function attempts to mimic browser cookie parsing behavior; it specifically does not follow any of the cookie-related RFCs (because browsers don't either). The algorithm used is identical to that used by Django version 1.9.10. .. versionadded:: 4.4.2
def parse_cookie(cookie: str) -> Dict[str, str]: """Parse a ``Cookie`` HTTP header into a dict of name/value pairs. This function attempts to mimic browser cookie parsing behavior; it specifically does not follow any of the cookie-related RFCs (because browsers don't either). The algorithm used is identical to that used by Django version 1.9.10. .. versionadded:: 4.4.2 """ cookiedict = {} for chunk in cookie.split(str(";")): if str("=") in chunk: key, val = chunk.split(str("="), 1) else: # Assume an empty name per # https://bugzilla.mozilla.org/show_bug.cgi?id=169091 key, val = str(""), chunk key, val = key.strip(), val.strip() if key or val: # unquote using Python's algorithm. cookiedict[key] = _unquote_cookie(val) return cookiedict
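A small sketch of the quirks described above::

    from tornado.httputil import parse_cookie

    cookies = parse_cookie('session="abc\\"def"; theme=dark; orphan')
    assert cookies["theme"] == "dark"
    assert cookies["session"] == 'abc"def'  # quotes and escapes unwrapped
    assert cookies[""] == "orphan"          # no '=' means an empty name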
Returns the closest match for the given locale codes. We iterate over all given locale codes in order. If we have a tight or a loose match for the code (e.g., "en" for "en_US"), we return the locale. Otherwise we move to the next code in the list. By default we return ``en_US`` if no translations are found for any of the specified locales. You can change the default locale with `set_default_locale()`.
def get(*locale_codes: str) -> "Locale": """Returns the closest match for the given locale codes. We iterate over all given locale codes in order. If we have a tight or a loose match for the code (e.g., "en" for "en_US"), we return the locale. Otherwise we move to the next code in the list. By default we return ``en_US`` if no translations are found for any of the specified locales. You can change the default locale with `set_default_locale()`. """ return Locale.get_closest(*locale_codes)
Sets the default locale. The default locale is assumed to be the language used for all strings in the system. The translations loaded from disk are mappings from the default locale to the destination locale. Consequently, you don't need to create a translation file for the default locale.
def set_default_locale(code: str) -> None: """Sets the default locale. The default locale is assumed to be the language used for all strings in the system. The translations loaded from disk are mappings from the default locale to the destination locale. Consequently, you don't need to create a translation file for the default locale. """ global _default_locale global _supported_locales _default_locale = code _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
Loads translations from CSV files in a directory. Translations are strings with optional Python-style named placeholders (e.g., ``My name is %(name)s``) and their associated translations. The directory should have translation files of the form ``LOCALE.csv``, e.g. ``es_GT.csv``. The CSV files should have two or three columns: string, translation, and an optional plural indicator. Plural indicators should be one of "plural" or "singular". A given string can have both singular and plural forms. For example ``%(name)s liked this`` may have a different verb conjugation depending on whether %(name)s is one name or a list of names. There should be two rows in the CSV file for that string, one with plural indicator "singular", and one "plural". For strings with no verbs that would change on translation, simply use "unknown" or the empty string (or don't include the column at all). The file is read using the `csv` module in the default "excel" dialect. In this format there should not be spaces after the commas. If no ``encoding`` parameter is given, the encoding will be detected automatically (among UTF-8 and UTF-16) if the file contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM is present. Example translation ``es_LA.csv``:: "I love you","Te amo" "%(name)s liked this","A %(name)s les gustó esto","plural" "%(name)s liked this","A %(name)s le gustó esto","singular" .. versionchanged:: 4.3 Added ``encoding`` parameter. Added support for BOM-based encoding detection, UTF-16, and UTF-8-with-BOM.
def load_translations(directory: str, encoding: Optional[str] = None) -> None:
    """Loads translations from CSV files in a directory.

    Translations are strings with optional Python-style named placeholders
    (e.g., ``My name is %(name)s``) and their associated translations.

    The directory should have translation files of the form ``LOCALE.csv``,
    e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
    translation, and an optional plural indicator. Plural indicators should
    be one of "plural" or "singular". A given string can have both singular
    and plural forms. For example ``%(name)s liked this`` may have a
    different verb conjugation depending on whether %(name)s is one
    name or a list of names. There should be two rows in the CSV file for
    that string, one with plural indicator "singular", and one "plural".
    For strings with no verbs that would change on translation, simply
    use "unknown" or the empty string (or don't include the column at all).

    The file is read using the `csv` module in the default "excel" dialect.
    In this format there should not be spaces after the commas.

    If no ``encoding`` parameter is given, the encoding will be
    detected automatically (among UTF-8 and UTF-16) if the file
    contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM
    is present.

    Example translation ``es_LA.csv``::

        "I love you","Te amo"
        "%(name)s liked this","A %(name)s les gustó esto","plural"
        "%(name)s liked this","A %(name)s le gustó esto","singular"

    .. versionchanged:: 4.3
       Added ``encoding`` parameter. Added support for BOM-based encoding
       detection, UTF-16, and UTF-8-with-BOM.
    """
    global _translations
    global _supported_locales
    _translations = {}
    for path in os.listdir(directory):
        if not path.endswith(".csv"):
            continue
        locale, extension = path.split(".")
        if not re.match("[a-z]+(_[A-Z]+)?$", locale):
            gen_log.error(
                "Unrecognized locale %r (path: %s)",
                locale,
                os.path.join(directory, path),
            )
            continue
        full_path = os.path.join(directory, path)
        if encoding is None:
            # Try to autodetect encoding based on the BOM. Use a local
            # variable so the encoding detected for one file doesn't
            # carry over to the files that follow it.
            with open(full_path, "rb") as bf:
                data = bf.read(len(codecs.BOM_UTF16_LE))
            if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
                file_encoding = "utf-16"
            else:
                # utf-8-sig is "utf-8 with optional BOM". It's discouraged
                # in most cases but is common with CSV files because Excel
                # cannot read utf-8 files without a BOM.
                file_encoding = "utf-8-sig"
        else:
            file_encoding = encoding
        # python 3: csv.reader requires a file open in text mode.
        # Specify an encoding to avoid dependence on $LANG environment variable.
        with open(full_path, encoding=file_encoding) as f:
            _translations[locale] = {}
            for i, row in enumerate(csv.reader(f)):
                if not row or len(row) < 2:
                    continue
                row = [escape.to_unicode(c).strip() for c in row]
                english, translation = row[:2]
                if len(row) > 2:
                    plural = row[2] or "unknown"
                else:
                    plural = "unknown"
                if plural not in ("plural", "singular", "unknown"):
                    gen_log.error(
                        "Unrecognized plural indicator %r in %s line %d",
                        plural,
                        path,
                        i + 1,
                    )
                    continue
                _translations[locale].setdefault(plural, {})[english] = translation
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))
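A self-contained sketch that writes a one-line catalog to a temporary directory and loads it (the locale code and strings are illustrative)::

    import os
    import tempfile

    from tornado import locale

    with tempfile.TemporaryDirectory() as directory:
        with open(os.path.join(directory, "es_GT.csv"), "w", encoding="utf-8") as f:
            f.write('"I love you","Te amo"\n')
        locale.load_translations(directory)
        user_locale = locale.get("es_GT")
        assert user_locale.translate("I love you") == "Te amo"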
Loads translations from `gettext`'s locale tree.

The locale tree is similar to the system's ``/usr/share/locale``, like::

    {directory}/{lang}/LC_MESSAGES/{domain}.mo

Three steps are required to have your app translated:

1. Generate POT translation file::

    xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc

2. Merge against existing POT file::

    msgmerge old.po mydomain.po > new.po

3. Compile::

    msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
def load_gettext_translations(directory: str, domain: str) -> None:
    """Loads translations from `gettext`'s locale tree.

    The locale tree is similar to the system's ``/usr/share/locale``, like::

        {directory}/{lang}/LC_MESSAGES/{domain}.mo

    Three steps are required to have your app translated:

    1. Generate POT translation file::

        xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc

    2. Merge against existing POT file::

        msgmerge old.po mydomain.po > new.po

    3. Compile::

        msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
    """
    global _translations
    global _supported_locales
    global _use_gettext
    _translations = {}

    for filename in glob.glob(
        os.path.join(directory, "*", "LC_MESSAGES", domain + ".mo")
    ):
        lang = os.path.basename(os.path.dirname(os.path.dirname(filename)))
        try:
            _translations[lang] = gettext.translation(
                domain, directory, languages=[lang]
            )
        except Exception as e:
            gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
            continue
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
    _use_gettext = True
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))
Returns a list of all the supported locale codes.
def get_supported_locales() -> Iterable[str]: """Returns a list of all the supported locale codes.""" return _supported_locales
Turns on formatted logging output as configured. This is called automatically by `tornado.options.parse_command_line` and `tornado.options.parse_config_file`.
def enable_pretty_logging( options: Any = None, logger: Optional[logging.Logger] = None ) -> None: """Turns on formatted logging output as configured. This is called automatically by `tornado.options.parse_command_line` and `tornado.options.parse_config_file`. """ if options is None: import tornado.options options = tornado.options.options if options.logging is None or options.logging.lower() == "none": return if logger is None: logger = logging.getLogger() logger.setLevel(getattr(logging, options.logging.upper())) if options.log_file_prefix: rotate_mode = options.log_rotate_mode if rotate_mode == "size": channel = logging.handlers.RotatingFileHandler( filename=options.log_file_prefix, maxBytes=options.log_file_max_size, backupCount=options.log_file_num_backups, encoding="utf-8", ) # type: logging.Handler elif rotate_mode == "time": channel = logging.handlers.TimedRotatingFileHandler( filename=options.log_file_prefix, when=options.log_rotate_when, interval=options.log_rotate_interval, backupCount=options.log_file_num_backups, encoding="utf-8", ) else: error_message = ( "The value of log_rotate_mode option should be " + '"size" or "time", not "%s".' % rotate_mode ) raise ValueError(error_message) channel.setFormatter(LogFormatter(color=False)) logger.addHandler(channel) if options.log_to_stderr or (options.log_to_stderr is None and not logger.handlers): # Set up color if we are in a tty and curses is installed channel = logging.StreamHandler() channel.setFormatter(LogFormatter()) logger.addHandler(channel)
Add logging-related flags to ``options``. These options are present automatically on the default options instance; this method is only necessary if you have created your own `.OptionParser`. .. versionadded:: 4.2 This function existed in prior versions but was broken and undocumented until 4.2.
def define_logging_options(options: Any = None) -> None:
    """Add logging-related flags to ``options``.

    These options are present automatically on the default options instance;
    this method is only necessary if you have created your own `.OptionParser`.

    .. versionadded:: 4.2
        This function existed in prior versions but was broken and undocumented
        until 4.2.
    """
    if options is None:
        # late import to prevent cycle
        import tornado.options

        options = tornado.options.options
    options.define(
        "logging",
        default="info",
        help=(
            "Set the Python log level. If 'none', tornado won't touch the "
            "logging configuration."
        ),
        metavar="debug|info|warning|error|none",
    )
    options.define(
        "log_to_stderr",
        type=bool,
        default=None,
        help=(
            "Send log output to stderr (colorized if possible). "
            "By default use stderr if --log_file_prefix is not set and "
            "no other logging is configured."
        ),
    )
    options.define(
        "log_file_prefix",
        type=str,
        default=None,
        metavar="PATH",
        help=(
            "Path prefix for log files. "
            "Note that if you are running multiple tornado processes, "
            "log_file_prefix must be different for each of them (e.g. "
            "include the port number)"
        ),
    )
    options.define(
        "log_file_max_size",
        type=int,
        default=100 * 1000 * 1000,
        help="max size of log files before rollover",
    )
    options.define(
        "log_file_num_backups", type=int, default=10, help="number of log files to keep"
    )
    options.define(
        "log_rotate_when",
        type=str,
        default="midnight",
        help=(
            "specify the type of TimedRotatingFileHandler interval "
            "(other options: 'S', 'M', 'H', 'D', 'W0'-'W6')"
        ),
    )
    options.define(
        "log_rotate_interval",
        type=int,
        default=1,
        help="The interval value for timed rotation",
    )
    options.define(
        "log_rotate_mode",
        type=str,
        default="size",
        help="The mode of rotating files ('time' or 'size')",
    )
    options.add_parse_callback(lambda: enable_pretty_logging(options))
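A brief sketch with a private ``OptionParser``; the parse callback registered on the last line of the function above fires during ``parse_command_line``::

    from tornado.log import define_logging_options
    from tornado.options import OptionParser

    parser = OptionParser()
    define_logging_options(parser)
    # enable_pretty_logging(parser) runs automatically via the callback.
    parser.parse_command_line(["prog", "--logging=debug"])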
Creates listening sockets bound to the given port and address.

Returns a list of socket objects (multiple sockets are returned if the given address maps to multiple IP addresses, which is most common for mixed IPv4 and IPv6 use).

Address may be either an IP address or hostname. If it's a hostname, the server will listen on all IP addresses associated with the name. Address may be an empty string or None to listen on all available interfaces. Family may be set to either `socket.AF_INET` or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise both will be used if available.

The ``backlog`` argument has the same meaning as for `socket.listen() <socket.socket.listen>`.

``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.

``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket in the list. If your platform doesn't support this option, ``ValueError`` will be raised.
def bind_sockets( port: int, address: Optional[str] = None, family: socket.AddressFamily = socket.AF_UNSPEC, backlog: int = _DEFAULT_BACKLOG, flags: Optional[int] = None, reuse_port: bool = False, ) -> List[socket.socket]: """Creates listening sockets bound to the given port and address. Returns a list of socket objects (multiple sockets are returned if the given address maps to multiple IP addresses, which is most common for mixed IPv4 and IPv6 use). Address may be either an IP address or hostname. If it's a hostname, the server will listen on all IP addresses associated with the name. Address may be an empty string or None to listen on all available interfaces. Family may be set to either `socket.AF_INET` or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise both will be used if available. The ``backlog`` argument has the same meaning as for `socket.listen() <socket.socket.listen>`. ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``. ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket in the list. If your platform doesn't support this option ValueError will be raised. """ if reuse_port and not hasattr(socket, "SO_REUSEPORT"): raise ValueError("the platform doesn't support SO_REUSEPORT") sockets = [] if address == "": address = None if not socket.has_ipv6 and family == socket.AF_UNSPEC: # Python can be compiled with --disable-ipv6, which causes # operations on AF_INET6 sockets to fail, but does not # automatically exclude those results from getaddrinfo # results. # http://bugs.python.org/issue16208 family = socket.AF_INET if flags is None: flags = socket.AI_PASSIVE bound_port = None unique_addresses = set() # type: set for res in sorted( socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags), key=lambda x: x[0], ): if res in unique_addresses: continue unique_addresses.add(res) af, socktype, proto, canonname, sockaddr = res if ( sys.platform == "darwin" and address == "localhost" and af == socket.AF_INET6 and sockaddr[3] != 0 # type: ignore ): # Mac OS X includes a link-local address fe80::1%lo0 in the # getaddrinfo results for 'localhost'. However, the firewall # doesn't understand that this is a local address and will # prompt for access (often repeatedly, due to an apparent # bug in its ability to remember granting access to an # application). Skip these addresses. continue try: sock = socket.socket(af, socktype, proto) except socket.error as e: if errno_from_exception(e) == errno.EAFNOSUPPORT: continue raise if os.name != "nt": try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) except socket.error as e: if errno_from_exception(e) != errno.ENOPROTOOPT: # Hurd doesn't support SO_REUSEADDR. raise if reuse_port: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) if af == socket.AF_INET6: # On linux, ipv6 sockets accept ipv4 too by default, # but this makes it impossible to bind to both # 0.0.0.0 in ipv4 and :: in ipv6. On other systems, # separate sockets *must* be used to listen for both ipv4 # and ipv6. For consistency, always disable ipv4 on our # ipv6 sockets and use a separate ipv4 socket when needed. # # Python 2.x on windows doesn't have IPPROTO_IPV6. 
if hasattr(socket, "IPPROTO_IPV6"): sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) # automatic port allocation with port=None # should bind on the same port on IPv4 and IPv6 host, requested_port = sockaddr[:2] if requested_port == 0 and bound_port is not None: sockaddr = tuple([host, bound_port] + list(sockaddr[2:])) sock.setblocking(False) try: sock.bind(sockaddr) except OSError as e: if ( errno_from_exception(e) == errno.EADDRNOTAVAIL and address == "localhost" and sockaddr[0] == "::1" ): # On some systems (most notably docker with default # configurations), ipv6 is partially disabled: # socket.has_ipv6 is true, we can create AF_INET6 # sockets, and getaddrinfo("localhost", ..., # AF_PASSIVE) resolves to ::1, but we get an error # when binding. # # Swallow the error, but only for this specific case. # If EADDRNOTAVAIL occurs in other situations, it # might be a real problem like a typo in a # configuration. sock.close() continue else: raise bound_port = sock.getsockname()[1] sock.listen(backlog) sockets.append(sock) return sockets
Adds an `.IOLoop` event handler to accept new connections on ``sock``. When a connection is accepted, ``callback(connection, address)`` will be run (``connection`` is a socket object, and ``address`` is the address of the other end of the connection). Note that this signature is different from the ``callback(fd, events)`` signature used for `.IOLoop` handlers. A callable is returned which, when called, will remove the `.IOLoop` event handler and stop processing further incoming connections. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. .. versionchanged:: 5.0 A callable is returned (``None`` was returned before).
def add_accept_handler( sock: socket.socket, callback: Callable[[socket.socket, Any], None] ) -> Callable[[], None]: """Adds an `.IOLoop` event handler to accept new connections on ``sock``. When a connection is accepted, ``callback(connection, address)`` will be run (``connection`` is a socket object, and ``address`` is the address of the other end of the connection). Note that this signature is different from the ``callback(fd, events)`` signature used for `.IOLoop` handlers. A callable is returned which, when called, will remove the `.IOLoop` event handler and stop processing further incoming connections. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. .. versionchanged:: 5.0 A callable is returned (``None`` was returned before). """ io_loop = IOLoop.current() removed = [False] def accept_handler(fd: socket.socket, events: int) -> None: # More connections may come in while we're handling callbacks; # to prevent starvation of other tasks we must limit the number # of connections we accept at a time. Ideally we would accept # up to the number of connections that were waiting when we # entered this method, but this information is not available # (and rearranging this method to call accept() as many times # as possible before running any callbacks would have adverse # effects on load balancing in multiprocess configurations). # Instead, we use the (default) listen backlog as a rough # heuristic for the number of connections we can reasonably # accept at once. for i in range(_DEFAULT_BACKLOG): if removed[0]: # The socket was probably closed return try: connection, address = sock.accept() except BlockingIOError: # EWOULDBLOCK indicates we have accepted every # connection that is available. return except ConnectionAbortedError: # ECONNABORTED indicates that there was a connection # but it was closed while still in the accept queue. # (observed on FreeBSD). continue callback(connection, address) def remove_handler() -> None: io_loop.remove_handler(sock) removed[0] = True io_loop.add_handler(sock, accept_handler, IOLoop.READ) return remove_handler
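A minimal sketch tying ``bind_sockets`` and ``add_accept_handler`` together under a running event loop. A real server would wrap each accepted connection in an ``IOStream`` rather than using blocking socket calls as the handler below does::

    import asyncio

    from tornado.netutil import add_accept_handler, bind_sockets

    def handle_connection(connection, address):
        connection.setblocking(True)  # accepted sockets may be non-blocking
        connection.sendall(b"hello\n")
        connection.close()

    async def main():
        sock = bind_sockets(0, "127.0.0.1")[0]  # port 0 picks a free port
        print("listening on port", sock.getsockname()[1])
        remove_handler = add_accept_handler(sock, handle_connection)
        await asyncio.sleep(30)
        remove_handler()  # stop accepting new connections

    asyncio.run(main())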
Returns ``True`` if the given string is a well-formed IP address. Supports IPv4 and IPv6.
def is_valid_ip(ip: str) -> bool:
    """Returns ``True`` if the given string is a well-formed IP address.

    Supports IPv4 and IPv6.
    """
    if not ip or "\x00" in ip:
        # getaddrinfo resolves empty strings to localhost, and truncates
        # on zero bytes.
        return False
    try:
        res = socket.getaddrinfo(
            ip, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_NUMERICHOST
        )
        return bool(res)
    except socket.gaierror as e:
        if e.args[0] == socket.EAI_NONAME:
            return False
        raise
    except UnicodeError:
        # `socket.getaddrinfo` will raise a UnicodeError from the
        # `idna` decoder if the input is longer than 63 characters,
        # even for socket.AI_NUMERICHOST.  See
        # https://bugs.python.org/issue32958 for discussion
        return False
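For example::

    from tornado.netutil import is_valid_ip

    assert is_valid_ip("127.0.0.1")
    assert is_valid_ip("::1")
    assert not is_valid_ip("example.com")        # hostnames don't count
    assert not is_valid_ip("999.999.999.999")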
Try to convert an ``ssl_options`` dictionary to an `~ssl.SSLContext` object. The ``ssl_options`` dictionary contains keywords to be passed to ``ssl.SSLContext.wrap_socket``. In Python 2.7.9+, `ssl.SSLContext` objects can be used instead. This function converts the dict form to its `~ssl.SSLContext` equivalent, and may be used when a component which accepts both forms needs to upgrade to the `~ssl.SSLContext` version to use features like SNI or NPN. .. versionchanged:: 6.2 Added server_side argument. Omitting this argument will result in a DeprecationWarning on Python 3.10.
def ssl_options_to_context( ssl_options: Union[Dict[str, Any], ssl.SSLContext], server_side: Optional[bool] = None, ) -> ssl.SSLContext: """Try to convert an ``ssl_options`` dictionary to an `~ssl.SSLContext` object. The ``ssl_options`` dictionary contains keywords to be passed to ``ssl.SSLContext.wrap_socket``. In Python 2.7.9+, `ssl.SSLContext` objects can be used instead. This function converts the dict form to its `~ssl.SSLContext` equivalent, and may be used when a component which accepts both forms needs to upgrade to the `~ssl.SSLContext` version to use features like SNI or NPN. .. versionchanged:: 6.2 Added server_side argument. Omitting this argument will result in a DeprecationWarning on Python 3.10. """ if isinstance(ssl_options, ssl.SSLContext): return ssl_options assert isinstance(ssl_options, dict) assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options # TODO: Now that we have the server_side argument, can we switch to # create_default_context or would that change behavior? default_version = ssl.PROTOCOL_TLS if server_side: default_version = ssl.PROTOCOL_TLS_SERVER elif server_side is not None: default_version = ssl.PROTOCOL_TLS_CLIENT context = ssl.SSLContext(ssl_options.get("ssl_version", default_version)) if "certfile" in ssl_options: context.load_cert_chain( ssl_options["certfile"], ssl_options.get("keyfile", None) ) if "cert_reqs" in ssl_options: if ssl_options["cert_reqs"] == ssl.CERT_NONE: # This may have been set automatically by PROTOCOL_TLS_CLIENT but is # incompatible with CERT_NONE so we must manually clear it. context.check_hostname = False context.verify_mode = ssl_options["cert_reqs"] if "ca_certs" in ssl_options: context.load_verify_locations(ssl_options["ca_certs"]) if "ciphers" in ssl_options: context.set_ciphers(ssl_options["ciphers"]) if hasattr(ssl, "OP_NO_COMPRESSION"): # Disable TLS compression to avoid CRIME and related attacks. # This constant depends on openssl version 1.0. # TODO: Do we need to do this ourselves or can we trust # the defaults? context.options |= ssl.OP_NO_COMPRESSION return context
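A sketch of the dictionary form (the certificate file names are placeholders; the call will fail unless they exist)::

    import ssl

    from tornado.netutil import ssl_options_to_context

    ctx = ssl_options_to_context(
        {"certfile": "server.crt", "keyfile": "server.key"},
        server_side=True,
    )
    assert isinstance(ctx, ssl.SSLContext)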
Returns an ``ssl.SSLSocket`` wrapping the given socket. ``ssl_options`` may be either an `ssl.SSLContext` object or a dictionary (as accepted by `ssl_options_to_context`). Additional keyword arguments are passed to `ssl.SSLContext.wrap_socket`. .. versionchanged:: 6.2 Added server_side argument. Omitting this argument will result in a DeprecationWarning on Python 3.10.
def ssl_wrap_socket( socket: socket.socket, ssl_options: Union[Dict[str, Any], ssl.SSLContext], server_hostname: Optional[str] = None, server_side: Optional[bool] = None, **kwargs: Any ) -> ssl.SSLSocket: """Returns an ``ssl.SSLSocket`` wrapping the given socket. ``ssl_options`` may be either an `ssl.SSLContext` object or a dictionary (as accepted by `ssl_options_to_context`). Additional keyword arguments are passed to `ssl.SSLContext.wrap_socket`. .. versionchanged:: 6.2 Added server_side argument. Omitting this argument will result in a DeprecationWarning on Python 3.10. """ context = ssl_options_to_context(ssl_options, server_side=server_side) if server_side is None: server_side = False assert ssl.HAS_SNI # TODO: add a unittest for hostname validation (python added server-side SNI support in 3.4) # In the meantime it can be manually tested with # python3 -m tornado.httpclient https://sni.velox.ch return context.wrap_socket( socket, server_hostname=server_hostname, server_side=server_side, **kwargs )
Defines an option in the global namespace. See `OptionParser.define`.
def define( name: str, default: Any = None, type: Optional[type] = None, help: Optional[str] = None, metavar: Optional[str] = None, multiple: bool = False, group: Optional[str] = None, callback: Optional[Callable[[Any], None]] = None, ) -> None: """Defines an option in the global namespace. See `OptionParser.define`. """ return options.define( name, default=default, type=type, help=help, metavar=metavar, multiple=multiple, group=group, callback=callback, )
Parses global options from the command line. See `OptionParser.parse_command_line`.
def parse_command_line( args: Optional[List[str]] = None, final: bool = True ) -> List[str]: """Parses global options from the command line. See `OptionParser.parse_command_line`. """ return options.parse_command_line(args, final=final)
Parses global options from a config file. See `OptionParser.parse_config_file`.
def parse_config_file(path: str, final: bool = True) -> None: """Parses global options from a config file. See `OptionParser.parse_config_file`. """ return options.parse_config_file(path, final=final)
Prints all the command line options to stderr (or another file). See `OptionParser.print_help`.
def print_help(file: Optional[TextIO] = None) -> None: """Prints all the command line options to stderr (or another file). See `OptionParser.print_help`. """ return options.print_help(file)
Adds a parse callback, to be invoked when option parsing is done. See `OptionParser.add_parse_callback`
def add_parse_callback(callback: Callable[[], None]) -> None: """Adds a parse callback, to be invoked when option parsing is done. See `OptionParser.add_parse_callback` """ options.add_parse_callback(callback)
Returns the number of processors on this machine.
def cpu_count() -> int: """Returns the number of processors on this machine.""" if multiprocessing is None: return 1 try: return multiprocessing.cpu_count() except NotImplementedError: pass try: return os.sysconf("SC_NPROCESSORS_CONF") # type: ignore except (AttributeError, ValueError): pass gen_log.error("Could not detect number of processors; assuming 1") return 1
Starts multiple worker processes. If ``num_processes`` is None or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If ``num_processes`` is given and > 0, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the ``autoreload=True`` option to `tornado.web.Application` which defaults to True when ``debug=True``). When using multiple processes, no IOLoops can be created or referenced until after the call to ``fork_processes``. In each child process, ``fork_processes`` returns its *task id*, a number between 0 and ``num_processes``. Processes that exit abnormally (due to a signal or non-zero exit status) are restarted with the same id (up to ``max_restarts`` times). In the parent process, ``fork_processes`` calls ``sys.exit(0)`` after all child processes have exited normally. max_restarts defaults to 100. Availability: Unix
def fork_processes( num_processes: Optional[int], max_restarts: Optional[int] = None ) -> int: """Starts multiple worker processes. If ``num_processes`` is None or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If ``num_processes`` is given and > 0, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the ``autoreload=True`` option to `tornado.web.Application` which defaults to True when ``debug=True``). When using multiple processes, no IOLoops can be created or referenced until after the call to ``fork_processes``. In each child process, ``fork_processes`` returns its *task id*, a number between 0 and ``num_processes``. Processes that exit abnormally (due to a signal or non-zero exit status) are restarted with the same id (up to ``max_restarts`` times). In the parent process, ``fork_processes`` calls ``sys.exit(0)`` after all child processes have exited normally. max_restarts defaults to 100. Availability: Unix """ if sys.platform == "win32": # The exact form of this condition matters to mypy; it understands # if but not assert in this context. raise Exception("fork not available on windows") if max_restarts is None: max_restarts = 100 global _task_id assert _task_id is None if num_processes is None or num_processes <= 0: num_processes = cpu_count() gen_log.info("Starting %d processes", num_processes) children = {} def start_child(i: int) -> Optional[int]: pid = os.fork() if pid == 0: # child process _reseed_random() global _task_id _task_id = i return i else: children[pid] = i return None for i in range(num_processes): id = start_child(i) if id is not None: return id num_restarts = 0 while children: pid, status = os.wait() if pid not in children: continue id = children.pop(pid) if os.WIFSIGNALED(status): gen_log.warning( "child %d (pid %d) killed by signal %d, restarting", id, pid, os.WTERMSIG(status), ) elif os.WEXITSTATUS(status) != 0: gen_log.warning( "child %d (pid %d) exited with status %d, restarting", id, pid, os.WEXITSTATUS(status), ) else: gen_log.info("child %d (pid %d) exited normally", id, pid) continue num_restarts += 1 if num_restarts > max_restarts: raise RuntimeError("Too many child restarts, giving up") new_id = start_child(id) if new_id is not None: return new_id # All child processes exited cleanly, so exit the master process # instead of just returning to right after the call to # fork_processes (which will probably just start up another IOLoop # unless the caller checks the return value). sys.exit(0)
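The usual pattern, sketched (Unix only; the port number is illustrative): bind sockets in the parent so every child shares them, then fork before touching any IOLoop::

    from tornado.netutil import bind_sockets
    from tornado.process import fork_processes, task_id

    sockets = bind_sockets(8888)  # must happen before forking
    fork_processes(0)             # 0/None forks one child per CPU
    # From here on we are in a child process.
    print("worker", task_id(), "starting")
    # ... create the IOLoop/HTTPServer here and serve `sockets` ...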
Returns the current task id, if any. Returns None if this process was not created by `fork_processes`.
def task_id() -> Optional[int]: """Returns the current task id, if any. Returns None if this process was not created by `fork_processes`. """ global _task_id return _task_id
None-safe wrapper around url_unescape to handle unmatched optional groups correctly. Note that args are passed as bytes so the handler can decide what encoding to use.
def _unquote_or_none(s: Optional[str]) -> Optional[bytes]: # noqa: F811 """None-safe wrapper around url_unescape to handle unmatched optional groups correctly. Note that args are passed as bytes so the handler can decide what encoding to use. """ if s is None: return s return url_unescape(s, encoding=None, plus=False)
Transform whitespace in ``text`` according to ``mode``. Available modes are: * ``all``: Return all whitespace unmodified. * ``single``: Collapse consecutive whitespace with a single whitespace character, preserving newlines. * ``oneline``: Collapse all runs of whitespace into a single space character, removing all newlines in the process. .. versionadded:: 4.3
def filter_whitespace(mode: str, text: str) -> str: """Transform whitespace in ``text`` according to ``mode``. Available modes are: * ``all``: Return all whitespace unmodified. * ``single``: Collapse consecutive whitespace with a single whitespace character, preserving newlines. * ``oneline``: Collapse all runs of whitespace into a single space character, removing all newlines in the process. .. versionadded:: 4.3 """ if mode == "all": return text elif mode == "single": text = re.sub(r"([\t ]+)", " ", text) text = re.sub(r"(\s*\n\s*)", "\n", text) return text elif mode == "oneline": return re.sub(r"(\s+)", " ", text) else: raise Exception("invalid whitespace mode %s" % mode)
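For example::

    from tornado.template import filter_whitespace

    text = "a  b\n\n  c"
    assert filter_whitespace("all", text) == "a  b\n\n  c"
    assert filter_whitespace("single", text) == "a b\nc"
    assert filter_whitespace("oneline", text) == "a b c"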
Binds a server socket to an available port on localhost. Returns a tuple (socket, port). .. versionchanged:: 4.4 Always binds to ``127.0.0.1`` without resolving the name ``localhost``. .. versionchanged:: 6.2 Added optional ``address`` argument to override the default "127.0.0.1".
def bind_unused_port( reuse_port: bool = False, address: str = "127.0.0.1" ) -> Tuple[socket.socket, int]: """Binds a server socket to an available port on localhost. Returns a tuple (socket, port). .. versionchanged:: 4.4 Always binds to ``127.0.0.1`` without resolving the name ``localhost``. .. versionchanged:: 6.2 Added optional ``address`` argument to override the default "127.0.0.1". """ sock = netutil.bind_sockets( 0, address, family=socket.AF_INET, reuse_port=reuse_port )[0] port = sock.getsockname()[1] return sock, port
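Typical use in a test that needs a real listening socket::

    from tornado.testing import bind_unused_port

    sock, port = bind_unused_port()
    try:
        ...  # start a server on `sock`, point a client at `port`
    finally:
        sock.close()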
Get the global timeout setting for async tests. Returns a float, the timeout in seconds. .. versionadded:: 3.1
def get_async_test_timeout() -> float: """Get the global timeout setting for async tests. Returns a float, the timeout in seconds. .. versionadded:: 3.1 """ env = os.environ.get("ASYNC_TEST_TIMEOUT") if env is not None: try: return float(env) except ValueError: pass return 5
Testing equivalent of ``@gen.coroutine``, to be applied to test methods. ``@gen.coroutine`` cannot be used on tests because the `.IOLoop` is not already running. ``@gen_test`` should be applied to test methods on subclasses of `AsyncTestCase`. Example:: class MyTest(AsyncHTTPTestCase): @gen_test def test_something(self): response = yield self.http_client.fetch(self.get_url('/')) By default, ``@gen_test`` times out after 5 seconds. The timeout may be overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable, or for each test with the ``timeout`` keyword argument:: class MyTest(AsyncHTTPTestCase): @gen_test(timeout=10) def test_something_slow(self): response = yield self.http_client.fetch(self.get_url('/')) Note that ``@gen_test`` is incompatible with `AsyncTestCase.stop`, `AsyncTestCase.wait`, and `AsyncHTTPTestCase.fetch`. Use ``yield self.http_client.fetch(self.get_url())`` as shown above instead. .. versionadded:: 3.1 The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment variable. .. versionchanged:: 4.0 The wrapper now passes along ``*args, **kwargs`` so it can be used on functions with arguments.
def gen_test( # noqa: F811 func: Optional[Callable[..., Union[Generator, "Coroutine"]]] = None, timeout: Optional[float] = None, ) -> Union[ Callable[..., None], Callable[[Callable[..., Union[Generator, "Coroutine"]]], Callable[..., None]], ]: """Testing equivalent of ``@gen.coroutine``, to be applied to test methods. ``@gen.coroutine`` cannot be used on tests because the `.IOLoop` is not already running. ``@gen_test`` should be applied to test methods on subclasses of `AsyncTestCase`. Example:: class MyTest(AsyncHTTPTestCase): @gen_test def test_something(self): response = yield self.http_client.fetch(self.get_url('/')) By default, ``@gen_test`` times out after 5 seconds. The timeout may be overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable, or for each test with the ``timeout`` keyword argument:: class MyTest(AsyncHTTPTestCase): @gen_test(timeout=10) def test_something_slow(self): response = yield self.http_client.fetch(self.get_url('/')) Note that ``@gen_test`` is incompatible with `AsyncTestCase.stop`, `AsyncTestCase.wait`, and `AsyncHTTPTestCase.fetch`. Use ``yield self.http_client.fetch(self.get_url())`` as shown above instead. .. versionadded:: 3.1 The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment variable. .. versionchanged:: 4.0 The wrapper now passes along ``*args, **kwargs`` so it can be used on functions with arguments. """ if timeout is None: timeout = get_async_test_timeout() def wrap(f: Callable[..., Union[Generator, "Coroutine"]]) -> Callable[..., None]: # Stack up several decorators to allow us to access the generator # object itself. In the innermost wrapper, we capture the generator # and save it in an attribute of self. Next, we run the wrapped # function through @gen.coroutine. Finally, the coroutine is # wrapped again to make it synchronous with run_sync. # # This is a good case study arguing for either some sort of # extensibility in the gen decorators or cancellation support. @functools.wraps(f) def pre_coroutine(self, *args, **kwargs): # type: (AsyncTestCase, *Any, **Any) -> Union[Generator, Coroutine] # Type comments used to avoid pypy3 bug. result = f(self, *args, **kwargs) if isinstance(result, Generator) or inspect.iscoroutine(result): self._test_generator = result else: self._test_generator = None return result if inspect.iscoroutinefunction(f): coro = pre_coroutine else: coro = gen.coroutine(pre_coroutine) # type: ignore[assignment] @functools.wraps(coro) def post_coroutine(self, *args, **kwargs): # type: (AsyncTestCase, *Any, **Any) -> None try: return self.io_loop.run_sync( functools.partial(coro, self, *args, **kwargs), timeout=timeout ) except TimeoutError as e: # run_sync raises an error with an unhelpful traceback. # If the underlying generator is still running, we can throw the # exception back into it so the stack trace is replaced by the # point where the test is stopped. The only reason the generator # would not be running would be if it were cancelled, which means # a native coroutine, so we can rely on the cr_running attribute. if self._test_generator is not None and getattr( self._test_generator, "cr_running", True ): self._test_generator.throw(e) # In case the test contains an overly broad except # clause, we may get back here. # Coroutine was stopped or didn't raise a useful stack trace, # so re-raise the original exception which is better than nothing. 
raise return post_coroutine if func is not None: # Used like: # @gen_test # def f(self): # pass return wrap(func) else: # Used like @gen_test(timeout=10) return wrap
Use a contextmanager to setUp a test case.
def setup_with_context_manager(testcase: unittest.TestCase, cm: Any) -> Any: """Use a contextmanager to setUp a test case.""" val = cm.__enter__() testcase.addCleanup(cm.__exit__, None, None, None) return val
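A sketch using ``unittest.mock.patch.dict`` as the context manager (the patched variable is illustrative)::

    import os
    import unittest
    from unittest import mock

    from tornado.testing import setup_with_context_manager

    class EnvTest(unittest.TestCase):
        def setUp(self):
            super().setUp()
            # The patch stays active for the whole test; __exit__ runs
            # automatically via addCleanup.
            setup_with_context_manager(
                self, mock.patch.dict(os.environ, {"ASYNC_TEST_TIMEOUT": "10"})
            )

        def test_env(self):
            self.assertEqual(os.environ["ASYNC_TEST_TIMEOUT"], "10")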
A simple test runner.

This test runner is essentially equivalent to `unittest.main` from the standard library, but adds support for Tornado-style option parsing and log formatting. It is *not* necessary to use this `main` function to run tests using `AsyncTestCase`; these tests are self-contained and can run with any test runner.

The easiest way to run a test is via the command line::

    python -m tornado.testing tornado.test.web_test

See the standard library ``unittest`` module for ways in which tests can be specified.

Projects with many tests may wish to define a test script like ``tornado/test/runtests.py``. This script should define a method ``all()`` which returns a test suite and then call `tornado.testing.main()`. Note that even when a test script is used, the ``all()`` test suite may be overridden by naming a single test on the command line::

    # Runs all tests
    python -m tornado.test.runtests
    # Runs one test
    python -m tornado.test.runtests tornado.test.web_test

Additional keyword arguments are passed through to ``unittest.main()``. For example, use ``tornado.testing.main(verbosity=2)`` to show many test details as they are run. See http://docs.python.org/library/unittest.html#unittest.main for full argument list.

.. versionchanged:: 5.0

   This function produces no output of its own; only that produced by the `unittest` module (previously it would add a PASS or FAIL log message).
def main(**kwargs: Any) -> None: """A simple test runner. This test runner is essentially equivalent to `unittest.main` from the standard library, but adds support for Tornado-style option parsing and log formatting. It is *not* necessary to use this `main` function to run tests using `AsyncTestCase`; these tests are self-contained and can run with any test runner. The easiest way to run a test is via the command line:: python -m tornado.testing tornado.test.web_test See the standard library ``unittest`` module for ways in which tests can be specified. Projects with many tests may wish to define a test script like ``tornado/test/runtests.py``. This script should define a method ``all()`` which returns a test suite and then call `tornado.testing.main()`. Note that even when a test script is used, the ``all()`` test suite may be overridden by naming a single test on the command line:: # Runs all tests python -m tornado.test.runtests # Runs one test python -m tornado.test.runtests tornado.test.web_test Additional keyword arguments passed through to ``unittest.main()``. For example, use ``tornado.testing.main(verbosity=2)`` to show many test details as they are run. See http://docs.python.org/library/unittest.html#unittest.main for full argument list. .. versionchanged:: 5.0 This function produces no output of its own; only that produced by the `unittest` module (previously it would add a PASS or FAIL log message). """ from tornado.options import define, options, parse_command_line define( "exception_on_interrupt", type=bool, default=True, help=( "If true (default), ctrl-c raises a KeyboardInterrupt " "exception. This prints a stack trace but cannot interrupt " "certain operations. If false, the process is more reliably " "killed, but does not print a stack trace." ), ) # support the same options as unittest's command-line interface define("verbose", type=bool) define("quiet", type=bool) define("failfast", type=bool) define("catch", type=bool) define("buffer", type=bool) argv = [sys.argv[0]] + parse_command_line(sys.argv) if not options.exception_on_interrupt: signal.signal(signal.SIGINT, signal.SIG_DFL) if options.verbose is not None: kwargs["verbosity"] = 2 if options.quiet is not None: kwargs["verbosity"] = 0 if options.failfast is not None: kwargs["failfast"] = True if options.catch is not None: kwargs["catchbreak"] = True if options.buffer is not None: kwargs["buffer"] = True if __name__ == "__main__" and len(argv) == 1: print("No tests specified", file=sys.stderr) sys.exit(1) # In order to be able to run tests by their fully-qualified name # on the command line without importing all tests here, # module must be set to None. Python 3.2's unittest.main ignores # defaultTest if no module is given (it tries to do its own # test discovery, which is incompatible with auto2to3), so don't # set module if we're not asking for a specific test. if len(argv) > 1: unittest.main(module=None, argv=argv, **kwargs) # type: ignore else: unittest.main(defaultTest="all", argv=argv, **kwargs)
Imports an object by name. ``import_object('x')`` is equivalent to ``import x``. ``import_object('x.y.z')`` is equivalent to ``from x.y import z``. >>> import tornado.escape >>> import_object('tornado.escape') is tornado.escape True >>> import_object('tornado.escape.utf8') is tornado.escape.utf8 True >>> import_object('tornado') is tornado True >>> import_object('tornado.missing_module') Traceback (most recent call last): ... ImportError: No module named missing_module
def import_object(name: str) -> Any: """Imports an object by name. ``import_object('x')`` is equivalent to ``import x``. ``import_object('x.y.z')`` is equivalent to ``from x.y import z``. >>> import tornado.escape >>> import_object('tornado.escape') is tornado.escape True >>> import_object('tornado.escape.utf8') is tornado.escape.utf8 True >>> import_object('tornado') is tornado True >>> import_object('tornado.missing_module') Traceback (most recent call last): ... ImportError: No module named missing_module """ if name.count(".") == 0: return __import__(name) parts = name.split(".") obj = __import__(".".join(parts[:-1]), fromlist=[parts[-1]]) try: return getattr(obj, parts[-1]) except AttributeError: raise ImportError("No module named %s" % parts[-1])
Provides the errno from an Exception object.

There are cases where the ``errno`` attribute is not set, so we pull the errno out of ``args`` instead; but if an exception is instantiated without any args, even that lookup fails. This function abstracts all that behavior to give you a safe way to get the errno.
def errno_from_exception(e: BaseException) -> Optional[int]:
    """Provides the errno from an Exception object.

    There are cases where the ``errno`` attribute is not set, so we pull
    the errno out of ``args`` instead; but if an exception is instantiated
    without any args, even that lookup fails. This function abstracts all
    that behavior to give you a safe way to get the errno.
    """

    if hasattr(e, "errno"):
        return e.errno  # type: ignore
    elif e.args:
        return e.args[0]
    else:
        return None
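For instance::

    import errno

    from tornado.util import errno_from_exception

    try:
        open("/no/such/path")
    except OSError as e:
        assert errno_from_exception(e) == errno.ENOENT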
Unescape a string escaped by `re.escape`. May raise ``ValueError`` for regular expressions which could not have been produced by `re.escape` (for example, strings containing ``\d`` cannot be unescaped). .. versionadded:: 4.4
def re_unescape(s: str) -> str: r"""Unescape a string escaped by `re.escape`. May raise ``ValueError`` for regular expressions which could not have been produced by `re.escape` (for example, strings containing ``\d`` cannot be unescaped). .. versionadded:: 4.4 """ return _re_unescape_pattern.sub(_re_unescape_replacement, s)
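For example::

    import re

    from tornado.util import re_unescape

    assert re_unescape(re.escape("a+b (c)")) == "a+b (c)"

    try:
        re_unescape(r"\d")  # could not have been produced by re.escape
    except ValueError:
        pass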
Equivalent to ``td.total_seconds()`` (introduced in Python 2.7).
def timedelta_to_seconds(td): # type: (datetime.timedelta) -> float """Equivalent to ``td.total_seconds()`` (introduced in Python 2.7).""" return td.total_seconds()
Websocket masking function. `mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length. Returns a `bytes` object of the same length as `data` with the mask applied as specified in section 5.3 of RFC 6455. This pure-python implementation may be replaced by an optimized version when available.
def _websocket_mask_python(mask: bytes, data: bytes) -> bytes: """Websocket masking function. `mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length. Returns a `bytes` object of the same length as `data` with the mask applied as specified in section 5.3 of RFC 6455. This pure-python implementation may be replaced by an optimized version when available. """ mask_arr = array.array("B", mask) unmasked_arr = array.array("B", data) for i in range(len(data)): unmasked_arr[i] = unmasked_arr[i] ^ mask_arr[i % 4] return unmasked_arr.tobytes()
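A quick check of the involution property (the function is private to ``tornado.util``, though importable)::

    from tornado.util import _websocket_mask_python

    mask = b"abcd"
    payload = b"hello websocket"
    masked = _websocket_mask_python(mask, payload)
    # XOR-masking twice with the same key restores the original bytes.
    assert _websocket_mask_python(mask, masked) == payload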
Apply to `RequestHandler` subclasses to enable streaming body support.

This decorator implies the following changes:

* `.HTTPServerRequest.body` is undefined, and body arguments will not be included in `RequestHandler.get_argument`.
* `RequestHandler.prepare` is called when the request headers have been read instead of after the entire body has been read.
* The subclass must define a method ``data_received(self, data):``, which will be called zero or more times as data is available. Note that if the request has an empty body, ``data_received`` may not be called.
* ``prepare`` and ``data_received`` may return Futures (such as via ``@gen.coroutine``), in which case the next method will not be called until those futures have completed.
* The regular HTTP method (``post``, ``put``, etc) will be called after the entire body has been read.

See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/stable/demos/file_upload/>`_ for example usage.
def stream_request_body(cls: Type[_RequestHandlerType]) -> Type[_RequestHandlerType]:
    """Apply to `RequestHandler` subclasses to enable streaming body support.

    This decorator implies the following changes:

    * `.HTTPServerRequest.body` is undefined, and body arguments will not
      be included in `RequestHandler.get_argument`.
    * `RequestHandler.prepare` is called when the request headers have been
      read instead of after the entire body has been read.
    * The subclass must define a method ``data_received(self, data):``, which
      will be called zero or more times as data is available. Note that
      if the request has an empty body, ``data_received`` may not be called.
    * ``prepare`` and ``data_received`` may return Futures (such as via
      ``@gen.coroutine``), in which case the next method will not be called
      until those futures have completed.
    * The regular HTTP method (``post``, ``put``, etc) will be called after
      the entire body has been read.

    See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/stable/demos/file_upload/>`_
    for example usage.
    """  # noqa: E501
    if not issubclass(cls, RequestHandler):
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    cls._stream_request_body = True
    return cls
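A minimal streaming handler, sketched::

    from tornado import web

    @web.stream_request_body
    class UploadHandler(web.RequestHandler):
        def prepare(self):
            self.bytes_read = 0

        def data_received(self, chunk):
            self.bytes_read += len(chunk)

        def put(self):
            self.write("received %d bytes" % self.bytes_read)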
Use this decorator to remove trailing slashes from the request path. For example, a request to ``/foo/`` would redirect to ``/foo`` with this decorator. Your request handler mapping should use a regular expression like ``r'/foo/*'`` in conjunction with using the decorator.
def removeslash( method: Callable[..., Optional[Awaitable[None]]] ) -> Callable[..., Optional[Awaitable[None]]]: """Use this decorator to remove trailing slashes from the request path. For example, a request to ``/foo/`` would redirect to ``/foo`` with this decorator. Your request handler mapping should use a regular expression like ``r'/foo/*'`` in conjunction with using the decorator. """ @functools.wraps(method) def wrapper( # type: ignore self: RequestHandler, *args, **kwargs ) -> Optional[Awaitable[None]]: if self.request.path.endswith("/"): if self.request.method in ("GET", "HEAD"): uri = self.request.path.rstrip("/") if uri: # don't try to redirect '/' to '' if self.request.query: uri += "?" + self.request.query self.redirect(uri, permanent=True) return None else: raise HTTPError(404) return method(self, *args, **kwargs) return wrapper
Use this decorator to add a missing trailing slash to the request path. For example, a request to ``/foo`` would redirect to ``/foo/`` with this decorator. Your request handler mapping should use a regular expression like ``r'/foo/?'`` in conjunction with using the decorator.
def addslash( method: Callable[..., Optional[Awaitable[None]]] ) -> Callable[..., Optional[Awaitable[None]]]: """Use this decorator to add a missing trailing slash to the request path. For example, a request to ``/foo`` would redirect to ``/foo/`` with this decorator. Your request handler mapping should use a regular expression like ``r'/foo/?'`` in conjunction with using the decorator. """ @functools.wraps(method) def wrapper( # type: ignore self: RequestHandler, *args, **kwargs ) -> Optional[Awaitable[None]]: if not self.request.path.endswith("/"): if self.request.method in ("GET", "HEAD"): uri = self.request.path + "/" if self.request.query: uri += "?" + self.request.query self.redirect(uri, permanent=True) return None raise HTTPError(404) return method(self, *args, **kwargs) return wrapper
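Both decorators sketched together with the matching route patterns::

    from tornado import web

    class AboutHandler(web.RequestHandler):
        @web.removeslash
        def get(self):
            self.write("about")  # /about/ redirects here as /about

    class WikiHandler(web.RequestHandler):
        @web.addslash
        def get(self):
            self.write("wiki")   # /wiki redirects here as /wiki/

    app = web.Application([
        (r"/about/*", AboutHandler),
        (r"/wiki/?", WikiHandler),
    ])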
Decorate methods with this to require that the user be logged in. If the user is not logged in, they will be redirected to the configured `login url <RequestHandler.get_login_url>`. If you configure a login url with a query parameter, Tornado will assume you know what you're doing and use it as-is. If not, it will add a `next` parameter so the login page knows where to send you once you're logged in.
def authenticated( method: Callable[..., Optional[Awaitable[None]]] ) -> Callable[..., Optional[Awaitable[None]]]: """Decorate methods with this to require that the user be logged in. If the user is not logged in, they will be redirected to the configured `login url <RequestHandler.get_login_url>`. If you configure a login url with a query parameter, Tornado will assume you know what you're doing and use it as-is. If not, it will add a `next` parameter so the login page knows where to send you once you're logged in. """ @functools.wraps(method) def wrapper( # type: ignore self: RequestHandler, *args, **kwargs ) -> Optional[Awaitable[None]]: if not self.current_user: if self.request.method in ("GET", "HEAD"): url = self.get_login_url() if "?" not in url: if urllib.parse.urlsplit(url).scheme: # if login url is absolute, make next absolute too next_url = self.request.full_url() else: assert self.request.uri is not None next_url = self.request.uri url += "?" + urlencode(dict(next=next_url)) self.redirect(url) return None raise HTTPError(403) return method(self, *args, **kwargs) return wrapper
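A sketch of the pieces involved (the cookie secret and login URL are placeholders)::

    from tornado import web

    class ProfileHandler(web.RequestHandler):
        def get_current_user(self):
            return self.get_secure_cookie("user")  # None if not logged in

        @web.authenticated
        def get(self):
            self.write(b"hello, " + self.current_user)

    app = web.Application(
        [(r"/profile", ProfileHandler)],
        cookie_secret="CHANGE_ME",
        login_url="/login",
    )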
Client-side websocket support. Takes a url and returns a Future whose result is a `WebSocketClientConnection`. ``compression_options`` is interpreted in the same way as the return value of `.WebSocketHandler.get_compression_options`. The connection supports two styles of operation. In the coroutine style, the application typically calls `~.WebSocketClientConnection.read_message` in a loop:: conn = yield websocket_connect(url) while True: msg = yield conn.read_message() if msg is None: break # Do something with msg In the callback style, pass an ``on_message_callback`` to ``websocket_connect``. In both styles, a message of ``None`` indicates that the connection has been closed. ``subprotocols`` may be a list of strings specifying proposed subprotocols. The selected protocol may be found on the ``selected_subprotocol`` attribute of the connection object when the connection is complete. .. versionchanged:: 3.2 Also accepts ``HTTPRequest`` objects in place of urls. .. versionchanged:: 4.1 Added ``compression_options`` and ``on_message_callback``. .. versionchanged:: 4.5 Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size`` arguments, which have the same meaning as in `WebSocketHandler`. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. .. versionchanged:: 5.1 Added the ``subprotocols`` argument. .. versionchanged:: 6.3 Added the ``resolver`` argument.
def websocket_connect( url: Union[str, httpclient.HTTPRequest], callback: Optional[Callable[["Future[WebSocketClientConnection]"], None]] = None, connect_timeout: Optional[float] = None, on_message_callback: Optional[Callable[[Union[None, str, bytes]], None]] = None, compression_options: Optional[Dict[str, Any]] = None, ping_interval: Optional[float] = None, ping_timeout: Optional[float] = None, max_message_size: int = _default_max_message_size, subprotocols: Optional[List[str]] = None, resolver: Optional[Resolver] = None, ) -> "Awaitable[WebSocketClientConnection]": """Client-side websocket support. Takes a url and returns a Future whose result is a `WebSocketClientConnection`. ``compression_options`` is interpreted in the same way as the return value of `.WebSocketHandler.get_compression_options`. The connection supports two styles of operation. In the coroutine style, the application typically calls `~.WebSocketClientConnection.read_message` in a loop:: conn = yield websocket_connect(url) while True: msg = yield conn.read_message() if msg is None: break # Do something with msg In the callback style, pass an ``on_message_callback`` to ``websocket_connect``. In both styles, a message of ``None`` indicates that the connection has been closed. ``subprotocols`` may be a list of strings specifying proposed subprotocols. The selected protocol may be found on the ``selected_subprotocol`` attribute of the connection object when the connection is complete. .. versionchanged:: 3.2 Also accepts ``HTTPRequest`` objects in place of urls. .. versionchanged:: 4.1 Added ``compression_options`` and ``on_message_callback``. .. versionchanged:: 4.5 Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size`` arguments, which have the same meaning as in `WebSocketHandler`. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. .. versionchanged:: 5.1 Added the ``subprotocols`` argument. .. versionchanged:: 6.3 Added the ``resolver`` argument. """ if isinstance(url, httpclient.HTTPRequest): assert connect_timeout is None request = url # Copy and convert the headers dict/object (see comments in # AsyncHTTPClient.fetch) request.headers = httputil.HTTPHeaders(request.headers) else: request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout) request = cast( httpclient.HTTPRequest, httpclient._RequestProxy(request, httpclient.HTTPRequest._DEFAULTS), ) conn = WebSocketClientConnection( request, on_message_callback=on_message_callback, compression_options=compression_options, ping_interval=ping_interval, ping_timeout=ping_timeout, max_message_size=max_message_size, subprotocols=subprotocols, resolver=resolver, ) if callback is not None: IOLoop.current().add_future(conn.connect_future, callback) return conn.connect_future
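The docstring's examples use the legacy ``yield`` style; an equivalent native-coroutine sketch, assuming an echo server is listening at the placeholder URL:

import asyncio
from tornado.websocket import websocket_connect

async def echo_client():
    conn = await websocket_connect("ws://localhost:8888/echo")
    await conn.write_message("hello")
    while True:
        msg = await conn.read_message()
        if msg is None:  # None means the connection was closed
            break
        print("got:", msg)

asyncio.run(echo_client())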
Convert an `asyncio.Future` to a `tornado.concurrent.Future`. .. versionadded:: 4.1 .. deprecated:: 5.0 Tornado ``Futures`` have been merged with `asyncio.Future`, so this method is now a no-op.
def to_tornado_future(asyncio_future: asyncio.Future) -> asyncio.Future: """Convert an `asyncio.Future` to a `tornado.concurrent.Future`. .. versionadded:: 4.1 .. deprecated:: 5.0 Tornado ``Futures`` have been merged with `asyncio.Future`, so this method is now a no-op. """ return asyncio_future
Convert a Tornado yieldable object to an `asyncio.Future`. .. versionadded:: 4.1 .. versionchanged:: 4.3 Now accepts any yieldable object, not just `tornado.concurrent.Future`. .. deprecated:: 5.0 Tornado ``Futures`` have been merged with `asyncio.Future`, so this method is now equivalent to `tornado.gen.convert_yielded`.
def to_asyncio_future(tornado_future: asyncio.Future) -> asyncio.Future: """Convert a Tornado yieldable object to an `asyncio.Future`. .. versionadded:: 4.1 .. versionchanged:: 4.3 Now accepts any yieldable object, not just `tornado.concurrent.Future`. .. deprecated:: 5.0 Tornado ``Futures`` have been merged with `asyncio.Future`, so this method is now equivalent to `tornado.gen.convert_yielded`. """ return convert_yielded(tornado_future)
Install ``AsyncioSelectorReactor`` as the default Twisted reactor. .. deprecated:: 5.1 This function is provided for backwards compatibility; code that does not require compatibility with older versions of Tornado should use ``twisted.internet.asyncioreactor.install()`` directly. .. versionchanged:: 6.0.3 In Tornado 5.x and before, this function installed a reactor based on the Tornado ``IOLoop``. When that reactor implementation was removed in Tornado 6.0.0, this function was removed as well. It was restored in Tornado 6.0.3 using the ``asyncio`` reactor instead.
def install() -> None: """Install ``AsyncioSelectorReactor`` as the default Twisted reactor. .. deprecated:: 5.1 This function is provided for backwards compatibility; code that does not require compatibility with older versions of Tornado should use ``twisted.internet.asyncioreactor.install()`` directly. .. versionchanged:: 6.0.3 In Tornado 5.x and before, this function installed a reactor based on the Tornado ``IOLoop``. When that reactor implementation was removed in Tornado 6.0.0, this function was removed as well. It was restored in Tornado 6.0.3 using the ``asyncio`` reactor instead. """ from twisted.internet.asyncioreactor import install install()
Find circular references in a list of objects. The garbage list contains objects that participate in a cycle, but also the larger set of objects kept alive by that cycle. This function finds subsets of those objects that make up the cycle(s).
def find_circular_references(garbage): """Find circular references in a list of objects. The garbage list contains objects that participate in a cycle, but also the larger set of objects kept alive by that cycle. This function finds subsets of those objects that make up the cycle(s). """ def inner(level): for item in level: item_id = id(item) if item_id not in garbage_ids: continue if item_id in visited_ids: continue if item_id in stack_ids: candidate = stack[stack.index(item) :] candidate.append(item) found.append(candidate) continue stack.append(item) stack_ids.add(item_id) inner(gc.get_referents(item)) stack.pop() stack_ids.remove(item_id) visited_ids.add(item_id) found: typing.List[object] = [] stack = [] stack_ids = set() garbage_ids = set(map(id, garbage)) visited_ids = set() inner(garbage) return found
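A small self-contained check (the cycle below is constructed deliberately; ``DEBUG_SAVEALL`` routes collected objects into ``gc.garbage``):

import gc

gc.set_debug(gc.DEBUG_SAVEALL)
a, b = [], []
a.append(b)  # a -> b
b.append(a)  # b -> a, closing the cycle
del a, b
gc.collect()

cycles = find_circular_references(gc.garbage)
print(len(cycles))  # at least 1: the two lists form a loop
gc.garbage[:] = []  # clear in place to avoid warnings at shutdown
gc.set_debug(0)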
Raise AssertionError if the wrapped code creates garbage with cycles.
@contextlib.contextmanager
def assert_no_cycle_garbage():
    """Raise AssertionError if the wrapped code creates garbage with cycles."""
    gc.disable()
    gc.collect()
    gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_SAVEALL)
    yield
    try:
        # We have DEBUG_STATS on which causes gc.collect to write to stderr.
        # Capture the output instead of spamming the logs on passing runs.
        f = io.StringIO()
        old_stderr = sys.stderr
        sys.stderr = f
        try:
            gc.collect()
        finally:
            sys.stderr = old_stderr
        garbage = gc.garbage[:]
        # Must clear gc.garbage (the same object, not just replacing it with a
        # new list) to avoid warnings at shutdown.
        gc.garbage[:] = []
        if len(garbage) == 0:
            return
        for circular in find_circular_references(garbage):
            f.write("\n==========\n Circular \n==========")
            for item in circular:
                f.write(f"\n {repr(item)}")
            for item in circular:
                if isinstance(item, types.FrameType):
                    f.write(f"\nLocals: {item.f_locals}")
                    f.write(f"\nTraceback: {repr(item)}")
                    traceback.print_stack(item)
        del garbage
        raise AssertionError(f.getvalue())
    finally:
        gc.set_debug(0)
        gc.enable()
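With the ``contextmanager`` decorator in place, usage is a plain ``with`` block; the hypothetical ``make_cycle`` below trips the assertion:

def make_cycle():
    x = {}
    x["self"] = x  # the dict refers to itself
    del x          # now unreachable, but cyclic

try:
    with assert_no_cycle_garbage():
        make_cycle()
except AssertionError as e:
    print("cycle detected:", e)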
Return two empty dicts suitable for use with parse_multipart_form_data. mypy insists on type annotations for dict literals, so this lets us avoid the verbose types throughout this test.
def form_data_args() -> Tuple[Dict[str, List[bytes]], Dict[str, List[HTTPFile]]]: """Return two empty dicts suitable for use with parse_multipart_form_data. mypy insists on type annotations for dict literals, so this lets us avoid the verbose types throughout this test. """ return {}, {}
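Typical call site, paired with ``tornado.httputil.parse_multipart_form_data`` (the boundary and body below are illustrative):

from tornado.httputil import parse_multipart_form_data

data = b"""--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"

Foo
--1234--""".replace(b"\n", b"\r\n")

args, files = form_data_args()
parse_multipart_form_data(b"1234", data, args, files)
print(files["files"][0].filename)  # ab.txt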
Dummy implementation of getaddrinfo for use in mocks
def _failing_getaddrinfo(*args): """Dummy implementation of getaddrinfo for use in mocks""" raise socket.gaierror(errno.EIO, "mock: lookup failed")
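A sketch of wiring the stub in with ``unittest.mock`` (the patch target is illustrative):

import socket
from unittest import mock

with mock.patch("socket.getaddrinfo", _failing_getaddrinfo):
    try:
        socket.getaddrinfo("example.com", 80)
    except socket.gaierror as e:
        print("lookup failed as intended:", e)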
Returns a local port number that will refuse all connections. Return value is (cleanup_func, port); the cleanup function must be called to free the port for reuse.
def refusing_port():
    """Returns a local port number that will refuse all connections.

    Return value is (cleanup_func, port); the cleanup function must be
    called to free the port for reuse.
    """
    # On Travis CI, port numbers are reassigned frequently. To avoid
    # collisions with other tests, we use an open client-side socket's
    # ephemeral port number to ensure that nothing can listen on that
    # port.
    server_socket, port = bind_unused_port()
    server_socket.setblocking(True)
    client_socket = socket.socket()
    client_socket.connect(("127.0.0.1", port))
    conn, client_addr = server_socket.accept()
    conn.close()
    server_socket.close()
    return (client_socket.close, client_addr[1])
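Usage sketch: connecting to the returned port typically raises ``ConnectionRefusedError``, since nothing can listen on it until the cleanup runs.

import socket

cleanup, port = refusing_port()
try:
    try:
        socket.create_connection(("127.0.0.1", port), timeout=1)
    except ConnectionRefusedError:
        print("refused, as expected")
finally:
    cleanup()  # free the port for reuse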
Execute ``s`` in a given context and return the result namespace. Used to define functions for tests in particular python versions that would be syntax errors in older versions.
def exec_test(caller_globals, caller_locals, s): """Execute ``s`` in a given context and return the result namespace. Used to define functions for tests in particular python versions that would be syntax errors in older versions. """ # Flatten the real global and local namespace into our fake # globals: it's all global from the perspective of code defined # in s. global_namespace = dict(caller_globals, **caller_locals) # type: ignore local_namespace = {} # type: typing.Dict[str, typing.Any] exec(textwrap.dedent(s), global_namespace, local_namespace) return local_namespace
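For example, defining a coroutine that would be a syntax error on interpreters without ``async def``:

namespace = exec_test(globals(), locals(), """
    async def add_one(x):
        return x + 1
""")
add_one = namespace["add_one"]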
Compatibility shim for unittest.TestCase.subTest. Usage: ``with tornado.test.util.subTest(self, x=x):``
def subTest(test, *args, **kwargs): """Compatibility shim for unittest.TestCase.subTest. Usage: ``with tornado.test.util.subTest(self, x=x):`` """ try: subTest = test.subTest # py34+ except AttributeError: subTest = contextlib.contextmanager(lambda *a, **kw: (yield)) return subTest(*args, **kwargs)
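Inside a ``unittest.TestCase`` method, call it as you would ``self.subTest``, passing the test instance explicitly (this fragment assumes it runs within a test method):

for x in (1, 2, 3):
    with subTest(self, x=x):
        self.assertGreater(x, 0)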
Context manager to ignore deprecation warnings.
@contextlib.contextmanager
def ignore_deprecation():
    """Context manager to ignore deprecation warnings."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        yield
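With the ``contextmanager`` decorator in place:

import warnings

with ignore_deprecation():
    warnings.warn("old API", DeprecationWarning)  # suppressed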
doc : str, document to parse user_input : bool, optional. If True, yield only options requiring user input [default: True]
def doc2opt(doc, user_input=True):
    """
    doc : str, document to parse
    user_input : bool, optional. If True, yield only options
        requiring user input [default: True].
    """
    RE = RE_OPT_INPUT if user_input else RE_OPT
    return ('--' + i for i in RE.findall(doc))
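A sketch of the expected behavior, with hypothetical stand-ins for the module-level patterns (``RE_OPT_INPUT`` and ``RE_OPT`` are not shown in this excerpt):

import re

# Hypothetical patterns: every option name, vs. only options whose
# declared type implies a user-supplied value.
RE_OPT = re.compile(r'(\w+)  : ')
RE_OPT_INPUT = re.compile(r'(\w+)  : (?:str|int|float)')

doc = """
desc  : str, optional
total  : int or float, optional
ascii  : bool, optional
"""
print(list(doc2opt(doc)))         # ['--desc', '--total']
print(list(doc2opt(doc, False)))  # ['--desc', '--total', '--ascii']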
arglist : bool, whether to create argument lists raw : bool, ignores arglist and indents by 2 spaces
def doc2rst(doc, arglist=True, raw=False):
    """
    arglist : bool, whether to create argument lists
    raw : bool, ignores arglist and indents by 2 spaces
    """
    doc = doc.replace('`', '``')
    if raw:
        doc = doc.replace('\n ', '\n   ')  # add two spaces of indent, per the docstring
    else:
        doc = dedent(doc)
        if arglist:
            doc = '\n'.join(i if not i or i[0] == ' ' else '* ' + i + ' '
                            for i in doc.split('\n'))
    return doc
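A quick check of the default (``arglist``) path: each non-indented line becomes a bullet.

doc = """
total  : int, the total iterations
desc  : str, a prefix message
"""
print(doc2rst(doc))
# * total  : int, the total iterations
# * desc  : str, a prefix message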
Coroutine chain pipe `send()`ing to `target`. This: >>> r = receiver() >>> p = producer(r) >>> next(r) >>> next(p) Becomes: >>> r = receiver() >>> t = tqdm.pipe(r) >>> p = producer(t) >>> next(r) >>> next(p)
def tqdm_pipe(target, **tqdm_kwargs): """ Coroutine chain pipe `send()`ing to `target`. This: >>> r = receiver() >>> p = producer(r) >>> next(r) >>> next(p) Becomes: >>> r = receiver() >>> t = tqdm.pipe(r) >>> p = producer(t) >>> next(r) >>> next(p) """ with tqdm(**tqdm_kwargs) as pbar: while True: obj = (yield) target.send(obj) pbar.update()
Wraps tqdm instance. Don't forget to close() or __exit__() the tqdm instance once you're done with it (easiest using `with` syntax). Example ------- >>> with tqdm(...) as t: ... reporthook = my_hook(t) ... urllib.urlretrieve(..., reporthook=reporthook)
def my_hook(t): """Wraps tqdm instance. Don't forget to close() or __exit__() the tqdm instance once you're done with it (easiest using `with` syntax). Example ------- >>> with tqdm(...) as t: ... reporthook = my_hook(t) ... urllib.urlretrieve(..., reporthook=reporthook) """ last_b = [0] def update_to(b=1, bsize=1, tsize=None): """ b : int, optional Number of blocks transferred so far [default: 1]. bsize : int, optional Size of each block (in tqdm units) [default: 1]. tsize : int, optional Total size (in tqdm units). If [default: None] or -1, remains unchanged. """ if tsize not in (None, -1): t.total = tsize displayed = t.update((b - last_b[0]) * bsize) last_b[0] = b return displayed return update_to
Fixture for all tests ensuring environment cleanup
@pytest.fixture(autouse=True)
def pretest_posttest():
    """Fixture for all tests ensuring environment cleanup"""
    try:
        sys.setswitchinterval(1)
    except AttributeError:
        sys.setcheckinterval(100)  # deprecated
    if getattr(tqdm, "_instances", False):
        n = len(tqdm._instances)
        if n:
            tqdm._instances.clear()
            raise EnvironmentError(f"{n} `tqdm` instances still in existence PRE-test")
    yield
    if getattr(tqdm, "_instances", False):
        n = len(tqdm._instances)
        if n:
            tqdm._instances.clear()
            raise EnvironmentError(f"{n} `tqdm` instances still in existence POST-test")
Dummy function
def incr(x): """Dummy function""" return x + 1
Test contrib.concurrent.thread_map
def test_thread_map(): """Test contrib.concurrent.thread_map""" with closing(StringIO()) as our_file: a = range(9) b = [i + 1 for i in a] try: assert thread_map(lambda x: x + 1, a, file=our_file) == b except ImportError as err: skip(str(err)) assert thread_map(incr, a, file=our_file) == b