<SYSTEM_TASK:> Sets the ``Etag`` header based on static url version. <END_TASK>
<USER_TASK:>
Description:
def compute_etag(self) -> Optional[str]:
    """Sets the ``Etag`` header based on static url version.

    This allows efficient ``If-None-Match`` checks against cached
    versions, and sends the correct ``Etag`` for a partial response
    (i.e. the same ``Etag`` as the full file).

    .. versionadded:: 3.1
    """
    assert self.absolute_path is not None
    version_hash = self._get_cached_version(self.absolute_path)
    if not version_hash:
        return None
    return '"%s"' % (version_hash,)
<SYSTEM_TASK:> Sets the content and caching headers on the response. <END_TASK>
<USER_TASK:>
Description:
def set_headers(self) -> None:
    """Sets the content and caching headers on the response.

    .. versionadded:: 3.1
    """
    self.set_header("Accept-Ranges", "bytes")
    self.set_etag_header()

    if self.modified is not None:
        self.set_header("Last-Modified", self.modified)

    content_type = self.get_content_type()
    if content_type:
        self.set_header("Content-Type", content_type)

    cache_time = self.get_cache_time(self.path, self.modified, content_type)
    if cache_time > 0:
        self.set_header(
            "Expires",
            datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_time),
        )
        self.set_header("Cache-Control", "max-age=" + str(cache_time))

    self.set_extra_headers(self.path)
<SYSTEM_TASK:> Returns True if the headers indicate that we should return 304. <END_TASK>
<USER_TASK:>
Description:
def should_return_304(self) -> bool:
    """Returns True if the headers indicate that we should return 304.

    .. versionadded:: 3.1
    """
    # If client sent If-None-Match, use it, ignore If-Modified-Since
    if self.request.headers.get("If-None-Match"):
        return self.check_etag_header()

    # Check the If-Modified-Since, and don't send the result if the
    # content has not been modified
    ims_value = self.request.headers.get("If-Modified-Since")
    if ims_value is not None:
        date_tuple = email.utils.parsedate(ims_value)
        if date_tuple is not None:
            if_since = datetime.datetime(*date_tuple[:6])
            assert self.modified is not None
            if if_since >= self.modified:
                return True

    return False
<SYSTEM_TASK:> Returns the absolute location of ``path`` relative to ``root``. <END_TASK>
<USER_TASK:>
Description:
def get_absolute_path(cls, root: str, path: str) -> str:
    """Returns the absolute location of ``path`` relative to ``root``.

    ``root`` is the path configured for this `StaticFileHandler`
    (in most cases the ``static_path`` `Application` setting).

    This class method may be overridden in subclasses. By default
    it returns a filesystem path, but other strings may be used
    as long as they are unique and understood by the subclass's
    overridden `get_content`.

    .. versionadded:: 3.1
    """
    abspath = os.path.abspath(os.path.join(root, path))
    return abspath
<SYSTEM_TASK:> Retrieve the content of the requested resource which is located at the given absolute path. <END_TASK>
<USER_TASK:>
Description:
def get_content(
    cls, abspath: str, start: int = None, end: int = None
) -> Generator[bytes, None, None]:
    """Retrieve the content of the requested resource which is located
    at the given absolute path.

    This class method may be overridden by subclasses. Note that its
    signature is different from other overridable class methods (no
    ``settings`` argument); this is deliberate to ensure that
    ``abspath`` is able to stand on its own as a cache key.

    This method should either return a byte string or an iterator of
    byte strings. The latter is preferred for large files as it helps
    reduce memory fragmentation.

    .. versionadded:: 3.1
    """
    with open(abspath, "rb") as file:
        if start is not None:
            file.seek(start)
        if end is not None:
            remaining = end - (start or 0)  # type: Optional[int]
        else:
            remaining = None
        while True:
            chunk_size = 64 * 1024
            if remaining is not None and remaining < chunk_size:
                chunk_size = remaining
            chunk = file.read(chunk_size)
            if chunk:
                if remaining is not None:
                    remaining -= len(chunk)
                yield chunk
            else:
                if remaining is not None:
                    assert remaining == 0
                return
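A minimal consumption sketch (the file path is hypothetical; ``get_content`` is a classmethod on ``tornado.web.StaticFileHandler``)::

    from tornado.web import StaticFileHandler

    total = 0
    # Stream the first 64 KiB of a file without loading it all at once.
    for chunk in StaticFileHandler.get_content("/var/www/static/big.bin", start=0, end=65536):
        total += len(chunk)  # chunks are at most 64 KiB each
    print(total)  # 65536, assuming the file is at least that large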
<SYSTEM_TASK:> Returns the time that ``self.absolute_path`` was last modified. <END_TASK>
<USER_TASK:>
Description:
def get_modified_time(self) -> Optional[datetime.datetime]:
    """Returns the time that ``self.absolute_path`` was last modified.

    May be overridden in subclasses. Should return a
    `~datetime.datetime` object or None.

    .. versionadded:: 3.1
    """
    stat_result = self._stat()
    # NOTE: Historically, this used stat_result[stat.ST_MTIME],
    # which truncates the fractional portion of the timestamp. It
    # was changed from that form to stat_result.st_mtime to
    # satisfy mypy (which disallows the bracket operator), but the
    # latter form returns a float instead of an int. For
    # consistency with the past (and because we have a unit test
    # that relies on this), we truncate the float here, although
    # I'm not sure that's the right thing to do.
    modified = datetime.datetime.utcfromtimestamp(int(stat_result.st_mtime))
    return modified
<SYSTEM_TASK:> Override to customize cache control behavior. <END_TASK>
<USER_TASK:>
Description:
def get_cache_time(
    self, path: str, modified: Optional[datetime.datetime], mime_type: str
) -> int:
    """Override to customize cache control behavior.

    Return a positive number of seconds to make the result cacheable
    for that amount of time or 0 to mark resource as cacheable for
    an unspecified amount of time (subject to browser heuristics).

    By default returns cache expiry of 10 years for resources requested
    with ``v`` argument.
    """
    return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
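The docstring invites overriding; a sketch of a subclass that caches every static asset for one hour, regardless of the ``v`` argument (class name hypothetical)::

    import tornado.web

    class HourCachedStaticHandler(tornado.web.StaticFileHandler):
        def get_cache_time(self, path, modified, mime_type):
            # Always allow one hour of caching.
            return 3600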
<SYSTEM_TASK:> Constructs a versioned url for the given path. <END_TASK>
<USER_TASK:>
Description:
def make_static_url(
    cls, settings: Dict[str, Any], path: str, include_version: bool = True
) -> str:
    """Constructs a versioned url for the given path.

    This method may be overridden in subclasses (but note that it
    is a class method rather than an instance method). Subclasses
    are only required to implement the signature
    ``make_static_url(cls, settings, path)``; other keyword arguments
    may be passed through `~RequestHandler.static_url` but are not
    standard.

    ``settings`` is the `Application.settings` dictionary. ``path``
    is the static path being requested. The url returned should be
    relative to the current host.

    ``include_version`` determines whether the generated URL should
    include the query string containing the version hash of the file
    corresponding to the given ``path``.
    """
    url = settings.get("static_url_prefix", "/static/") + path
    if not include_version:
        return url

    version_hash = cls.get_version(settings, path)
    if not version_hash:
        return url

    return "%s?v=%s" % (url, version_hash)
<SYSTEM_TASK:> Converts a static URL path into a filesystem path. <END_TASK>
<USER_TASK:>
Description:
def parse_url_path(self, url_path: str) -> str:
    """Converts a static URL path into a filesystem path.

    ``url_path`` is the path component of the URL with
    ``static_url_prefix`` removed. The return value should be a
    filesystem path relative to ``static_path``.

    This is the inverse of `make_static_url`.
    """
    if os.path.sep != "/":
        url_path = url_path.replace("/", os.path.sep)
    return url_path
<SYSTEM_TASK:> Generate the version string to be used in static URLs. <END_TASK>
<USER_TASK:>
Description:
def get_version(cls, settings: Dict[str, Any], path: str) -> Optional[str]:
    """Generate the version string to be used in static URLs.

    ``settings`` is the `Application.settings` dictionary and ``path``
    is the relative location of the requested asset on the filesystem.
    The returned value should be a string, or ``None`` if no version
    could be determined.

    .. versionchanged:: 3.1
       This method was previously recommended for subclasses to override;
       `get_content_version` is now preferred as it allows the base
       class to handle caching of the result.
    """
    abs_path = cls.get_absolute_path(settings["static_path"], path)
    return cls._get_cached_version(abs_path)
<SYSTEM_TASK:> Renders a template and returns it as a string. <END_TASK>
<USER_TASK:>
Description:
def render_string(self, path: str, **kwargs: Any) -> bytes:
    """Renders a template and returns it as a string."""
    return self.handler.render_string(path, **kwargs)
<SYSTEM_TASK:> Decodes the given value from a URL. <END_TASK>
<USER_TASK:>
Description:
def url_unescape(  # noqa: F811
    value: Union[str, bytes], encoding: Optional[str] = "utf-8", plus: bool = True
) -> Union[str, bytes]:
    """Decodes the given value from a URL.

    The argument may be either a byte or unicode string.

    If encoding is None, the result will be a byte string. Otherwise,
    the result is a unicode string in the specified encoding.

    If ``plus`` is true (the default), plus signs will be interpreted
    as spaces (literal plus signs must be represented as "%2B"). This
    is appropriate for query strings and form-encoded values but not
    for the path component of a URL. Note that this default is the
    reverse of Python's urllib module.

    .. versionadded:: 3.1
       The ``plus`` argument
    """
    if encoding is None:
        if plus:
            # unquote_to_bytes doesn't have a _plus variant
            value = to_basestring(value).replace("+", " ")
        return urllib.parse.unquote_to_bytes(value)
    else:
        unquote = urllib.parse.unquote_plus if plus else urllib.parse.unquote
        return unquote(to_basestring(value), encoding=encoding)
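A quick usage sketch showing the effect of ``plus`` and ``encoding``::

    from tornado.escape import url_unescape

    url_unescape("a+b%2Bc")                # 'a b+c'  (query-string semantics)
    url_unescape("a+b%2Bc", plus=False)    # 'a+b+c'  (path semantics)
    url_unescape("%C3%A9", encoding=None)  # b'\xc3\xa9' (raw bytes)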
<SYSTEM_TASK:> Parses a query string like urlparse.parse_qs, but returns the values as byte strings. <END_TASK>
<USER_TASK:>
Description:
def parse_qs_bytes(
    qs: str, keep_blank_values: bool = False, strict_parsing: bool = False
) -> Dict[str, List[bytes]]:
    """Parses a query string like urlparse.parse_qs, but returns the
    values as byte strings.

    Keys still become type str (interpreted as latin1 in python3!)
    because it's too painful to keep them as byte strings in
    python3 and in practice they're nearly always ascii anyway.
    """
    # This is gross, but python3 doesn't give us another way.
    # Latin1 is the universal donor of character encodings.
    result = urllib.parse.parse_qs(
        qs, keep_blank_values, strict_parsing, encoding="latin1", errors="strict"
    )
    encoded = {}
    for k, v in result.items():
        encoded[k] = [i.encode("latin1") for i in v]
    return encoded
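A small illustration of the latin1 key/byte-value behavior (a sketch, assuming the function is imported from ``tornado.escape``)::

    from tornado.escape import parse_qs_bytes

    parse_qs_bytes("a=b&a=c")  # {'a': [b'b', b'c']}
    parse_qs_bytes("x=%e9")    # {'x': [b'\xe9']} -- value bytes pass through latin1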
<SYSTEM_TASK:> Converts plain text into HTML with links. <END_TASK>
<USER_TASK:>
Description:
def linkify(
    text: Union[str, bytes],
    shorten: bool = False,
    extra_params: Union[str, Callable[[str], str]] = "",
    require_protocol: bool = False,
    permitted_protocols: List[str] = ["http", "https"],
) -> str:
    """Converts plain text into HTML with links.

    For example: ``linkify("Hello http://tornadoweb.org!")`` would return
    ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``

    Parameters:

    * ``shorten``: Long urls will be shortened for display.

    * ``extra_params``: Extra text to include in the link tag, or a
      callable taking the link as an argument and returning the extra text
      e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
      or::

          def extra_params_cb(url):
              if url.startswith("http://example.com"):
                  return 'class="internal"'
              else:
                  return 'class="external" rel="nofollow"'
          linkify(text, extra_params=extra_params_cb)

    * ``require_protocol``: Only linkify urls which include a protocol. If
      this is False, urls such as www.facebook.com will also be linkified.

    * ``permitted_protocols``: List (or set) of protocols which should be
      linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
      "mailto"])``. It is very unsafe to include protocols such as
      ``javascript``.
    """
    if extra_params and not callable(extra_params):
        extra_params = " " + extra_params.strip()

    def make_link(m: typing.Match) -> str:
        url = m.group(1)
        proto = m.group(2)
        if require_protocol and not proto:
            return url  # no protocol, no linkify

        if proto and proto not in permitted_protocols:
            return url  # bad protocol, no linkify

        href = m.group(1)
        if not proto:
            href = "http://" + href  # no proto specified, use http

        if callable(extra_params):
            params = " " + extra_params(href).strip()
        else:
            params = extra_params

        # clip long urls. max_len is just an approximation
        max_len = 30
        if shorten and len(url) > max_len:
            before_clip = url
            if proto:
                proto_len = len(proto) + 1 + len(m.group(3) or "")  # +1 for :
            else:
                proto_len = 0

            parts = url[proto_len:].split("/")
            if len(parts) > 1:
                # Grab the whole host part plus the first bit of the path
                # The path is usually not that interesting once shortened
                # (no more slug, etc), so it really just provides a little
                # extra indication of shortening.
                url = (
                    url[:proto_len]
                    + parts[0]
                    + "/"
                    + parts[1][:8].split("?")[0].split(".")[0]
                )

            if len(url) > max_len * 1.5:  # still too long
                url = url[:max_len]

            if url != before_clip:
                amp = url.rfind("&")
                # avoid splitting html char entities
                if amp > max_len - 5:
                    url = url[:amp]
                url += "..."

                if len(url) >= len(before_clip):
                    url = before_clip
                else:
                    # full url is visible on mouse-over (for those who don't
                    # have a status bar, such as Safari by default)
                    params += ' title="%s"' % href

        return u'<a href="%s"%s>%s</a>' % (href, params, url)

    # First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entities other than &amp; so
    # that we won't pick up &quot;, etc.
    text = _unicode(xhtml_escape(text))
    return _URL_RE.sub(make_link, text)
<SYSTEM_TASK:> Clears the `IOLoop` for the current thread. <END_TASK>
<USER_TASK:>
Description:
def clear_current() -> None:
    """Clears the `IOLoop` for the current thread.

    Intended primarily for use by test frameworks in between tests.

    .. versionchanged:: 5.0
       This method also clears the current `asyncio` event loop.
    """
    old = IOLoop.current(instance=False)
    if old is not None:
        old._clear_current_hook()
    if asyncio is None:
        IOLoop._current.instance = None
<SYSTEM_TASK:> Registers the given handler to receive the given events for ``fd``. <END_TASK>
<USER_TASK:>
Description:
def add_handler(  # noqa: F811
    self, fd: Union[int, _Selectable], handler: Callable[..., None], events: int
) -> None:
    """Registers the given handler to receive the given events for ``fd``.

    The ``fd`` argument may either be an integer file descriptor or
    a file-like object with a ``fileno()`` and ``close()`` method.

    The ``events`` argument is a bitwise or of the constants
    ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.

    When an event occurs, ``handler(fd, events)`` will be run.

    .. versionchanged:: 4.0
       Added the ability to pass file-like objects in addition to
       raw file descriptors.
    """
    raise NotImplementedError()
<SYSTEM_TASK:> Runs a function in a ``concurrent.futures.Executor``. <END_TASK>
<USER_TASK:>
Description:
def run_in_executor(
    self,
    executor: Optional[concurrent.futures.Executor],
    func: Callable[..., _T],
    *args: Any
) -> Awaitable[_T]:
    """Runs a function in a ``concurrent.futures.Executor``. If
    ``executor`` is ``None``, the IO loop's default executor will be used.

    Use `functools.partial` to pass keyword arguments to ``func``.

    .. versionadded:: 5.0
    """
    if executor is None:
        if not hasattr(self, "_executor"):
            from tornado.process import cpu_count

            self._executor = concurrent.futures.ThreadPoolExecutor(
                max_workers=(cpu_count() * 5)
            )  # type: concurrent.futures.Executor
        executor = self._executor
    c_future = executor.submit(func, *args)
    # Concurrent Futures are not usable with await. Wrap this in a
    # Tornado Future instead, using self.add_future for thread-safety.
    t_future = Future()  # type: Future[_T]
    self.add_future(c_future, lambda f: chain_future(f, t_future))
    return t_future
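A usage sketch for offloading blocking work to the default executor (the ``checksum`` helper is hypothetical)::

    import asyncio
    import hashlib

    from tornado.ioloop import IOLoop

    def checksum(data: bytes) -> str:
        # CPU-bound work that would otherwise block the event loop.
        return hashlib.sha256(data).hexdigest()

    async def main() -> None:
        digest = await IOLoop.current().run_in_executor(None, checksum, b"payload")
        print(digest)

    asyncio.run(main())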
<SYSTEM_TASK:> Concatenate url and arguments regardless of whether url has existing query parameters. <END_TASK>
<USER_TASK:>
Description:
def url_concat(
    url: str,
    args: Union[
        None, Dict[str, str], List[Tuple[str, str]], Tuple[Tuple[str, str], ...]
    ],
) -> str:
    """Concatenate url and arguments regardless of whether
    url has existing query parameters.

    ``args`` may be either a dictionary or a list of key-value pairs
    (the latter allows for multiple values with the same key).

    >>> url_concat("http://example.com/foo", dict(c="d"))
    'http://example.com/foo?c=d'
    >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
    'http://example.com/foo?a=b&c=d'
    >>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")])
    'http://example.com/foo?a=b&c=d&c=d2'
    """
    if args is None:
        return url
    parsed_url = urlparse(url)
    if isinstance(args, dict):
        parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
        parsed_query.extend(args.items())
    elif isinstance(args, list) or isinstance(args, tuple):
        parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
        parsed_query.extend(args)
    else:
        err = "'args' parameter should be dict, list or tuple. Not {0}".format(
            type(args)
        )
        raise TypeError(err)
    final_query = urlencode(parsed_query)
    url = urlunparse(
        (
            parsed_url[0],
            parsed_url[1],
            parsed_url[2],
            parsed_url[3],
            final_query,
            parsed_url[5],
        )
    )
    return url
<SYSTEM_TASK:> Parses a Range header. <END_TASK>
<USER_TASK:>
Description:
def _parse_request_range(
    range_header: str
) -> Optional[Tuple[Optional[int], Optional[int]]]:
    """Parses a Range header.

    Returns either ``None`` or tuple ``(start, end)``.
    Note that while the HTTP headers use inclusive byte positions,
    this method returns indexes suitable for use in slices.

    >>> start, end = _parse_request_range("bytes=1-2")
    >>> start, end
    (1, 3)
    >>> [0, 1, 2, 3, 4][start:end]
    [1, 2]
    >>> _parse_request_range("bytes=6-")
    (6, None)
    >>> _parse_request_range("bytes=-6")
    (-6, None)
    >>> _parse_request_range("bytes=-0")
    (None, 0)
    >>> _parse_request_range("bytes=")
    (None, None)
    >>> _parse_request_range("foo=42")
    >>> _parse_request_range("bytes=1-2,6-10")

    Note: only supports one range (ex, ``bytes=1-2,6-10`` is not allowed).

    See [0] for the details of the range header.

    [0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
    """
    unit, _, value = range_header.partition("=")
    unit, value = unit.strip(), value.strip()
    if unit != "bytes":
        return None
    start_b, _, end_b = value.partition("-")
    try:
        start = _int_or_none(start_b)
        end = _int_or_none(end_b)
    except ValueError:
        return None
    if end is not None:
        if start is None:
            if end != 0:
                start = -end
                end = None
        else:
            end += 1
    return (start, end)
<SYSTEM_TASK:> Parses a form request body. <END_TASK>
<USER_TASK:>
Description:
def parse_body_arguments(
    content_type: str,
    body: bytes,
    arguments: Dict[str, List[bytes]],
    files: Dict[str, List[HTTPFile]],
    headers: HTTPHeaders = None,
) -> None:
    """Parses a form request body.

    Supports ``application/x-www-form-urlencoded`` and
    ``multipart/form-data``. The ``content_type`` parameter should be
    a string and ``body`` should be a byte string. The ``arguments``
    and ``files`` parameters are dictionaries that will be updated
    with the parsed contents.
    """
    if content_type.startswith("application/x-www-form-urlencoded"):
        if headers and "Content-Encoding" in headers:
            gen_log.warning(
                "Unsupported Content-Encoding: %s", headers["Content-Encoding"]
            )
            return
        try:
            uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
        except Exception as e:
            gen_log.warning("Invalid x-www-form-urlencoded body: %s", e)
            uri_arguments = {}
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        if headers and "Content-Encoding" in headers:
            gen_log.warning(
                "Unsupported Content-Encoding: %s", headers["Content-Encoding"]
            )
            return
        try:
            fields = content_type.split(";")
            for field in fields:
                k, sep, v = field.strip().partition("=")
                if k == "boundary" and v:
                    parse_multipart_form_data(utf8(v), body, arguments, files)
                    break
            else:
                raise ValueError("multipart boundary not found")
        except Exception as e:
            gen_log.warning("Invalid multipart/form-data: %s", e)
<SYSTEM_TASK:> Formats a timestamp in the format used by HTTP. <END_TASK>
<USER_TASK:>
Description:
def format_timestamp(
    ts: Union[int, float, tuple, time.struct_time, datetime.datetime]
) -> str:
    """Formats a timestamp in the format used by HTTP.

    The argument may be a numeric timestamp as returned by `time.time`,
    a time tuple as returned by `time.gmtime`, or a `datetime.datetime`
    object.

    >>> format_timestamp(1359312200)
    'Sun, 27 Jan 2013 18:43:20 GMT'
    """
    if isinstance(ts, (int, float)):
        time_num = ts
    elif isinstance(ts, (tuple, time.struct_time)):
        time_num = calendar.timegm(ts)
    elif isinstance(ts, datetime.datetime):
        time_num = calendar.timegm(ts.utctimetuple())
    else:
        raise TypeError("unknown timestamp type: %r" % ts)
    return email.utils.formatdate(time_num, usegmt=True)
<SYSTEM_TASK:> r"""Parse a Content-type like header. <END_TASK> <USER_TASK:> Description: def _parse_header(line: str) -> Tuple[str, Dict[str, str]]: r"""Parse a Content-type like header. Return the main content-type and a dictionary of options. >>> d = "form-data; foo=\"b\\\\a\\\"r\"; file*=utf-8''T%C3%A4st" >>> ct, d = _parse_header(d) >>> ct 'form-data' >>> d['file'] == r'T\u00e4st'.encode('ascii').decode('unicode_escape') True >>> d['foo'] 'b\\a"r' """
parts = _parseparam(";" + line) key = next(parts) # decode_params treats first argument special, but we already stripped key params = [("Dummy", "value")] for p in parts: i = p.find("=") if i >= 0: name = p[:i].strip().lower() value = p[i + 1 :].strip() params.append((name, native_str(value))) decoded_params = email.utils.decode_params(params) decoded_params.pop(0) # get rid of the dummy again pdict = {} for name, decoded_value in decoded_params: value = email.utils.collapse_rfc2231_value(decoded_value) if len(value) >= 2 and value[0] == '"' and value[-1] == '"': value = value[1:-1] pdict[name] = value return key, pdict
<SYSTEM_TASK:> Generator converting a result of ``parse_qs`` back to name-value pairs. <END_TASK>
<USER_TASK:>
Description:
def qs_to_qsl(qs: Dict[str, List[AnyStr]]) -> Iterable[Tuple[str, AnyStr]]:
    """Generator converting a result of ``parse_qs`` back to name-value pairs.

    .. versionadded:: 5.0
    """
    for k, vs in qs.items():
        for v in vs:
            yield (k, v)
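A round-trip sketch with the standard library's ``parse_qs``::

    from urllib.parse import parse_qs
    from tornado.httputil import qs_to_qsl

    qs = parse_qs("a=1&a=2&b=3")  # {'a': ['1', '2'], 'b': ['3']}
    list(qs_to_qsl(qs))           # [('a', '1'), ('a', '2'), ('b', '3')]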
<SYSTEM_TASK:> Handle double quotes and escaping in cookie values. <END_TASK>
<USER_TASK:>
Description:
def _unquote_cookie(s: str) -> str:
    """Handle double quotes and escaping in cookie values.

    This method is copied verbatim from the Python 3.5 standard
    library (http.cookies._unquote) so we don't have to depend on
    non-public interfaces.
    """
    # If there aren't any doublequotes,
    # then there can't be any special characters. See RFC 2109.
    if s is None or len(s) < 2:
        return s
    if s[0] != '"' or s[-1] != '"':
        return s

    # We have to assume that we must decode this string.
    # Down to work.

    # Remove the "s
    s = s[1:-1]

    # Check for special sequences. Examples:
    #    \012 --> \n
    #    \"   --> "
    #
    i = 0
    n = len(s)
    res = []
    while 0 <= i < n:
        o_match = _OctalPatt.search(s, i)
        q_match = _QuotePatt.search(s, i)
        if not o_match and not q_match:  # Neither matched
            res.append(s[i:])
            break
        # else:
        j = k = -1
        if o_match:
            j = o_match.start(0)
        if q_match:
            k = q_match.start(0)
        if q_match and (not o_match or k < j):  # QuotePatt matched
            res.append(s[i:k])
            res.append(s[k + 1])
            i = k + 2
        else:  # OctalPatt matched
            res.append(s[i:j])
            res.append(chr(int(s[j + 1 : j + 4], 8)))
            i = j + 4
    return _nulljoin(res)
<SYSTEM_TASK:> Returns all values for the given header as a list. <END_TASK>
<USER_TASK:>
Description:
def get_list(self, name: str) -> List[str]:
    """Returns all values for the given header as a list."""
    norm_name = _normalized_headers[name]
    return self._as_list.get(norm_name, [])
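A usage sketch with ``tornado.httputil.HTTPHeaders`` (lookups are case-insensitive via normalization)::

    from tornado.httputil import HTTPHeaders

    h = HTTPHeaders()
    h.add("Set-Cookie", "a=1")
    h.add("Set-Cookie", "b=2")
    h.get_list("set-cookie")  # ['a=1', 'b=2']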
<SYSTEM_TASK:> Returns the amount of time it took for this request to execute. <END_TASK>
<USER_TASK:>
Description:
def request_time(self) -> float:
    """Returns the amount of time it took for this request to execute."""
    if self._finish_time is None:
        return time.time() - self._start_time
    else:
        return self._finish_time - self._start_time
<SYSTEM_TASK:> Returns the client's SSL certificate, if any. <END_TASK>
<USER_TASK:>
Description:
def get_ssl_certificate(
    self, binary_form: bool = False
) -> Union[None, Dict, bytes]:
    """Returns the client's SSL certificate, if any.

    To use client certificates, the HTTPServer's
    `ssl.SSLContext.verify_mode` field must be set, e.g.::

        ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ssl_ctx.load_cert_chain("foo.crt", "foo.key")
        ssl_ctx.load_verify_locations("cacerts.pem")
        ssl_ctx.verify_mode = ssl.CERT_REQUIRED
        server = HTTPServer(app, ssl_options=ssl_ctx)

    By default, the return value is a dictionary (or None, if no
    client certificate is present). If ``binary_form`` is true, a
    DER-encoded form of the certificate is returned instead. See
    SSLSocket.getpeercert() in the standard library for more details.
    http://docs.python.org/library/ssl.html#sslsocket-objects
    """
    try:
        if self.connection is None:
            return None
        # TODO: add a method to HTTPConnection for this so it can work with HTTP/2
        return self.connection.stream.socket.getpeercert(  # type: ignore
            binary_form=binary_form
        )
    except SSLError:
        return None
<SYSTEM_TASK:> Adds an `.IOLoop` event handler to accept new connections on ``sock``. <END_TASK>
<USER_TASK:>
Description:
def add_accept_handler(
    sock: socket.socket, callback: Callable[[socket.socket, Any], None]
) -> Callable[[], None]:
    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.

    When a connection is accepted, ``callback(connection, address)`` will
    be run (``connection`` is a socket object, and ``address`` is the
    address of the other end of the connection). Note that this signature
    is different from the ``callback(fd, events)`` signature used for
    `.IOLoop` handlers.

    A callable is returned which, when called, will remove the `.IOLoop`
    event handler and stop processing further incoming connections.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been
       removed.

    .. versionchanged:: 5.0
       A callable is returned (``None`` was returned before).
    """
    io_loop = IOLoop.current()
    removed = [False]

    def accept_handler(fd: socket.socket, events: int) -> None:
        # More connections may come in while we're handling callbacks;
        # to prevent starvation of other tasks we must limit the number
        # of connections we accept at a time. Ideally we would accept
        # up to the number of connections that were waiting when we
        # entered this method, but this information is not available
        # (and rearranging this method to call accept() as many times
        # as possible before running any callbacks would have adverse
        # effects on load balancing in multiprocess configurations).
        # Instead, we use the (default) listen backlog as a rough
        # heuristic for the number of connections we can reasonably
        # accept at once.
        for i in range(_DEFAULT_BACKLOG):
            if removed[0]:
                # The socket was probably closed
                return
            try:
                connection, address = sock.accept()
            except socket.error as e:
                # _ERRNO_WOULDBLOCK indicate we have accepted every
                # connection that is available.
                if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
                    return
                # ECONNABORTED indicates that there was a connection
                # but it was closed while still in the accept queue.
                # (observed on FreeBSD).
                if errno_from_exception(e) == errno.ECONNABORTED:
                    continue
                raise
            set_close_exec(connection.fileno())
            callback(connection, address)

    def remove_handler() -> None:
        io_loop.remove_handler(sock)
        removed[0] = True

    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
    return remove_handler
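A minimal accept-loop sketch (the port number is hypothetical)::

    from tornado import netutil
    from tornado.ioloop import IOLoop

    sock = netutil.bind_sockets(8888)[0]

    def on_connection(connection, address):
        print("connection from", address)
        connection.close()

    remove = netutil.add_accept_handler(sock, on_connection)
    # Call remove() later to stop accepting new connections.
    IOLoop.current().start()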
<SYSTEM_TASK:> Returns ``True`` if the given string is a well-formed IP address. <END_TASK>
<USER_TASK:>
Description:
def is_valid_ip(ip: str) -> bool:
    """Returns ``True`` if the given string is a well-formed IP address.

    Supports IPv4 and IPv6.
    """
    if not ip or "\x00" in ip:
        # getaddrinfo resolves empty strings to localhost, and truncates
        # on zero bytes.
        return False
    try:
        res = socket.getaddrinfo(
            ip, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_NUMERICHOST
        )
        return bool(res)
    except socket.gaierror as e:
        if e.args[0] == socket.EAI_NONAME:
            return False
        raise
    return True
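Quick examples::

    from tornado.netutil import is_valid_ip

    is_valid_ip("192.168.0.1")  # True
    is_valid_ip("2001:db8::1")  # True
    is_valid_ip("example.com")  # False -- hostnames are not addresses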
<SYSTEM_TASK:> Try to convert an ``ssl_options`` dictionary to an `~ssl.SSLContext` object. <END_TASK>
<USER_TASK:>
Description:
def ssl_options_to_context(
    ssl_options: Union[Dict[str, Any], ssl.SSLContext]
) -> ssl.SSLContext:
    """Try to convert an ``ssl_options`` dictionary to an
    `~ssl.SSLContext` object.

    The ``ssl_options`` dictionary contains keywords to be passed to
    `ssl.wrap_socket`. In Python 2.7.9+, `ssl.SSLContext` objects can
    be used instead. This function converts the dict form to its
    `~ssl.SSLContext` equivalent, and may be used when a component which
    accepts both forms needs to upgrade to the `~ssl.SSLContext` version
    to use features like SNI or NPN.
    """
    if isinstance(ssl_options, ssl.SSLContext):
        return ssl_options
    assert isinstance(ssl_options, dict)
    assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
    # Can't use create_default_context since this interface doesn't
    # tell us client vs server.
    context = ssl.SSLContext(ssl_options.get("ssl_version", ssl.PROTOCOL_SSLv23))
    if "certfile" in ssl_options:
        context.load_cert_chain(
            ssl_options["certfile"], ssl_options.get("keyfile", None)
        )
    if "cert_reqs" in ssl_options:
        context.verify_mode = ssl_options["cert_reqs"]
    if "ca_certs" in ssl_options:
        context.load_verify_locations(ssl_options["ca_certs"])
    if "ciphers" in ssl_options:
        context.set_ciphers(ssl_options["ciphers"])
    if hasattr(ssl, "OP_NO_COMPRESSION"):
        # Disable TLS compression to avoid CRIME and related attacks.
        # This constant depends on openssl version 1.0.
        # TODO: Do we need to do this ourselves or can we trust
        # the defaults?
        context.options |= ssl.OP_NO_COMPRESSION
    return context
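A conversion sketch (the certificate paths are hypothetical)::

    import ssl
    from tornado.netutil import ssl_options_to_context

    ctx = ssl_options_to_context({
        "certfile": "server.crt",
        "keyfile": "server.key",
        "cert_reqs": ssl.CERT_NONE,
    })
    assert isinstance(ctx, ssl.SSLContext)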
<SYSTEM_TASK:> Returns an ``ssl.SSLSocket`` wrapping the given socket. <END_TASK>
<USER_TASK:>
Description:
def ssl_wrap_socket(
    socket: socket.socket,
    ssl_options: Union[Dict[str, Any], ssl.SSLContext],
    server_hostname: str = None,
    **kwargs: Any
) -> ssl.SSLSocket:
    """Returns an ``ssl.SSLSocket`` wrapping the given socket.

    ``ssl_options`` may be either an `ssl.SSLContext` object or a
    dictionary (as accepted by `ssl_options_to_context`). Additional
    keyword arguments are passed to ``wrap_socket`` (either the
    `~ssl.SSLContext` method or the `ssl` module function as
    appropriate).
    """
    context = ssl_options_to_context(ssl_options)
    if ssl.HAS_SNI:
        # In python 3.4, wrap_socket only accepts the server_hostname
        # argument if HAS_SNI is true.
        # TODO: add a unittest (python added server-side SNI support in 3.4)
        # In the meantime it can be manually tested with
        # python3 -m tornado.httpclient https://sni.velox.ch
        return context.wrap_socket(socket, server_hostname=server_hostname, **kwargs)
    else:
        return context.wrap_socket(socket, **kwargs)
<SYSTEM_TASK:> Set the given ``exc`` as the `Future`'s exception. <END_TASK>
<USER_TASK:>
Description:
def future_set_exception_unless_cancelled(
    future: "Union[futures.Future[_T], Future[_T]]", exc: BaseException
) -> None:
    """Set the given ``exc`` as the `Future`'s exception.

    If the Future is already canceled, logs the exception instead. If
    this logging is not desired, the caller should explicitly check
    the state of the Future and call ``Future.set_exception`` instead
    of this wrapper.

    Avoids ``asyncio.InvalidStateError`` when calling
    ``set_exception()`` on a cancelled `asyncio.Future`.

    .. versionadded:: 6.0
    """
    if not future.cancelled():
        future.set_exception(exc)
    else:
        app_log.error("Exception after Future was cancelled", exc_info=exc)
<SYSTEM_TASK:> Set the given ``exc_info`` as the `Future`'s exception. <END_TASK>
<USER_TASK:>
Description:
def future_set_exc_info(
    future: "Union[futures.Future[_T], Future[_T]]",
    exc_info: Tuple[
        Optional[type], Optional[BaseException], Optional[types.TracebackType]
    ],
) -> None:
    """Set the given ``exc_info`` as the `Future`'s exception.

    Understands both `asyncio.Future` and the extensions in older
    versions of Tornado to enable better tracebacks on Python 2.

    .. versionadded:: 5.0

    .. versionchanged:: 6.0
       If the future is already cancelled, this function is a no-op.
       (previously ``asyncio.InvalidStateError`` would be raised)
    """
    if exc_info[1] is None:
        raise Exception("future_set_exc_info called with no exception")
    future_set_exception_unless_cancelled(future, exc_info[1])
<SYSTEM_TASK:> Arrange to call ``callback`` when ``future`` is complete. <END_TASK>
<USER_TASK:>
Description:
def future_add_done_callback(  # noqa: F811
    future: "Union[futures.Future[_T], Future[_T]]", callback: Callable[..., None]
) -> None:
    """Arrange to call ``callback`` when ``future`` is complete.

    ``callback`` is invoked with one argument, the ``future``.

    If ``future`` is already done, ``callback`` is invoked immediately.
    This may differ from the behavior of ``Future.add_done_callback``,
    which makes no such guarantee.

    .. versionadded:: 5.0
    """
    if future.done():
        callback(future)
    else:
        future.add_done_callback(callback)
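A sketch of the run-immediately guarantee, using a ``concurrent.futures.Future`` (which invokes callbacks synchronously)::

    from concurrent.futures import Future
    from tornado.concurrent import future_add_done_callback

    f = Future()
    f.set_result(42)
    # The future is already done, so the callback runs right here:
    future_add_done_callback(f, lambda fut: print("result:", fut.result()))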
<SYSTEM_TASK:> Transform whitespace in ``text`` according to ``mode``. <END_TASK>
<USER_TASK:>
Description:
def filter_whitespace(mode: str, text: str) -> str:
    """Transform whitespace in ``text`` according to ``mode``.

    Available modes are:

    * ``all``: Return all whitespace unmodified.
    * ``single``: Collapse consecutive whitespace with a single whitespace
      character, preserving newlines.
    * ``oneline``: Collapse all runs of whitespace into a single space
      character, removing all newlines in the process.

    .. versionadded:: 4.3
    """
    if mode == "all":
        return text
    elif mode == "single":
        text = re.sub(r"([\t ]+)", " ", text)
        text = re.sub(r"(\s*\n\s*)", "\n", text)
        return text
    elif mode == "oneline":
        return re.sub(r"(\s+)", " ", text)
    else:
        raise Exception("invalid whitespace mode %s" % mode)
<SYSTEM_TASK:> Advance the current buffer position by ``size`` bytes. <END_TASK>
<USER_TASK:>
Description:
def advance(self, size: int) -> None:
    """Advance the current buffer position by ``size`` bytes."""
    assert 0 < size <= self._size
    self._size -= size
    pos = self._first_pos

    buffers = self._buffers
    while buffers and size > 0:
        is_large, b = buffers[0]
        b_remain = len(b) - size - pos
        if b_remain <= 0:
            buffers.popleft()
            size -= len(b) - pos
            pos = 0
        elif is_large:
            pos += size
            size = 0
        else:
            # Amortized O(1) shrink for Python 2
            pos += size
            if len(b) <= 2 * pos:
                del typing.cast(bytearray, b)[:pos]
                pos = 0
            size = 0

    assert size == 0
    self._first_pos = pos
<SYSTEM_TASK:> Asynchronously read until we have matched the given regex. <END_TASK>
<USER_TASK:>
Description:
def read_until_regex(self, regex: bytes, max_bytes: int = None) -> Awaitable[bytes]:
    """Asynchronously read until we have matched the given regex.

    The result includes the data that matches the regex and anything
    that came before it.

    If ``max_bytes`` is not None, the connection will be closed if
    more than ``max_bytes`` bytes have been read and the regex is
    not satisfied.

    .. versionchanged:: 4.0
       Added the ``max_bytes`` argument. The ``callback`` argument is
       now optional and a `.Future` will be returned if it is omitted.

    .. versionchanged:: 6.0
       The ``callback`` argument was removed. Use the returned
       `.Future` instead.
    """
    future = self._start_read()
    self._read_regex = re.compile(regex)
    self._read_max_bytes = max_bytes
    try:
        self._try_inline_read()
    except UnsatisfiableReadError as e:
        # Handle this the same way as in _handle_events.
        gen_log.info("Unsatisfiable read, closing connection: %s" % e)
        self.close(exc_info=e)
        return future
    except:
        # Ensure that the future doesn't log an error because its
        # failure was never examined.
        future.add_done_callback(lambda f: f.exception())
        raise
    return future
<SYSTEM_TASK:> Asynchronously read until we have found the given delimiter. <END_TASK>
<USER_TASK:>
Description:
def read_until(self, delimiter: bytes, max_bytes: int = None) -> Awaitable[bytes]:
    """Asynchronously read until we have found the given delimiter.

    The result includes all the data read including the delimiter.

    If ``max_bytes`` is not None, the connection will be closed if
    more than ``max_bytes`` bytes have been read and the delimiter
    is not found.

    .. versionchanged:: 4.0
       Added the ``max_bytes`` argument. The ``callback`` argument is
       now optional and a `.Future` will be returned if it is omitted.

    .. versionchanged:: 6.0
       The ``callback`` argument was removed. Use the returned
       `.Future` instead.
    """
    future = self._start_read()
    self._read_delimiter = delimiter
    self._read_max_bytes = max_bytes
    try:
        self._try_inline_read()
    except UnsatisfiableReadError as e:
        # Handle this the same way as in _handle_events.
        gen_log.info("Unsatisfiable read, closing connection: %s" % e)
        self.close(exc_info=e)
        return future
    except:
        future.add_done_callback(lambda f: f.exception())
        raise
    return future
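A line-reading client sketch (host and port are hypothetical)::

    from tornado.tcpclient import TCPClient

    async def read_greeting(host: str, port: int) -> bytes:
        stream = await TCPClient().connect(host, port)
        try:
            # Includes the delimiter; closes the connection if no CRLF
            # arrives within the first 1024 bytes.
            return await stream.read_until(b"\r\n", max_bytes=1024)
        finally:
            stream.close()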
<SYSTEM_TASK:> Asynchronously reads all data from the socket until it is closed. <END_TASK>
<USER_TASK:>
Description:
def read_until_close(self) -> Awaitable[bytes]:
    """Asynchronously reads all data from the socket until it is closed.

    This will buffer all available data until ``max_buffer_size``
    is reached. If flow control or cancellation are desired, use a
    loop with `read_bytes(partial=True) <.read_bytes>` instead.

    .. versionchanged:: 4.0
       The callback argument is now optional and a `.Future` will
       be returned if it is omitted.

    .. versionchanged:: 6.0
       The ``callback`` and ``streaming_callback`` arguments have
       been removed. Use the returned `.Future` (and `read_bytes`
       with ``partial=True`` for ``streaming_callback``) instead.
    """
    future = self._start_read()
    if self.closed():
        self._finish_read(self._read_buffer_size, False)
        return future
    self._read_until_close = True
    try:
        self._try_inline_read()
    except:
        future.add_done_callback(lambda f: f.exception())
        raise
    return future
<SYSTEM_TASK:> Asynchronously write the given data to this stream. <END_TASK>
<USER_TASK:>
Description:
def write(self, data: Union[bytes, memoryview]) -> "Future[None]":
    """Asynchronously write the given data to this stream.

    This method returns a `.Future` that resolves (with a result
    of ``None``) when the write has been completed.

    The ``data`` argument may be of type `bytes` or `memoryview`.

    .. versionchanged:: 4.0
       Now returns a `.Future` if no callback is given.

    .. versionchanged:: 4.5
       Added support for `memoryview` arguments.

    .. versionchanged:: 6.0
       The ``callback`` argument was removed. Use the returned
       `.Future` instead.
    """
    self._check_closed()
    if data:
        if (
            self.max_write_buffer_size is not None
            and len(self._write_buffer) + len(data) > self.max_write_buffer_size
        ):
            raise StreamBufferFullError("Reached maximum write buffer size")
        self._write_buffer.append(data)
        self._total_write_index += len(data)
    future = Future()  # type: Future[None]
    future.add_done_callback(lambda f: f.exception())
    self._write_futures.append((self._total_write_index, future))
    if not self._connecting:
        self._handle_write()
        if self._write_buffer:
            self._add_io_state(self.io_loop.WRITE)
        self._maybe_add_error_listener()
    return future
<SYSTEM_TASK:> Call the given callback when the stream is closed. <END_TASK>
<USER_TASK:>
Description:
def set_close_callback(self, callback: Optional[Callable[[], None]]) -> None:
    """Call the given callback when the stream is closed.

    This mostly is not necessary for applications that use the
    `.Future` interface; all outstanding ``Futures`` will resolve
    with a `StreamClosedError` when the stream is closed. However,
    it is still useful as a way to signal that the stream has been
    closed while no other read or write is in progress.

    Unlike other callback-based interfaces, ``set_close_callback``
    was not removed in Tornado 6.0.
    """
    self._close_callback = callback
    self._maybe_add_error_listener()
<SYSTEM_TASK:> Close this stream. <END_TASK>
<USER_TASK:>
Description:
def close(
    self,
    exc_info: Union[
        None,
        bool,
        BaseException,
        Tuple[
            "Optional[Type[BaseException]]",
            Optional[BaseException],
            Optional[TracebackType],
        ],
    ] = False,
) -> None:
    """Close this stream.

    If ``exc_info`` is true, set the ``error`` attribute to the current
    exception from `sys.exc_info` (or if ``exc_info`` is a tuple, use
    that instead of `sys.exc_info`).
    """
    if not self.closed():
        if exc_info:
            if isinstance(exc_info, tuple):
                self.error = exc_info[1]
            elif isinstance(exc_info, BaseException):
                self.error = exc_info
            else:
                exc_info = sys.exc_info()
                if any(exc_info):
                    self.error = exc_info[1]
        if self._read_until_close:
            self._read_until_close = False
            self._finish_read(self._read_buffer_size, False)
        if self._state is not None:
            self.io_loop.remove_handler(self.fileno())
            self._state = None
        self.close_fd()
        self._closed = True
    self._signal_closed()
<SYSTEM_TASK:> Attempt to complete the current read operation from buffered data. <END_TASK>
<USER_TASK:>
Description:
def _try_inline_read(self) -> None:
    """Attempt to complete the current read operation from buffered data.

    If the read can be completed without blocking, schedules the
    read callback on the next IOLoop iteration; otherwise starts
    listening for reads on the socket.
    """
    # See if we've already got the data from a previous read
    pos = self._find_read_pos()
    if pos is not None:
        self._read_from_buffer(pos)
        return
    self._check_closed()
    pos = self._read_to_buffer_loop()
    if pos is not None:
        self._read_from_buffer(pos)
        return
    # We couldn't satisfy the read inline, so make sure we're
    # listening for new data unless the stream is closed.
    if not self.closed():
        self._add_io_state(ioloop.IOLoop.READ)
<SYSTEM_TASK:> Reads from the socket and appends the result to the read buffer. <END_TASK>
<USER_TASK:>
Description:
def _read_to_buffer(self) -> Optional[int]:
    """Reads from the socket and appends the result to the read buffer.

    Returns the number of bytes read. Returns 0 if there is nothing
    to read (i.e. the read returns EWOULDBLOCK or equivalent). On
    error closes the socket and raises an exception.
    """
    try:
        while True:
            try:
                if self._user_read_buffer:
                    buf = memoryview(self._read_buffer)[
                        self._read_buffer_size :
                    ]  # type: Union[memoryview, bytearray]
                else:
                    buf = bytearray(self.read_chunk_size)
                bytes_read = self.read_from_fd(buf)
            except (socket.error, IOError, OSError) as e:
                if errno_from_exception(e) == errno.EINTR:
                    continue
                # ssl.SSLError is a subclass of socket.error
                if self._is_connreset(e):
                    # Treat ECONNRESET as a connection close rather than
                    # an error to minimize log spam (the exception will
                    # be available on self.error for apps that care).
                    self.close(exc_info=e)
                    return None
                self.close(exc_info=e)
                raise
            break
        if bytes_read is None:
            return 0
        elif bytes_read == 0:
            self.close()
            return 0
        if not self._user_read_buffer:
            self._read_buffer += memoryview(buf)[:bytes_read]
        self._read_buffer_size += bytes_read
    finally:
        # Break the reference to buf so we don't waste a chunk's worth of
        # memory in case an exception hangs on to our stack frame.
        del buf
    if self._read_buffer_size > self.max_buffer_size:
        gen_log.error("Reached maximum read buffer size")
        self.close()
        raise StreamBufferFullError("Reached maximum read buffer size")
    return bytes_read
<SYSTEM_TASK:> Attempts to complete the currently-pending read from the buffer. <END_TASK>
<USER_TASK:>
Description:
def _read_from_buffer(self, pos: int) -> None:
    """Attempts to complete the currently-pending read from the buffer.

    The argument is either a position in the read buffer or None,
    as returned by _find_read_pos.
    """
    self._read_bytes = self._read_delimiter = self._read_regex = None
    self._read_partial = False
    self._finish_read(pos, False)
<SYSTEM_TASK:> Attempts to find a position in the read buffer that satisfies the currently-pending read. <END_TASK>
<USER_TASK:>
Description:
def _find_read_pos(self) -> Optional[int]:
    """Attempts to find a position in the read buffer that satisfies
    the currently-pending read.

    Returns a position in the buffer if the current read can be satisfied,
    or None if it cannot.
    """
    if self._read_bytes is not None and (
        self._read_buffer_size >= self._read_bytes
        or (self._read_partial and self._read_buffer_size > 0)
    ):
        num_bytes = min(self._read_bytes, self._read_buffer_size)
        return num_bytes
    elif self._read_delimiter is not None:
        # Multi-byte delimiters (e.g. '\r\n') may straddle two
        # chunks in the read buffer, so we can't easily find them
        # without collapsing the buffer. However, since protocols
        # using delimited reads (as opposed to reads of a known
        # length) tend to be "line" oriented, the delimiter is likely
        # to be in the first few chunks. Merge the buffer gradually
        # since large merges are relatively expensive and get undone in
        # _consume().
        if self._read_buffer:
            loc = self._read_buffer.find(
                self._read_delimiter, self._read_buffer_pos
            )
            if loc != -1:
                loc -= self._read_buffer_pos
                delimiter_len = len(self._read_delimiter)
                self._check_max_bytes(self._read_delimiter, loc + delimiter_len)
                return loc + delimiter_len
            self._check_max_bytes(self._read_delimiter, self._read_buffer_size)
    elif self._read_regex is not None:
        if self._read_buffer:
            m = self._read_regex.search(self._read_buffer, self._read_buffer_pos)
            if m is not None:
                loc = m.end() - self._read_buffer_pos
                self._check_max_bytes(self._read_regex, loc)
                return loc
            self._check_max_bytes(self._read_regex, self._read_buffer_size)
    return None
<SYSTEM_TASK:> Connects the socket to a remote address without blocking. <END_TASK>
<USER_TASK:>
Description:
def connect(
    self: _IOStreamType, address: tuple, server_hostname: str = None
) -> "Future[_IOStreamType]":
    """Connects the socket to a remote address without blocking.

    May only be called if the socket passed to the constructor was
    not previously connected. The address parameter is in the same
    format as for `socket.connect <socket.socket.connect>` for the
    type of socket passed to the IOStream constructor, e.g. an
    ``(ip, port)`` tuple. Hostnames are accepted here, but will be
    resolved synchronously and block the IOLoop. If you have a
    hostname instead of an IP address, the `.TCPClient` class is
    recommended instead of calling this method directly. `.TCPClient`
    will do asynchronous DNS resolution and handle both IPv4 and IPv6.

    This method returns a `.Future` whose result after a successful
    connection will be the stream itself.

    In SSL mode, the ``server_hostname`` parameter will be used for
    certificate validation (unless disabled in the ``ssl_options``)
    and SNI (if supported; requires Python 2.7.9+).

    Note that it is safe to call `IOStream.write <BaseIOStream.write>`
    while the connection is pending, in which case the data will be
    written as soon as the connection is ready. Calling `IOStream`
    read methods before the socket is connected works on some
    platforms but is non-portable.

    .. versionchanged:: 4.0
       If no callback is given, returns a `.Future`.

    .. versionchanged:: 4.2
       SSL certificates are validated by default; pass
       ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
       suitably-configured `ssl.SSLContext` to the `SSLIOStream`
       constructor to disable.

    .. versionchanged:: 6.0
       The ``callback`` argument was removed. Use the returned
       `.Future` instead.
    """
    self._connecting = True
    future = Future()  # type: Future[_IOStreamType]
    self._connect_future = typing.cast("Future[IOStream]", future)
    try:
        self.socket.connect(address)
    except socket.error as e:
        # In non-blocking mode we expect connect() to raise an
        # exception with EINPROGRESS or EWOULDBLOCK.
        #
        # On freebsd, other errors such as ECONNREFUSED may be
        # returned immediately when attempting to connect to
        # localhost, so handle them the same way as an error
        # reported later in _handle_connect.
        if (
            errno_from_exception(e) not in _ERRNO_INPROGRESS
            and errno_from_exception(e) not in _ERRNO_WOULDBLOCK
        ):
            if future is None:
                gen_log.warning(
                    "Connect error on fd %s: %s", self.socket.fileno(), e
                )
            self.close(exc_info=e)
            return future
    self._add_io_state(self.io_loop.WRITE)
    return future
<SYSTEM_TASK:> Convert this `IOStream` to an `SSLIOStream`. <END_TASK>
<USER_TASK:>
Description:
def start_tls(
    self,
    server_side: bool,
    ssl_options: Union[Dict[str, Any], ssl.SSLContext] = None,
    server_hostname: str = None,
) -> Awaitable["SSLIOStream"]:
    """Convert this `IOStream` to an `SSLIOStream`.

    This enables protocols that begin in clear-text mode and switch to
    SSL after some initial negotiation (such as the ``STARTTLS``
    extension to SMTP and IMAP).

    This method cannot be used if there are outstanding reads or writes
    on the stream, or if there is any data in the IOStream's buffer
    (data in the operating system's socket buffer is allowed). This
    means it must generally be used immediately after reading or
    writing the last clear-text data. It can also be used immediately
    after connecting, before any reads or writes.

    The ``ssl_options`` argument may be either an `ssl.SSLContext`
    object or a dictionary of keyword arguments for the
    `ssl.wrap_socket` function. The ``server_hostname`` argument will
    be used for certificate validation unless disabled in the
    ``ssl_options``.

    This method returns a `.Future` whose result is the new
    `SSLIOStream`. After this method has been called, any other
    operation on the original stream is undefined.

    If a close callback is defined on this stream, it will be
    transferred to the new stream.

    .. versionadded:: 4.0

    .. versionchanged:: 4.2
       SSL certificates are validated by default; pass
       ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
       suitably-configured `ssl.SSLContext` to disable.
    """
    if (
        self._read_future
        or self._write_futures
        or self._connect_future
        or self._closed
        or self._read_buffer
        or self._write_buffer
    ):
        raise ValueError("IOStream is not idle; cannot convert to SSL")
    if ssl_options is None:
        if server_side:
            ssl_options = _server_ssl_defaults
        else:
            ssl_options = _client_ssl_defaults

    socket = self.socket
    self.io_loop.remove_handler(socket)
    self.socket = None  # type: ignore
    socket = ssl_wrap_socket(
        socket,
        ssl_options,
        server_hostname=server_hostname,
        server_side=server_side,
        do_handshake_on_connect=False,
    )
    orig_close_callback = self._close_callback
    self._close_callback = None

    future = Future()  # type: Future[SSLIOStream]
    ssl_stream = SSLIOStream(socket, ssl_options=ssl_options)
    ssl_stream.set_close_callback(orig_close_callback)
    ssl_stream._ssl_connect_future = future
    ssl_stream.max_buffer_size = self.max_buffer_size
    ssl_stream.read_chunk_size = self.read_chunk_size
    return future
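A client-side STARTTLS sketch (the greeting exchange is hypothetical; real protocols negotiate before upgrading)::

    from tornado.tcpclient import TCPClient

    async def upgrade(host: str, port: int):
        stream = await TCPClient().connect(host, port)
        await stream.read_until(b"\r\n")     # plain-text server greeting
        await stream.write(b"STARTTLS\r\n")  # request the upgrade
        await stream.read_until(b"\r\n")     # server acknowledgement
        # After this point the original stream must not be used.
        return await stream.start_tls(server_side=False, server_hostname=host)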
<SYSTEM_TASK:> Returns ``True`` if peercert is valid according to the configured validation mode and hostname. <END_TASK>
<USER_TASK:>
Description:
def _verify_cert(self, peercert: Any) -> bool:
    """Returns ``True`` if peercert is valid according to the configured
    validation mode and hostname.

    The ssl handshake already tested the certificate for a valid
    CA signature; the only thing that remains is to check
    the hostname.
    """
    if isinstance(self._ssl_options, dict):
        verify_mode = self._ssl_options.get("cert_reqs", ssl.CERT_NONE)
    elif isinstance(self._ssl_options, ssl.SSLContext):
        verify_mode = self._ssl_options.verify_mode
    assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
    if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
        return True
    cert = self.socket.getpeercert()
    if cert is None and verify_mode == ssl.CERT_REQUIRED:
        gen_log.warning("No SSL certificate given")
        return False
    try:
        ssl.match_hostname(peercert, self._server_hostname)
    except ssl.CertificateError as e:
        gen_log.warning("Invalid SSL certificate: %s" % e)
        return False
    else:
        return True
<SYSTEM_TASK:> Wait for the initial SSL handshake to complete. <END_TASK>
<USER_TASK:>
Description:
def wait_for_handshake(self) -> "Future[SSLIOStream]":
    """Wait for the initial SSL handshake to complete.

    This method returns a `.Future` which will resolve to the
    stream itself after the handshake is complete.

    Once the handshake is complete, information such as
    the peer's certificate and NPN/ALPN selections may be accessed
    on ``self.socket``.

    This method is intended for use on server-side streams
    or after using `IOStream.start_tls`; it should not be used
    with `IOStream.connect` (which already waits for the
    handshake to complete). It may only be called once per stream.

    .. versionadded:: 4.2

    .. versionchanged:: 6.0
       The ``callback`` argument was removed. Use the returned
       `.Future` instead.
    """
    if self._ssl_connect_future is not None:
        raise RuntimeError("Already waiting")
    future = self._ssl_connect_future = Future()
    if not self._ssl_accepting:
        self._finish_ssl_connect()
    return future
<SYSTEM_TASK:> Turns on formatted logging output as configured. <END_TASK>
<USER_TASK:>
Description:
def enable_pretty_logging(options: Any = None, logger: logging.Logger = None) -> None:
    """Turns on formatted logging output as configured.

    This is called automatically by `tornado.options.parse_command_line`
    and `tornado.options.parse_config_file`.
    """
    if options is None:
        import tornado.options

        options = tornado.options.options
    if options.logging is None or options.logging.lower() == "none":
        return
    if logger is None:
        logger = logging.getLogger()
    logger.setLevel(getattr(logging, options.logging.upper()))
    if options.log_file_prefix:
        rotate_mode = options.log_rotate_mode
        if rotate_mode == "size":
            channel = logging.handlers.RotatingFileHandler(
                filename=options.log_file_prefix,
                maxBytes=options.log_file_max_size,
                backupCount=options.log_file_num_backups,
                encoding="utf-8",
            )  # type: logging.Handler
        elif rotate_mode == "time":
            channel = logging.handlers.TimedRotatingFileHandler(
                filename=options.log_file_prefix,
                when=options.log_rotate_when,
                interval=options.log_rotate_interval,
                backupCount=options.log_file_num_backups,
                encoding="utf-8",
            )
        else:
            error_message = (
                "The value of log_rotate_mode option should be "
                + '"size" or "time", not "%s".' % rotate_mode
            )
            raise ValueError(error_message)
        channel.setFormatter(LogFormatter(color=False))
        logger.addHandler(channel)

    if options.log_to_stderr or (options.log_to_stderr is None and not logger.handlers):
        # Set up color if we are in a tty and curses is installed
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        logger.addHandler(channel)
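Typical use when not going through ``tornado.options.parse_command_line`` (which calls this automatically)::

    import logging
    from tornado.log import enable_pretty_logging

    enable_pretty_logging()  # configures the root logger from the global options
    logging.getLogger("tornado.general").info("formatted, colorized on a tty")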
<SYSTEM_TASK:> Add logging-related flags to ``options``. <END_TASK>
<USER_TASK:>
Description:
def define_logging_options(options: Any = None) -> None:
    """Add logging-related flags to ``options``.

    These options are present automatically on the default options
    instance; this method is only necessary if you have created
    your own `.OptionParser`.

    .. versionadded:: 4.2
       This function existed in prior versions but was broken and
       undocumented until 4.2.
    """
    if options is None:
        # late import to prevent cycle
        import tornado.options

        options = tornado.options.options
    options.define(
        "logging",
        default="info",
        help=(
            "Set the Python log level. If 'none', tornado won't touch the "
            "logging configuration."
        ),
        metavar="debug|info|warning|error|none",
    )
    options.define(
        "log_to_stderr",
        type=bool,
        default=None,
        help=(
            "Send log output to stderr (colorized if possible). "
            "By default use stderr if --log_file_prefix is not set and "
            "no other logging is configured."
        ),
    )
    options.define(
        "log_file_prefix",
        type=str,
        default=None,
        metavar="PATH",
        help=(
            "Path prefix for log files. "
            "Note that if you are running multiple tornado processes, "
            "log_file_prefix must be different for each of them (e.g. "
            "include the port number)"
        ),
    )
    options.define(
        "log_file_max_size",
        type=int,
        default=100 * 1000 * 1000,
        help="max size of log files before rollover",
    )
    options.define(
        "log_file_num_backups", type=int, default=10, help="number of log files to keep"
    )
    options.define(
        "log_rotate_when",
        type=str,
        default="midnight",
        help=(
            "specify the type of TimedRotatingFileHandler interval "
            "other options:('S', 'M', 'H', 'D', 'W0'-'W6')"
        ),
    )
    options.define(
        "log_rotate_interval",
        type=int,
        default=1,
        help="The interval value of timed rotating",
    )
    options.define(
        "log_rotate_mode",
        type=str,
        default="size",
        help="The mode of rotating files(time or size)",
    )
    options.add_parse_callback(lambda: enable_pretty_logging(options))
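A sketch with a private ``OptionParser`` (the argument list is hypothetical)::

    from tornado.log import define_logging_options
    from tornado.options import OptionParser

    parser = OptionParser()
    define_logging_options(parser)
    parser.parse_command_line(["prog", "--logging=debug"])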
<SYSTEM_TASK:> Creates a AsyncHTTPClient. <END_TASK> <USER_TASK:> Description: def initialize( # type: ignore self, max_clients: int = 10, hostname_mapping: Dict[str, str] = None, max_buffer_size: int = 104857600, resolver: Resolver = None, defaults: Dict[str, Any] = None, max_header_size: int = None, max_body_size: int = None, ) -> None: """Creates a AsyncHTTPClient. Only a single AsyncHTTPClient instance exists per IOLoop in order to provide limitations on the number of pending connections. ``force_instance=True`` may be used to suppress this behavior. Note that because of this implicit reuse, unless ``force_instance`` is used, only the first call to the constructor actually uses its arguments. It is recommended to use the ``configure`` method instead of the constructor to ensure that arguments take effect. ``max_clients`` is the number of concurrent requests that can be in progress; when this limit is reached additional requests will be queued. Note that time spent waiting in this queue still counts against the ``request_timeout``. ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses. It can be used to make local DNS changes when modifying system-wide settings like ``/etc/hosts`` is not possible or desirable (e.g. in unittests). ``max_buffer_size`` (default 100MB) is the number of bytes that can be read into memory at once. ``max_body_size`` (defaults to ``max_buffer_size``) is the largest response body that the client will accept. Without a ``streaming_callback``, the smaller of these two limits applies; with a ``streaming_callback`` only ``max_body_size`` does. .. versionchanged:: 4.2 Added the ``max_body_size`` argument. """
super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults) self.max_clients = max_clients self.queue = ( collections.deque() ) # type: Deque[Tuple[object, HTTPRequest, Callable[[HTTPResponse], None]]] self.active = ( {} ) # type: Dict[object, Tuple[HTTPRequest, Callable[[HTTPResponse], None]]] self.waiting = ( {} ) # type: Dict[object, Tuple[HTTPRequest, Callable[[HTTPResponse], None], object]] self.max_buffer_size = max_buffer_size self.max_header_size = max_header_size self.max_body_size = max_body_size # TCPClient could create a Resolver for us, but we have to do it # ourselves to support hostname_mapping. if resolver: self.resolver = resolver self.own_resolver = False else: self.resolver = Resolver() self.own_resolver = True if hostname_mapping is not None: self.resolver = OverrideResolver( resolver=self.resolver, mapping=hostname_mapping ) self.tcp_client = TCPClient(resolver=self.resolver)
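Because only the first constructor call takes effect, shared limits are normally set once through ``AsyncHTTPClient.configure``; a minimal sketch (the mapping entries are illustrative)::

    from tornado.httpclient import AsyncHTTPClient

    # Select the simple (non-curl) implementation and set shared limits
    # before the first instance is created.
    AsyncHTTPClient.configure(
        "tornado.simple_httpclient.SimpleAsyncHTTPClient",
        max_clients=50,
        hostname_mapping={"test.example.com": "127.0.0.1"},
    )
    client = AsyncHTTPClient()  # returns the shared, configured instance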
<SYSTEM_TASK:> Timeout callback of request. <END_TASK> <USER_TASK:> Description: def _on_timeout(self, key: object, info: str = None) -> None: """Timeout callback of request. Construct a timeout HTTPResponse when a timeout occurs. :arg object key: A simple object to mark the request. :arg str info: More detailed timeout information. """
request, callback, timeout_handle = self.waiting[key] self.queue.remove((key, request, callback)) error_message = "Timeout {0}".format(info) if info else "Timeout" timeout_response = HTTPResponse( request, 599, error=HTTPTimeoutError(error_message), request_time=self.io_loop.time() - request.start_time, ) self.io_loop.add_callback(callback, timeout_response) del self.waiting[key]
<SYSTEM_TASK:> Timeout callback of _HTTPConnection instance. <END_TASK> <USER_TASK:> Description: def _on_timeout(self, info: str = None) -> None: """Timeout callback of _HTTPConnection instance. Raise an `HTTPTimeoutError` when a timeout occurs. :arg str info: More detailed timeout information. """
self._timeout = None error_message = "Timeout {0}".format(info) if info else "Timeout" if self.final_callback is not None: self._handle_exception( HTTPTimeoutError, HTTPTimeoutError(error_message), None )
<SYSTEM_TASK:> Redirects to the authentication URL for this service. <END_TASK> <USER_TASK:> Description: def authenticate_redirect( self, callback_uri: str = None, ax_attrs: List[str] = ["name", "email", "language", "username"], ) -> None: """Redirects to the authentication URL for this service. After authentication, the service will redirect back to the given callback URI with additional parameters including ``openid.mode``. We request the given attributes for the authenticated user by default (name, email, language, and username). If you don't need all those attributes for your app, you can request fewer with the ax_attrs keyword argument. .. versionchanged:: 6.0 The ``callback`` argument was removed and this method no longer returns an awaitable object. It is now an ordinary synchronous function. """
handler = cast(RequestHandler, self) callback_uri = callback_uri or handler.request.uri assert callback_uri is not None args = self._openid_args(callback_uri, ax_attrs=ax_attrs) endpoint = self._OPENID_ENDPOINT # type: ignore handler.redirect(endpoint + "?" + urllib.parse.urlencode(args))
<SYSTEM_TASK:> Gets the OAuth authorized user and access token. <END_TASK> <USER_TASK:> Description: async def get_authenticated_user( self, http_client: httpclient.AsyncHTTPClient = None ) -> Dict[str, Any]: """Gets the OAuth authorized user and access token. This method should be called from the handler for your OAuth callback URL to complete the registration process. We run the callback with the authenticated user dictionary. This dictionary will contain an ``access_key`` which can be used to make authorized requests to this service on behalf of the user. The dictionary will also contain other fields such as ``name``, depending on the service used. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """
handler = cast(RequestHandler, self) request_key = escape.utf8(handler.get_argument("oauth_token")) oauth_verifier = handler.get_argument("oauth_verifier", None) request_cookie = handler.get_cookie("_oauth_request_token") if not request_cookie: raise AuthError("Missing OAuth request token cookie") handler.clear_cookie("_oauth_request_token") cookie_key, cookie_secret = [ base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|") ] if cookie_key != request_key: raise AuthError("Request token does not match cookie") token = dict( key=cookie_key, secret=cookie_secret ) # type: Dict[str, Union[str, bytes]] if oauth_verifier: token["verifier"] = oauth_verifier if http_client is None: http_client = self.get_auth_http_client() assert http_client is not None response = await http_client.fetch(self._oauth_access_token_url(token)) access_token = _oauth_parse_response(response.body) user = await self._oauth_get_user_future(access_token) if not user: raise AuthError("Error getting user") user["access_token"] = access_token return user
<SYSTEM_TASK:> Subclasses must override this to get basic information about the <END_TASK> <USER_TASK:> Description: async def _oauth_get_user_future( self, access_token: Dict[str, Any] ) -> Dict[str, Any]: """Subclasses must override this to get basic information about the user. Should be a coroutine whose result is a dictionary containing information about the user, which may have been retrieved by using ``access_token`` to make a request to the service. The access token will be added to the returned dictionary to form the result of `get_authenticated_user`. .. versionchanged:: 5.1 Subclasses may also define this method with ``async def``. .. versionchanged:: 6.0 A synchronous fallback to ``_oauth_get_user`` was removed. """
raise NotImplementedError()
<SYSTEM_TASK:> Fetches the given URL with an OAuth2 access token. <END_TASK> <USER_TASK:> Description: async def oauth2_request( self, url: str, access_token: str = None, post_args: Dict[str, Any] = None, **args: Any ) -> Any: """Fetches the given URL with an OAuth2 access token. If the request is a POST, ``post_args`` should be provided. Query string arguments should be given as keyword arguments. Example usage: .. testcode:: class MainHandler(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin): @tornado.web.authenticated async def get(self): new_entry = await self.oauth2_request( "https://graph.facebook.com/me/feed", post_args={"message": "I am posting from my Tornado application!"}, access_token=self.current_user["access_token"]) if not new_entry: # Call failed; perhaps missing permission? await self.authorize_redirect() return self.finish("Posted a message!") .. testoutput:: :hide: .. versionadded:: 4.3 .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """
all_args = {} if access_token: all_args["access_token"] = access_token all_args.update(args) if all_args: url += "?" + urllib.parse.urlencode(all_args) http = self.get_auth_http_client() if post_args is not None: response = await http.fetch( url, method="POST", body=urllib.parse.urlencode(post_args) ) else: response = await http.fetch(url) return escape.json_decode(response.body)
<SYSTEM_TASK:> Just like `~OAuthMixin.authorize_redirect`, but <END_TASK> <USER_TASK:> Description: async def authenticate_redirect(self, callback_uri: str = None) -> None: """Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if authorized. This is generally the right interface to use if you are using Twitter for single-sign on. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """
http = self.get_auth_http_client() response = await http.fetch( self._oauth_request_token_url(callback_uri=callback_uri) ) self._on_request_token(self._OAUTH_AUTHENTICATE_URL, None, response)
<SYSTEM_TASK:> Handles the login for the Google user, returning an access token. <END_TASK> <USER_TASK:> Description: async def get_authenticated_user( self, redirect_uri: str, code: str ) -> Dict[str, Any]: """Handles the login for the Google user, returning an access token. The result is a dictionary containing an ``access_token`` field ([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)). Unlike other ``get_authenticated_user`` methods in this package, this method does not return any additional information about the user. The returned access token can be used with `OAuth2Mixin.oauth2_request` to request additional information (perhaps from ``https://www.googleapis.com/oauth2/v2/userinfo``) Example usage: .. testcode:: class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): async def get(self): if self.get_argument('code', False): access = await self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) user = await self.oauth2_request( "https://www.googleapis.com/oauth2/v1/userinfo", access_token=access["access_token"]) # Save the user and access token with # e.g. set_secure_cookie. else: await self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) .. testoutput:: :hide: .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """
# noqa: E501 handler = cast(RequestHandler, self) http = self.get_auth_http_client() body = urllib.parse.urlencode( { "redirect_uri": redirect_uri, "code": code, "client_id": handler.settings[self._OAUTH_SETTINGS_KEY]["key"], "client_secret": handler.settings[self._OAUTH_SETTINGS_KEY]["secret"], "grant_type": "authorization_code", } ) response = await http.fetch( self._OAUTH_ACCESS_TOKEN_URL, method="POST", headers={"Content-Type": "application/x-www-form-urlencoded"}, body=body, ) return escape.json_decode(response.body)
<SYSTEM_TASK:> Handles the login for the Facebook user, returning a user object. <END_TASK> <USER_TASK:> Description: async def get_authenticated_user( self, redirect_uri: str, client_id: str, client_secret: str, code: str, extra_fields: Dict[str, Any] = None, ) -> Optional[Dict[str, Any]]: """Handles the login for the Facebook user, returning a user object. Example usage: .. testcode:: class FacebookGraphLoginHandler(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin): async def get(self): if self.get_argument("code", False): user = await self.get_authenticated_user( redirect_uri='/auth/facebookgraph/', client_id=self.settings["facebook_api_key"], client_secret=self.settings["facebook_secret"], code=self.get_argument("code")) # Save the user with e.g. set_secure_cookie else: await self.authorize_redirect( redirect_uri='/auth/facebookgraph/', client_id=self.settings["facebook_api_key"], extra_params={"scope": "read_stream,offline_access"}) .. testoutput:: :hide: This method returns a dictionary which may contain the following fields: * ``access_token``, a string which may be passed to `facebook_request` * ``session_expires``, an integer encoded as a string representing the time until the access token expires in seconds. This field should be used like ``int(user['session_expires'])``; in a future version of Tornado it will change from a string to an integer. * ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``, ``link``, plus any fields named in the ``extra_fields`` argument. These fields are copied from the Facebook graph API `user object <https://developers.facebook.com/docs/graph-api/reference/user>`_ .. versionchanged:: 4.5 The ``session_expires`` field was updated to support changes made to the Facebook API in March 2017. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """
http = self.get_auth_http_client() args = { "redirect_uri": redirect_uri, "code": code, "client_id": client_id, "client_secret": client_secret, } fields = set( ["id", "name", "first_name", "last_name", "locale", "picture", "link"] ) if extra_fields: fields.update(extra_fields) response = await http.fetch( self._oauth_request_token_url(**args) # type: ignore ) args = escape.json_decode(response.body) session = { "access_token": args.get("access_token"), "expires_in": args.get("expires_in"), } assert session["access_token"] is not None user = await self.facebook_request( path="/me", access_token=session["access_token"], appsecret_proof=hmac.new( key=client_secret.encode("utf8"), msg=session["access_token"].encode("utf8"), digestmod=hashlib.sha256, ).hexdigest(), fields=",".join(fields), ) if user is None: return None fieldmap = {} for field in fields: fieldmap[field] = user.get(field) # session_expires is converted to str for compatibility with # older versions in which the server used url-encoding and # this code simply returned the string verbatim. # This should change in Tornado 5.0. fieldmap.update( { "access_token": session["access_token"], "session_expires": str(session.get("expires_in")), } ) return fieldmap
<SYSTEM_TASK:> Wait for `.notify`. <END_TASK> <USER_TASK:> Description: def wait(self, timeout: Union[float, datetime.timedelta] = None) -> Awaitable[bool]: """Wait for `.notify`. Returns a `.Future` that resolves ``True`` if the condition is notified, or ``False`` after a timeout. """
waiter = Future() # type: Future[bool] self._waiters.append(waiter) if timeout: def on_timeout() -> None: if not waiter.done(): future_set_result_unless_cancelled(waiter, False) self._garbage_collect() io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) waiter.add_done_callback(lambda _: io_loop.remove_timeout(timeout_handle)) return waiter
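A minimal sketch of the wait/notify handshake (the coroutine names are illustrative); note that a plain number is interpreted as an absolute deadline on the IOLoop's clock, so relative timeouts should be passed as a ``timedelta``::

    import datetime
    from tornado.locks import Condition

    condition = Condition()

    async def waiter():
        # Resolves True when notified, False if the timeout fires first.
        notified = await condition.wait(timeout=datetime.timedelta(seconds=1))
        print("notified" if notified else "timed out")

    def notifier():
        condition.notify()  # wakes a single waiter, if any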
<SYSTEM_TASK:> Set the internal flag to ``True``. All waiters are awakened. <END_TASK> <USER_TASK:> Description: def set(self) -> None: """Set the internal flag to ``True``. All waiters are awakened. Calling `.wait` once the flag is set will not block. """
if not self._value: self._value = True for fut in self._waiters: if not fut.done(): fut.set_result(None)
<SYSTEM_TASK:> Block until the internal flag is true. <END_TASK> <USER_TASK:> Description: def wait(self, timeout: Union[float, datetime.timedelta] = None) -> Awaitable[None]: """Block until the internal flag is true. Returns an awaitable, which raises `tornado.util.TimeoutError` after a timeout. """
fut = Future() # type: Future[None] if self._value: fut.set_result(None) return fut self._waiters.add(fut) fut.add_done_callback(lambda fut: self._waiters.remove(fut)) if timeout is None: return fut else: timeout_fut = gen.with_timeout( timeout, fut, quiet_exceptions=(CancelledError,) ) # This is a slightly clumsy workaround for the fact that # gen.with_timeout doesn't cancel its futures. Cancelling # fut will remove it from the waiters list. timeout_fut.add_done_callback( lambda tf: fut.cancel() if not fut.done() else None ) return timeout_fut
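A minimal sketch of the typical producer/consumer pattern (names are illustrative)::

    from tornado.locks import Event

    event = Event()

    async def consumer():
        await event.wait()  # resolves once the flag is set
        print("event fired")

    def producer():
        event.set()  # wakes all current waiters; later waits return at once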
<SYSTEM_TASK:> Decrement the counter. Returns an awaitable. <END_TASK> <USER_TASK:> Description: def acquire( self, timeout: Union[float, datetime.timedelta] = None ) -> Awaitable[_ReleasingContextManager]: """Decrement the counter. Returns an awaitable. Block if the counter is zero and wait for a `.release`. The awaitable raises `.TimeoutError` after the deadline. """
waiter = Future() # type: Future[_ReleasingContextManager] if self._value > 0: self._value -= 1 waiter.set_result(_ReleasingContextManager(self)) else: self._waiters.append(waiter) if timeout: def on_timeout() -> None: if not waiter.done(): waiter.set_exception(gen.TimeoutError()) self._garbage_collect() io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) waiter.add_done_callback( lambda _: io_loop.remove_timeout(timeout_handle) ) return waiter
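The semaphore is usually held through its async context manager rather than by pairing ``acquire`` and ``release`` manually; a minimal sketch (``do_work`` is a hypothetical coroutine)::

    from tornado.locks import Semaphore

    sem = Semaphore(2)  # at most two concurrent holders

    async def worker(item):
        async with sem:  # acquires here, releases on exit
            await do_work(item)  # hypothetical coroutine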
<SYSTEM_TASK:> Attempt to lock. Returns an awaitable. <END_TASK> <USER_TASK:> Description: def acquire( self, timeout: Union[float, datetime.timedelta] = None ) -> Awaitable[_ReleasingContextManager]: """Attempt to lock. Returns an awaitable. Returns an awaitable, which raises `tornado.util.TimeoutError` after a timeout. """
return self._block.acquire(timeout)
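The same context-manager form applies to `Lock`; a minimal sketch::

    from tornado.locks import Lock

    lock = Lock()

    async def critical_section():
        async with lock:  # waits for the lock, releases it on exit
            ...  # only one coroutine executes this block at a time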
<SYSTEM_TASK:> Read a single HTTP response. <END_TASK> <USER_TASK:> Description: def read_response(self, delegate: httputil.HTTPMessageDelegate) -> Awaitable[bool]: """Read a single HTTP response. Typical client-mode usage is to write a request using `write_headers`, `write`, and `finish`, and then call ``read_response``. :arg delegate: a `.HTTPMessageDelegate` Returns a `.Future` that resolves to a bool after the full response has been read. The result is true if the stream is still open. """
if self.params.decompress: delegate = _GzipMessageDelegate(delegate, self.params.chunk_size) return self._read_message(delegate)
<SYSTEM_TASK:> Clears the callback attributes. <END_TASK> <USER_TASK:> Description: def _clear_callbacks(self) -> None: """Clears the callback attributes. This allows the request handler to be garbage collected more quickly in CPython by breaking up reference cycles. """
self._write_callback = None self._write_future = None # type: Optional[Future[None]] self._close_callback = None # type: Optional[Callable[[], None]] if self.stream is not None: self.stream.set_close_callback(None)
<SYSTEM_TASK:> Implements `.HTTPConnection.write`. <END_TASK> <USER_TASK:> Description: def write(self, chunk: bytes) -> "Future[None]": """Implements `.HTTPConnection.write`. For backwards compatibility it is allowed but deprecated to skip `write_headers` and instead call `write()` with a pre-encoded header block. """
future = None if self.stream.closed(): future = self._write_future = Future() self._write_future.set_exception(iostream.StreamClosedError()) self._write_future.exception() else: future = self._write_future = Future() self._pending_write = self.stream.write(self._format_chunk(chunk)) future_add_done_callback(self._pending_write, self._on_write_complete) return future
<SYSTEM_TASK:> Starts serving requests on this connection. <END_TASK> <USER_TASK:> Description: def start_serving(self, delegate: httputil.HTTPServerConnectionDelegate) -> None: """Starts serving requests on this connection. :arg delegate: a `.HTTPServerConnectionDelegate` """
assert isinstance(delegate, httputil.HTTPServerConnectionDelegate) fut = gen.convert_yielded(self._server_request_loop(delegate)) self._serving_future = fut # Register the future on the IOLoop so its errors get logged. self.stream.io_loop.add_future(fut, lambda f: f.result())
<SYSTEM_TASK:> Client-side websocket support. <END_TASK> <USER_TASK:> Description: def websocket_connect( url: Union[str, httpclient.HTTPRequest], callback: Callable[["Future[WebSocketClientConnection]"], None] = None, connect_timeout: float = None, on_message_callback: Callable[[Union[None, str, bytes]], None] = None, compression_options: Dict[str, Any] = None, ping_interval: float = None, ping_timeout: float = None, max_message_size: int = _default_max_message_size, subprotocols: List[str] = None, ) -> "Awaitable[WebSocketClientConnection]": """Client-side websocket support. Takes a url and returns a Future whose result is a `WebSocketClientConnection`. ``compression_options`` is interpreted in the same way as the return value of `.WebSocketHandler.get_compression_options`. The connection supports two styles of operation. In the coroutine style, the application typically calls `~.WebSocketClientConnection.read_message` in a loop:: conn = yield websocket_connect(url) while True: msg = yield conn.read_message() if msg is None: break # Do something with msg In the callback style, pass an ``on_message_callback`` to ``websocket_connect``. In both styles, a message of ``None`` indicates that the connection has been closed. ``subprotocols`` may be a list of strings specifying proposed subprotocols. The selected protocol may be found on the ``selected_subprotocol`` attribute of the connection object when the connection is complete. .. versionchanged:: 3.2 Also accepts ``HTTPRequest`` objects in place of urls. .. versionchanged:: 4.1 Added ``compression_options`` and ``on_message_callback``. .. versionchanged:: 4.5 Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size`` arguments, which have the same meaning as in `WebSocketHandler`. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. .. versionchanged:: 5.1 Added the ``subprotocols`` argument. """
if isinstance(url, httpclient.HTTPRequest): assert connect_timeout is None request = url # Copy and convert the headers dict/object (see comments in # AsyncHTTPClient.fetch) request.headers = httputil.HTTPHeaders(request.headers) else: request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout) request = cast( httpclient.HTTPRequest, httpclient._RequestProxy(request, httpclient.HTTPRequest._DEFAULTS), ) conn = WebSocketClientConnection( request, on_message_callback=on_message_callback, compression_options=compression_options, ping_interval=ping_interval, ping_timeout=ping_timeout, max_message_size=max_message_size, subprotocols=subprotocols, ) if callback is not None: IOLoop.current().add_future(conn.connect_future, callback) return conn.connect_future
<SYSTEM_TASK:> Closes this Web Socket. <END_TASK> <USER_TASK:> Description: def close(self, code: int = None, reason: str = None) -> None: """Closes this Web Socket. Once the close handshake is successful the socket will be closed. ``code`` may be a numeric status code, taken from the values defined in `RFC 6455 section 7.4.1 <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_. ``reason`` may be a textual message about why the connection is closing. These values are made available to the client, but are not otherwise interpreted by the websocket protocol. .. versionchanged:: 4.0 Added the ``code`` and ``reason`` arguments. """
if self.ws_connection: self.ws_connection.close(code, reason) self.ws_connection = None
<SYSTEM_TASK:> Override to enable support for allowing alternate origins. <END_TASK> <USER_TASK:> Description: def check_origin(self, origin: str) -> bool: """Override to enable support for allowing alternate origins. The ``origin`` argument is the value of the ``Origin`` HTTP header, the url responsible for initiating this request. This method is not called for clients that do not send this header; such requests are always allowed (because all browsers that implement WebSockets support this header, and non-browser clients do not have the same cross-site security concerns). Should return ``True`` to accept the request or ``False`` to reject it. By default, rejects all requests with an origin on a host other than this one. This is a security protection against cross site scripting attacks on browsers, since WebSockets are allowed to bypass the usual same-origin policies and don't use CORS headers. .. warning:: This is an important security measure; don't disable it without understanding the security implications. In particular, if your authentication is cookie-based, you must either restrict the origins allowed by ``check_origin()`` or implement your own XSRF-like protection for websocket connections. See `these <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_ `articles <https://devcenter.heroku.com/articles/websocket-security>`_ for more. To accept all cross-origin traffic (which was the default prior to Tornado 4.0), simply override this method to always return ``True``:: def check_origin(self, origin): return True To allow connections from any subdomain of your site, you might do something like:: def check_origin(self, origin): parsed_origin = urllib.parse.urlparse(origin) return parsed_origin.netloc.endswith(".mydomain.com") .. versionadded:: 4.0 """
parsed_origin = urlparse(origin) origin = parsed_origin.netloc origin = origin.lower() host = self.request.headers.get("Host") # Check to see that origin matches host directly, including ports return origin == host
<SYSTEM_TASK:> Set the no-delay flag for this stream. <END_TASK> <USER_TASK:> Description: def set_nodelay(self, value: bool) -> None: """Set the no-delay flag for this stream. By default, small messages may be delayed and/or combined to minimize the number of packets sent. This can sometimes cause 200-500ms delays due to the interaction between Nagle's algorithm and TCP delayed ACKs. To reduce this delay (at the expense of possibly increasing bandwidth usage), call ``self.set_nodelay(True)`` once the websocket connection is established. See `.BaseIOStream.set_nodelay` for additional details. .. versionadded:: 3.1 """
assert self.ws_connection is not None self.ws_connection.set_nodelay(value)
<SYSTEM_TASK:> Runs the given callback with exception handling. <END_TASK> <USER_TASK:> Description: def _run_callback( self, callback: Callable, *args: Any, **kwargs: Any ) -> "Optional[Future[Any]]": """Runs the given callback with exception handling. If the callback is a coroutine, returns its Future. On error, aborts the websocket connection and returns None. """
try: result = callback(*args, **kwargs) except Exception: self.handler.log_exception(*sys.exc_info()) self._abort() return None else: if result is not None: result = gen.convert_yielded(result) assert self.stream is not None self.stream.io_loop.add_future(result, lambda f: f.result()) return result
<SYSTEM_TASK:> Instantly aborts the WebSocket connection by closing the socket <END_TASK> <USER_TASK:> Description: def _abort(self) -> None: """Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True self.server_terminated = True if self.stream is not None: self.stream.close() # forcibly tear down the connection self.close()
<SYSTEM_TASK:> Verifies all invariant and required headers. <END_TASK> <USER_TASK:> Description: def _handle_websocket_headers(self, handler: WebSocketHandler) -> None: """Verifies all invariant and required headers. If a header is missing or has an incorrect value, a ValueError is raised. """
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version") if not all(map(lambda f: handler.request.headers.get(f), fields)): raise ValueError("Missing/Invalid WebSocket headers")
<SYSTEM_TASK:> Computes the value for the Sec-WebSocket-Accept header, <END_TASK> <USER_TASK:> Description: def compute_accept_value(key: Union[str, bytes]) -> str: """Computes the value for the Sec-WebSocket-Accept header, given the value for Sec-WebSocket-Key. """
sha1 = hashlib.sha1()
    sha1.update(utf8(key))
    sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # GUID from RFC 6455, section 1.3
    return native_str(base64.b64encode(sha1.digest()))
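A quick sanity check against the sample handshake in RFC 6455 section 1.3, calling the function directly (outside the class it is defined on)::

    # The RFC's example key must hash to the RFC's example accept value.
    assert (
        compute_accept_value("dGhlIHNhbXBsZSBub25jZQ==")
        == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
    )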
<SYSTEM_TASK:> Process the headers sent by the server to this client connection. <END_TASK> <USER_TASK:> Description: def _process_server_headers( self, key: Union[str, bytes], headers: httputil.HTTPHeaders ) -> None: """Process the headers sent by the server to this client connection. 'key' is the websocket handshake challenge/response key. """
assert headers["Upgrade"].lower() == "websocket" assert headers["Connection"].lower() == "upgrade" accept = self.compute_accept_value(key) assert headers["Sec-Websocket-Accept"] == accept extensions = self._parse_extensions_header(headers) for ext in extensions: if ext[0] == "permessage-deflate" and self._compression_options is not None: self._create_compressors("client", ext[1]) else: raise ValueError("unsupported extension %r", ext) self.selected_subprotocol = headers.get("Sec-WebSocket-Protocol", None)
<SYSTEM_TASK:> Execute on_message, returning its Future if it is a coroutine. <END_TASK> <USER_TASK:> Description: def _handle_message(self, opcode: int, data: bytes) -> "Optional[Future[None]]": """Execute on_message, returning its Future if it is a coroutine."""
if self.client_terminated: return None if self._frame_compressed: assert self._decompressor is not None try: data = self._decompressor.decompress(data) except _DecompressTooLargeError: self.close(1009, "message too big after decompression") self._abort() return None if opcode == 0x1: # UTF-8 data self._message_bytes_in += len(data) try: decoded = data.decode("utf-8") except UnicodeDecodeError: self._abort() return None return self._run_callback(self.handler.on_message, decoded) elif opcode == 0x2: # Binary data self._message_bytes_in += len(data) return self._run_callback(self.handler.on_message, data) elif opcode == 0x8: # Close self.client_terminated = True if len(data) >= 2: self.close_code = struct.unpack(">H", data[:2])[0] if len(data) > 2: self.close_reason = to_unicode(data[2:]) # Echo the received close code, if any (RFC 6455 section 5.5.1). self.close(self.close_code) elif opcode == 0x9: # Ping try: self._write_frame(True, 0xA, data) except StreamClosedError: self._abort() self._run_callback(self.handler.on_ping, data) elif opcode == 0xA: # Pong self.last_pong = IOLoop.current().time() return self._run_callback(self.handler.on_pong, data) else: self._abort() return None
<SYSTEM_TASK:> Return ``True`` if this connection is closing. <END_TASK> <USER_TASK:> Description: def is_closing(self) -> bool: """Return ``True`` if this connection is closing. The connection is considered closing if either side has initiated its closing handshake or if the stream has been shut down uncleanly. """
return self.stream.closed() or self.client_terminated or self.server_terminated
<SYSTEM_TASK:> Start sending periodic pings to keep the connection alive <END_TASK> <USER_TASK:> Description: def start_pinging(self) -> None: """Start sending periodic pings to keep the connection alive"""
assert self.ping_interval is not None if self.ping_interval > 0: self.last_ping = self.last_pong = IOLoop.current().time() self.ping_callback = PeriodicCallback( self.periodic_ping, self.ping_interval * 1000 ) self.ping_callback.start()
<SYSTEM_TASK:> Send a ping to keep the websocket alive <END_TASK> <USER_TASK:> Description: def periodic_ping(self) -> None: """Send a ping to keep the websocket alive Called periodically if the websocket_ping_interval is set and non-zero. """
if self.is_closing() and self.ping_callback is not None: self.ping_callback.stop() return # Check for timeout on pong. Make sure that we really have # sent a recent ping in case the machine with both server and # client has been suspended since the last ping. now = IOLoop.current().time() since_last_pong = now - self.last_pong since_last_ping = now - self.last_ping assert self.ping_interval is not None assert self.ping_timeout is not None if ( since_last_ping < 2 * self.ping_interval and since_last_pong > self.ping_timeout ): self.close() return self.write_ping(b"") self.last_ping = now
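Server-side pinging is normally switched on through `.Application` settings rather than by calling this method directly; a minimal sketch (``handlers`` is assumed to be defined elsewhere)::

    from tornado.web import Application

    app = Application(
        handlers,
        websocket_ping_interval=30,  # seconds between pings
        websocket_ping_timeout=90,   # close if no pong arrives in this window
    )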
<SYSTEM_TASK:> Sends a message to the WebSocket server. <END_TASK> <USER_TASK:> Description: def write_message( self, message: Union[str, bytes], binary: bool = False ) -> "Future[None]": """Sends a message to the WebSocket server. If the stream is closed, raises `WebSocketClosedError`. Returns a `.Future` which can be used for flow control. .. versionchanged:: 5.0 Exception raised on a closed stream changed from `.StreamClosedError` to `WebSocketClosedError`. """
return self.protocol.write_message(message, binary=binary)
<SYSTEM_TASK:> Reads a message from the WebSocket server. <END_TASK> <USER_TASK:> Description: def read_message( self, callback: Callable[["Future[Union[None, str, bytes]]"], None] = None ) -> Awaitable[Union[None, str, bytes]]: """Reads a message from the WebSocket server. If on_message_callback was specified at WebSocket initialization, this function will never return messages. Returns a future whose result is the message, or None if the connection is closed. If a callback argument is given, it will be called with the future when it is ready. """
awaitable = self.read_queue.get() if callback is not None: self.io_loop.add_future(asyncio.ensure_future(awaitable), callback) return awaitable
<SYSTEM_TASK:> Defines an option in the global namespace. <END_TASK> <USER_TASK:> Description: def define( name: str, default: Any = None, type: type = None, help: str = None, metavar: str = None, multiple: bool = False, group: str = None, callback: Callable[[Any], None] = None, ) -> None: """Defines an option in the global namespace. See `OptionParser.define`. """
return options.define( name, default=default, type=type, help=help, metavar=metavar, multiple=multiple, group=group, callback=callback, )
<SYSTEM_TASK:> Parses global options from the command line. <END_TASK> <USER_TASK:> Description: def parse_command_line(args: List[str] = None, final: bool = True) -> List[str]: """Parses global options from the command line. See `OptionParser.parse_command_line`. """
return options.parse_command_line(args, final=final)
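The module-level helpers mirror the `OptionParser` methods on the global instance; a minimal sketch of the usual pattern::

    from tornado.options import define, options, parse_command_line

    define("port", default=8888, type=int, help="run on the given port")
    remaining = parse_command_line()  # returns the non-flag arguments
    print(options.port)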
<SYSTEM_TASK:> The set of option-groups created by ``define``. <END_TASK> <USER_TASK:> Description: def groups(self) -> Set[str]: """The set of option-groups created by ``define``. .. versionadded:: 3.1 """
return set(opt.group_name for opt in self._options.values())
<SYSTEM_TASK:> The names and values of options in a group. <END_TASK> <USER_TASK:> Description: def group_dict(self, group: str) -> Dict[str, Any]: """The names and values of options in a group. Useful for copying options into Application settings:: from tornado.options import define, parse_command_line, options define('template_path', group='application') define('static_path', group='application') parse_command_line() application = Application( handlers, **options.group_dict('application')) .. versionadded:: 3.1 """
return dict( (opt.name, opt.value()) for name, opt in self._options.items() if not group or group == opt.group_name )
<SYSTEM_TASK:> The names and values of all options. <END_TASK> <USER_TASK:> Description: def as_dict(self) -> Dict[str, Any]: """The names and values of all options. .. versionadded:: 3.1 """
return dict((opt.name, opt.value()) for name, opt in self._options.items())
<SYSTEM_TASK:> Defines a new command line option. <END_TASK> <USER_TASK:> Description: def define( self, name: str, default: Any = None, type: type = None, help: str = None, metavar: str = None, multiple: bool = False, group: str = None, callback: Callable[[Any], None] = None, ) -> None: """Defines a new command line option. ``type`` can be any of `str`, `int`, `float`, `bool`, `~datetime.datetime`, or `~datetime.timedelta`. If no ``type`` is given but a ``default`` is, ``type`` is the type of ``default``. Otherwise, ``type`` defaults to `str`. If ``multiple`` is True, the option value is a list of ``type`` instead of an instance of ``type``. ``help`` and ``metavar`` are used to construct the automatically generated command line help string. The help message is formatted like:: --name=METAVAR help string ``group`` is used to group the defined options in logical groups. By default, command line options are grouped by the file in which they are defined. Command line option names must be unique globally. If a ``callback`` is given, it will be run with the new value whenever the option is changed. This can be used to combine command-line and file-based options:: define("config", type=str, help="path to config file", callback=lambda path: parse_config_file(path, final=False)) With this definition, options in the file specified by ``--config`` will override options set earlier on the command line, but can be overridden by later flags. """
normalized = self._normalize_name(name) if normalized in self._options: raise Error( "Option %r already defined in %s" % (normalized, self._options[normalized].file_name) ) frame = sys._getframe(0) options_file = frame.f_code.co_filename # Can be called directly, or through top level define() fn, in which # case, step up above that frame to look for real caller. if ( frame.f_back.f_code.co_filename == options_file and frame.f_back.f_code.co_name == "define" ): frame = frame.f_back file_name = frame.f_back.f_code.co_filename if file_name == options_file: file_name = "" if type is None: if not multiple and default is not None: type = default.__class__ else: type = str if group: group_name = group # type: Optional[str] else: group_name = file_name option = _Option( name, file_name=file_name, default=default, type=type, help=help, metavar=metavar, multiple=multiple, group_name=group_name, callback=callback, ) self._options[normalized] = option
<SYSTEM_TASK:> Parses and loads the config file at the given path. <END_TASK> <USER_TASK:> Description: def parse_config_file(self, path: str, final: bool = True) -> None: """Parses and loads the config file at the given path. The config file contains Python code that will be executed (so it is **not safe** to use untrusted config files). Anything in the global namespace that matches a defined option will be used to set that option's value. Options may either be the specified type for the option or strings (in which case they will be parsed the same way as in `.parse_command_line`) Example (using the options defined in the top-level docs of this module):: port = 80 mysql_host = 'mydb.example.com:3306' # Both lists and comma-separated strings are allowed for # multiple=True. memcache_hosts = ['cache1.example.com:11011', 'cache2.example.com:11011'] memcache_hosts = 'cache1.example.com:11011,cache2.example.com:11011' If ``final`` is ``False``, parse callbacks will not be run. This is useful for applications that wish to combine configurations from multiple sources. .. note:: `tornado.options` is primarily a command-line library. Config file support is provided for applications that wish to use it, but applications that prefer config files may wish to look at other libraries instead. .. versionchanged:: 4.1 Config files are now always interpreted as utf-8 instead of the system default encoding. .. versionchanged:: 4.4 The special variable ``__file__`` is available inside config files, specifying the absolute path to the config file itself. .. versionchanged:: 5.1 Added the ability to set options via strings in config files. """
config = {"__file__": os.path.abspath(path)} with open(path, "rb") as f: exec_in(native_str(f.read()), config, config) for name in config: normalized = self._normalize_name(name) if normalized in self._options: option = self._options[normalized] if option.multiple: if not isinstance(config[name], (list, str)): raise Error( "Option %r is required to be a list of %s " "or a comma-separated string" % (option.name, option.type.__name__) ) if type(config[name]) == str and option.type != str: option.parse(config[name]) else: option.set(config[name]) if final: self.run_parse_callbacks()
<SYSTEM_TASK:> Convert a SQL row to an object supporting dict and attribute access. <END_TASK> <USER_TASK:> Description: def row_to_obj(self, row, cur): """Convert a SQL row to an object supporting dict and attribute access."""
obj = tornado.util.ObjectDict() for val, desc in zip(row, cur.description): obj[desc.name] = val return obj
<SYSTEM_TASK:> Query for a list of results. <END_TASK> <USER_TASK:> Description: async def query(self, stmt, *args): """Query for a list of results. Typical usage:: results = await self.query(...) Or:: for row in await self.query(...) """
with (await self.application.db.cursor()) as cur: await cur.execute(stmt, args) return [self.row_to_obj(row, cur) for row in await cur.fetchall()]
<SYSTEM_TASK:> Query for exactly one result. <END_TASK> <USER_TASK:> Description: async def queryone(self, stmt, *args): """Query for exactly one result. Raises NoResultError if there are no results, or ValueError if there are more than one. """
results = await self.query(stmt, *args) if len(results) == 0: raise NoResultError() elif len(results) > 1: raise ValueError("Expected 1 result, got %d" % len(results)) return results[0]
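A sketch of how these helpers combine inside a handler (the ``BaseHandler`` class and the table, column, and template names are illustrative)::

    import tornado.web

    class EntryHandler(BaseHandler):  # BaseHandler provides queryone()
        async def get(self, slug):
            try:
                entry = await self.queryone(
                    "SELECT * FROM entries WHERE slug = %s", slug)
            except NoResultError:
                raise tornado.web.HTTPError(404)
            self.render("entry.html", entry=entry)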
<SYSTEM_TASK:> Starts accepting connections on the given port. <END_TASK> <USER_TASK:> Description: def listen(self, port: int, address: str = "") -> None: """Starts accepting connections on the given port. This method may be called more than once to listen on multiple ports. `listen` takes effect immediately; it is not necessary to call `TCPServer.start` afterwards. It is, however, necessary to start the `.IOLoop`. """
sockets = bind_sockets(port, address=address) self.add_sockets(sockets)
<SYSTEM_TASK:> Makes this server start accepting connections on the given sockets. <END_TASK> <USER_TASK:> Description: def add_sockets(self, sockets: Iterable[socket.socket]) -> None: """Makes this server start accepting connections on the given sockets. The ``sockets`` parameter is a list of socket objects such as those returned by `~tornado.netutil.bind_sockets`. `add_sockets` is typically used in combination with that method and `tornado.process.fork_processes` to provide greater control over the initialization of a multi-process server. """
for sock in sockets: self._sockets[sock.fileno()] = sock self._handlers[sock.fileno()] = add_accept_handler( sock, self._handle_connection )
<SYSTEM_TASK:> Binds this server to the given port on the given address. <END_TASK> <USER_TASK:> Description: def bind( self, port: int, address: str = None, family: socket.AddressFamily = socket.AF_UNSPEC, backlog: int = 128, reuse_port: bool = False, ) -> None: """Binds this server to the given port on the given address. To start the server, call `start`. If you want to run this server in a single process, you can call `listen` as a shortcut to the sequence of `bind` and `start` calls. Address may be either an IP address or hostname. If it's a hostname, the server will listen on all IP addresses associated with the name. Address may be an empty string or None to listen on all available interfaces. Family may be set to either `socket.AF_INET` or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise both will be used if available. The ``backlog`` argument has the same meaning as for `socket.listen <socket.socket.listen>`. The ``reuse_port`` argument has the same meaning as for `.bind_sockets`. This method may be called multiple times prior to `start` to listen on multiple ports or interfaces. .. versionchanged:: 4.4 Added the ``reuse_port`` argument. """
sockets = bind_sockets( port, address=address, family=family, backlog=backlog, reuse_port=reuse_port ) if self._started: self.add_sockets(sockets) else: self._pending_sockets.extend(sockets)
<SYSTEM_TASK:> Starts this server in the `.IOLoop`. <END_TASK> <USER_TASK:> Description: def start(self, num_processes: Optional[int] = 1, max_restarts: int = None) -> None: """Starts this server in the `.IOLoop`. By default, we run the server in this process and do not fork any additional child process. If num_processes is ``None`` or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If num_processes is given and > 1, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the ``autoreload=True`` option to `tornado.web.Application` which defaults to True when ``debug=True``). When using multiple processes, no IOLoops can be created or referenced until after the call to ``TCPServer.start(n)``. Values of ``num_processes`` other than 1 are not supported on Windows. The ``max_restarts`` argument is passed to `.fork_processes`. .. versionchanged:: 6.0 Added ``max_restarts`` argument. """
assert not self._started self._started = True if num_processes != 1: process.fork_processes(num_processes, max_restarts) sockets = self._pending_sockets self._pending_sockets = [] self.add_sockets(sockets)
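A sketch of the bind/start sequence for a multi-process server (the ``EchoServer`` subclass is illustrative)::

    import tornado.ioloop
    from tornado.iostream import StreamClosedError
    from tornado.tcpserver import TCPServer

    class EchoServer(TCPServer):
        async def handle_stream(self, stream, address):
            try:
                while True:
                    data = await stream.read_until(b"\n")
                    await stream.write(data)
            except StreamClosedError:
                pass

    server = EchoServer()
    server.bind(8888)
    server.start(0)  # forks one process per CPU; create IOLoops only after this
    tornado.ioloop.IOLoop.current().start()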