<SYSTEM_TASK:> Use session object to perform 'get' request on url <END_TASK> <USER_TASK:> Description: async def bounded_fetch(session, url): """ Use session object to perform 'get' request on url """
async with sem, session.get(url) as response:
    return await response.json()
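A minimal usage sketch for the coroutine above. It assumes `sem` is a module-level `asyncio.Semaphore` and that `aiohttp` supplies the session; both are assumptions, not part of the original snippet:

import asyncio
import aiohttp

sem = asyncio.Semaphore(10)  # cap on concurrent requests; the value is illustrative

async def fetch_all(urls):
    async with aiohttp.ClientSession() as session:
        # bounded_fetch acquires the semaphore before each GET, so at most 10 run at once
        return await asyncio.gather(*(bounded_fetch(session, url) for url in urls))

# asyncio.run(fetch_all(["https://api.example.com/item/1", "https://api.example.com/item/2"]))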
<SYSTEM_TASK:> Preserve shape of the image. <END_TASK> <USER_TASK:> Description: def preserve_shape(func): """Preserve shape of the image."""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
    shape = img.shape
    result = func(img, *args, **kwargs)
    result = result.reshape(shape)
    return result
return wrapped_function
<SYSTEM_TASK:> Preserve dummy channel dim. <END_TASK> <USER_TASK:> Description: def preserve_channel_dim(func): """Preserve dummy channel dim."""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
    shape = img.shape
    result = func(img, *args, **kwargs)
    if len(shape) == 3 and shape[-1] == 1 and len(result.shape) == 2:
        result = np.expand_dims(result, axis=-1)
    return result
return wrapped_function
<SYSTEM_TASK:> Flip a bounding box vertically around the x-axis. <END_TASK> <USER_TASK:> Description: def bbox_vflip(bbox, rows, cols): """Flip a bounding box vertically around the x-axis."""
x_min, y_min, x_max, y_max = bbox
return [x_min, 1 - y_max, x_max, 1 - y_min]
<SYSTEM_TASK:> Flip a bounding box horizontally around the y-axis. <END_TASK> <USER_TASK:> Description: def bbox_hflip(bbox, rows, cols): """Flip a bounding box horizontally around the y-axis."""
x_min, y_min, x_max, y_max = bbox
return [1 - x_max, y_min, 1 - x_min, y_max]
<SYSTEM_TASK:> Flip a bounding box either vertically, horizontally or both depending on the value of `d`. <END_TASK> <USER_TASK:> Description: def bbox_flip(bbox, d, rows, cols): """Flip a bounding box either vertically, horizontally or both depending on the value of `d`. Raises: ValueError: if value of `d` is not -1, 0 or 1. """
if d == 0:
    bbox = bbox_vflip(bbox, rows, cols)
elif d == 1:
    bbox = bbox_hflip(bbox, rows, cols)
elif d == -1:
    bbox = bbox_hflip(bbox, rows, cols)
    bbox = bbox_vflip(bbox, rows, cols)
else:
    raise ValueError('Invalid d value {}. Valid values are -1, 0 and 1'.format(d))
return bbox
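For illustration, flipping a normalized box with the helpers above (the numbers are made up and shown rounded):

bbox = [0.1, 0.2, 0.4, 0.5]  # normalized (x_min, y_min, x_max, y_max)
bbox_vflip(bbox, rows=100, cols=200)       # [0.1, 0.5, 0.4, 0.8]
bbox_hflip(bbox, rows=100, cols=200)       # [0.6, 0.2, 0.9, 0.5]
bbox_flip(bbox, d=-1, rows=100, cols=200)  # both flips applied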
<SYSTEM_TASK:> Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the <END_TASK> <USER_TASK:> Description: def crop_bbox_by_coords(bbox, crop_coords, crop_height, crop_width, rows, cols): """Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the required height and width of the crop. """
bbox = denormalize_bbox(bbox, rows, cols)
x_min, y_min, x_max, y_max = bbox
x1, y1, x2, y2 = crop_coords
cropped_bbox = [x_min - x1, y_min - y1, x_max - x1, y_max - y1]
return normalize_bbox(cropped_bbox, crop_height, crop_width)
<SYSTEM_TASK:> Rotates a bounding box by angle degrees <END_TASK> <USER_TASK:> Description: def bbox_rotate(bbox, angle, rows, cols, interpolation): """Rotates a bounding box by angle degrees Args: bbox (tuple): A tuple (x_min, y_min, x_max, y_max). angle (int): Angle of rotation in degrees rows (int): Image rows. cols (int): Image cols. interpolation (int): interpolation method. return a tuple (x_min, y_min, x_max, y_max) """
scale = cols / float(rows)
x = np.array([bbox[0], bbox[2], bbox[2], bbox[0]])
y = np.array([bbox[1], bbox[1], bbox[3], bbox[3]])
x = x - 0.5
y = y - 0.5
angle = np.deg2rad(angle)
x_t = (np.cos(angle) * x * scale + np.sin(angle) * y) / scale
y_t = (-np.sin(angle) * x * scale + np.cos(angle) * y)
x_t = x_t + 0.5
y_t = y_t + 0.5
return [min(x_t), min(y_t), max(x_t), max(y_t)]
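As a quick illustration, rotating a box by 90 degrees on a square image moves it around the image centre while keeping its size (`interpolation` is unused by this implementation; results are approximate because of floating point):

bbox_rotate([0.2, 0.2, 0.4, 0.4], angle=90, rows=100, cols=100, interpolation=None)
# -> approximately [0.2, 0.6, 0.4, 0.8]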
<SYSTEM_TASK:> Transposes a bounding box along given axis. <END_TASK> <USER_TASK:> Description: def bbox_transpose(bbox, axis, rows, cols): """Transposes a bounding box along given axis. Args: bbox (tuple): A tuple (x_min, y_min, x_max, y_max). axis (int): 0 - main axis, 1 - secondary axis. rows (int): Image rows. cols (int): Image cols. """
x_min, y_min, x_max, y_max = bbox
if axis != 0 and axis != 1:
    raise ValueError('Axis must be either 0 or 1.')
if axis == 0:
    bbox = [y_min, x_min, y_max, x_max]
if axis == 1:
    bbox = [1 - y_max, 1 - x_max, 1 - y_min, 1 - x_min]
return bbox
<SYSTEM_TASK:> Flip a keypoint vertically around the x-axis. <END_TASK> <USER_TASK:> Description: def keypoint_vflip(kp, rows, cols): """Flip a keypoint vertically around the x-axis."""
x, y, angle, scale = kp
c = math.cos(angle)
s = math.sin(angle)
angle = math.atan2(-s, c)
return [x, (rows - 1) - y, angle, scale]
<SYSTEM_TASK:> Flip a keypoint either vertically, horizontally or both depending on the value of `d`. <END_TASK> <USER_TASK:> Description: def keypoint_flip(bbox, d, rows, cols): """Flip a keypoint either vertically, horizontally or both depending on the value of `d`. Raises: ValueError: if value of `d` is not -1, 0 or 1. """
if d == 0:
    bbox = keypoint_vflip(bbox, rows, cols)
elif d == 1:
    bbox = keypoint_hflip(bbox, rows, cols)
elif d == -1:
    bbox = keypoint_hflip(bbox, rows, cols)
    bbox = keypoint_vflip(bbox, rows, cols)
else:
    raise ValueError('Invalid d value {}. Valid values are -1, 0 and 1'.format(d))
return bbox
<SYSTEM_TASK:> Scales a keypoint by scale_x and scale_y. <END_TASK> <USER_TASK:> Description: def keypoint_scale(keypoint, scale_x, scale_y, **params): """Scales a keypoint by scale_x and scale_y."""
x, y, a, s = keypoint
return [x * scale_x, y * scale_y, a, s * max(scale_x, scale_y)]
<SYSTEM_TASK:> Crop a keypoint using the provided coordinates of bottom-left and top-right corners in pixels and the <END_TASK> <USER_TASK:> Description: def crop_keypoint_by_coords(keypoint, crop_coords, crop_height, crop_width, rows, cols): """Crop a keypoint using the provided coordinates of bottom-left and top-right corners in pixels and the required height and width of the crop. """
x, y, a, s = keypoint
x1, y1, x2, y2 = crop_coords
cropped_keypoint = [x - x1, y - y1, a, s]
return cropped_keypoint
<SYSTEM_TASK:> Unified rounding in all python versions. <END_TASK> <USER_TASK:> Description: def py3round(number): """Unified rounding in all python versions."""
if abs(round(number) - number) == 0.5:
    return int(2.0 * round(number / 2.0))
return int(round(number))
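The effect is that half-way cases round to the nearest even integer on both Python 2 and Python 3:

py3round(0.5)  # 0
py3round(1.5)  # 2
py3round(2.5)  # 2
py3round(2.4)  # 2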
<SYSTEM_TASK:> Normalize coordinates of a bounding box. Divide x-coordinates by image width and y-coordinates <END_TASK> <USER_TASK:> Description: def normalize_bbox(bbox, rows, cols): """Normalize coordinates of a bounding box. Divide x-coordinates by image width and y-coordinates by image height. """
if rows == 0:
    raise ValueError('Argument rows cannot be zero')
if cols == 0:
    raise ValueError('Argument cols cannot be zero')
x_min, y_min, x_max, y_max = bbox[:4]
normalized_bbox = [x_min / cols, y_min / rows, x_max / cols, y_max / rows]
return normalized_bbox + list(bbox[4:])
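For example, on a 100 x 200 (rows x cols) image:

normalize_bbox([20, 30, 60, 80], rows=100, cols=200)
# -> [0.1, 0.3, 0.3, 0.8]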
<SYSTEM_TASK:> Normalize a list of bounding boxes. <END_TASK> <USER_TASK:> Description: def normalize_bboxes(bboxes, rows, cols): """Normalize a list of bounding boxes."""
return [normalize_bbox(bbox, rows, cols) for bbox in bboxes]
<SYSTEM_TASK:> Denormalize a list of bounding boxes. <END_TASK> <USER_TASK:> Description: def denormalize_bboxes(bboxes, rows, cols): """Denormalize a list of bounding boxes."""
return [denormalize_bbox(bbox, rows, cols) for bbox in bboxes]
<SYSTEM_TASK:> Calculate the area of a bounding box in pixels. <END_TASK> <USER_TASK:> Description: def calculate_bbox_area(bbox, rows, cols): """Calculate the area of a bounding box in pixels."""
bbox = denormalize_bbox(bbox, rows, cols)
x_min, y_min, x_max, y_max = bbox[:4]
area = (x_max - x_min) * (y_max - y_min)
return area
<SYSTEM_TASK:> Filter bounding boxes and return only those boxes whose visibility after transformation is above <END_TASK> <USER_TASK:> Description: def filter_bboxes_by_visibility(original_shape, bboxes, transformed_shape, transformed_bboxes, threshold=0., min_area=0.): """Filter bounding boxes and return only those boxes whose visibility after transformation is above the threshold and the minimal area of the bounding box in pixels is more than min_area. Args: original_shape (tuple): original image shape bboxes (list): original bounding boxes transformed_shape (tuple): transformed image shape transformed_bboxes (list): transformed bounding boxes threshold (float): visibility threshold. Should be a value in the range [0.0, 1.0]. min_area (float): Minimal area threshold. """
img_height, img_width = original_shape[:2]
transformed_img_height, transformed_img_width = transformed_shape[:2]
visible_bboxes = []
for bbox, transformed_bbox in zip(bboxes, transformed_bboxes):
    if not all(0.0 <= value <= 1.0 for value in transformed_bbox[:4]):
        continue
    bbox_area = calculate_bbox_area(bbox, img_height, img_width)
    transformed_bbox_area = calculate_bbox_area(transformed_bbox, transformed_img_height, transformed_img_width)
    if transformed_bbox_area < min_area:
        continue
    visibility = transformed_bbox_area / bbox_area
    if visibility >= threshold:
        visible_bboxes.append(transformed_bbox)
return visible_bboxes
<SYSTEM_TASK:> Convert a bounding box from the format used by albumentations to a format, specified in `target_format`. <END_TASK> <USER_TASK:> Description: def convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity=False): """Convert a bounding box from the format used by albumentations to a format, specified in `target_format`. Args: bbox (list): bounding box with coordinates in the format used by albumentations target_format (str): required format of the output bounding box. Should be 'coco' or 'pascal_voc'. rows (int): image height cols (int): image width check_validity (bool): check if all boxes are valid boxes Note: The `coco` format of a bounding box looks like `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200]. The `pascal_voc` format of a bounding box looks like `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212]. Raises: ValueError: if `target_format` is not equal to `coco` or `pascal_voc`. """
if target_format not in {'coco', 'pascal_voc'}:
    raise ValueError(
        "Unknown target_format {}. Supported formats are: 'coco' and 'pascal_voc'".format(target_format)
    )
if check_validity:
    check_bbox(bbox)
bbox = denormalize_bbox(bbox, rows, cols)
if target_format == 'coco':
    x_min, y_min, x_max, y_max = bbox[:4]
    width = x_max - x_min
    height = y_max - y_min
    bbox = [x_min, y_min, width, height] + list(bbox[4:])
return bbox
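A usage sketch, assuming `denormalize_bbox` (referenced above but not shown here) multiplies x-coordinates by cols and y-coordinates by rows, mirroring `normalize_bbox`:

convert_bbox_from_albumentations([0.1, 0.3, 0.3, 0.8], 'coco', rows=100, cols=200)
# -> roughly [20.0, 30.0, 40.0, 50.0], i.e. (x_min, y_min, width, height)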
<SYSTEM_TASK:> Convert a list of bounding boxes from a format specified in `source_format` to the format used by albumentations <END_TASK> <USER_TASK:> Description: def convert_bboxes_to_albumentations(bboxes, source_format, rows, cols, check_validity=False): """Convert a list of bounding boxes from a format specified in `source_format` to the format used by albumentations """
return [convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity) for bbox in bboxes]
<SYSTEM_TASK:> Convert a list of bounding boxes from the format used by albumentations to a format, specified <END_TASK> <USER_TASK:> Description: def convert_bboxes_from_albumentations(bboxes, target_format, rows, cols, check_validity=False): """Convert a list of bounding boxes from the format used by albumentations to a format, specified in `target_format`. Args: bboxes (list): List of bounding box with coordinates in the format used by albumentations target_format (str): required format of the output bounding box. Should be 'coco' or 'pascal_voc'. rows (int): image height cols (int): image width check_validity (bool): check if all boxes are valid boxes """
return [convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity) for bbox in bboxes]
<SYSTEM_TASK:> Check that bbox boundaries are in the range [0, 1] and that minimums are less than maximums <END_TASK> <USER_TASK:> Description: def check_bbox(bbox): """Check that bbox boundaries are in the range [0, 1] and that minimums are less than maximums"""
for name, value in zip(['x_min', 'y_min', 'x_max', 'y_max'], bbox[:4]):
    if not 0 <= value <= 1:
        raise ValueError(
            'Expected {name} for bbox {bbox} '
            'to be in the range [0.0, 1.0], got {value}.'.format(bbox=bbox, name=name, value=value)
        )
x_min, y_min, x_max, y_max = bbox[:4]
if x_max <= x_min:
    raise ValueError('x_max is less than or equal to x_min for bbox {bbox}.'.format(bbox=bbox))
if y_max <= y_min:
    raise ValueError('y_max is less than or equal to y_min for bbox {bbox}.'.format(bbox=bbox))
<SYSTEM_TASK:> Remove bounding boxes that either lie outside of the visible area by more than min_visibility <END_TASK> <USER_TASK:> Description: def filter_bboxes(bboxes, rows, cols, min_area=0., min_visibility=0.): """Remove bounding boxes that either lie outside of the visible area by more than min_visibility or whose area in pixels is under the threshold set by `min_area`. It also clips boxes to the final image size. Args: bboxes (list): List of bounding boxes with coordinates in the format used by albumentations rows (int): Image rows. cols (int): Image cols. min_area (float): minimum area of a bounding box. All bounding boxes whose visible area in pixels is less than this value will be removed. Default: 0.0. min_visibility (float): minimum fraction of its area a bounding box must retain to remain in the list. Default: 0.0. """
resulting_boxes = []
for bbox in bboxes:
    transformed_box_area = calculate_bbox_area(bbox, rows, cols)
    bbox[:4] = np.clip(bbox[:4], 0, 1.)
    clipped_box_area = calculate_bbox_area(bbox, rows, cols)
    if not transformed_box_area or clipped_box_area / transformed_box_area <= min_visibility:
        continue
    else:
        bbox[:4] = np.clip(bbox[:4], 0, 1.)
    if calculate_bbox_area(bbox, rows, cols) <= min_area:
        continue
    resulting_boxes.append(bbox)
return resulting_boxes
<SYSTEM_TASK:> Calculate union of bounding boxes. <END_TASK> <USER_TASK:> Description: def union_of_bboxes(height, width, bboxes, erosion_rate=0.0, to_int=False): """Calculate union of bounding boxes. Args: height (float): Height of image or space. width (float): Width of image or space. bboxes (list): List-like of bounding boxes. Format is `[x_min, y_min, x_max, y_max]`. erosion_rate (float): How much each bounding box can be shrunk, useful for erosive cropping. Set this in the range [0, 1]. 0 is not erosive at all; 1.0 can make any bbox lose its volume. """
x1, y1 = width, height
x2, y2 = 0, 0
for b in bboxes:
    w, h = b[2] - b[0], b[3] - b[1]
    lim_x1, lim_y1 = b[0] + erosion_rate * w, b[1] + erosion_rate * h
    lim_x2, lim_y2 = b[2] - erosion_rate * w, b[3] - erosion_rate * h
    x1, y1 = np.min([x1, lim_x1]), np.min([y1, lim_y1])
    x2, y2 = np.max([x2, lim_x2]), np.max([y2, lim_y2])
return x1, y1, x2, y2
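A quick sketch with normalized boxes and no erosion (the returned values are numpy scalars; shown rounded):

union_of_bboxes(height=1.0, width=1.0,
                bboxes=[[0.1, 0.1, 0.4, 0.4], [0.3, 0.2, 0.7, 0.5]],
                erosion_rate=0.0)
# -> (0.1, 0.1, 0.7, 0.5)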
<SYSTEM_TASK:> Check that keypoint coordinates lie inside the image <END_TASK> <USER_TASK:> Description: def check_keypoint(kp, rows, cols): """Check that keypoint coordinates lie inside the image, i.e. x in [0, cols) and y in [0, rows)."""
for name, value, size in zip(['x', 'y'], kp[:2], [cols, rows]):
    if not 0 <= value < size:
        raise ValueError(
            'Expected {name} for keypoint {kp} '
            'to be in the range [0.0, {size}], got {value}.'.format(kp=kp, name=name, value=value, size=size)
        )
<SYSTEM_TASK:> Check that all keypoint coordinates lie inside the image <END_TASK> <USER_TASK:> Description: def check_keypoints(keypoints, rows, cols): """Check that all keypoint coordinates lie inside the image, i.e. x in [0, cols) and y in [0, rows)."""
for kp in keypoints:
    check_keypoint(kp, rows, cols)
<SYSTEM_TASK:> Command-line wrapper to re-run a script whenever its source changes. <END_TASK> <USER_TASK:> Description: def main() -> None: """Command-line wrapper to re-run a script whenever its source changes. Scripts may be specified by filename or module name:: python -m tornado.autoreload -m tornado.test.runtests python -m tornado.autoreload tornado/test/runtests.py Running a script with this wrapper is similar to calling `tornado.autoreload.wait` at the end of the script, but this wrapper can catch import-time problems like syntax errors that would otherwise prevent the script from reaching its call to `wait`. """
# Remember that we were launched with autoreload as main.
# The main module can be tricky; set the variables both in our globals
# (which may be __main__) and the real importable version.
import tornado.autoreload

global _autoreload_is_main
global _original_argv, _original_spec
tornado.autoreload._autoreload_is_main = _autoreload_is_main = True
original_argv = sys.argv
tornado.autoreload._original_argv = _original_argv = original_argv
original_spec = getattr(sys.modules["__main__"], "__spec__", None)
tornado.autoreload._original_spec = _original_spec = original_spec
sys.argv = sys.argv[:]
if len(sys.argv) >= 3 and sys.argv[1] == "-m":
    mode = "module"
    module = sys.argv[2]
    del sys.argv[1:3]
elif len(sys.argv) >= 2:
    mode = "script"
    script = sys.argv[1]
    sys.argv = sys.argv[1:]
else:
    print(_USAGE, file=sys.stderr)
    sys.exit(1)
try:
    if mode == "module":
        import runpy

        runpy.run_module(module, run_name="__main__", alter_sys=True)
    elif mode == "script":
        with open(script) as f:
            # Execute the script in our namespace instead of creating
            # a new one so that something that tries to import __main__
            # (e.g. the unittest module) will see names defined in the
            # script instead of just those defined in this module.
            global __file__
            __file__ = script
            # If __package__ is defined, imports may be incorrectly
            # interpreted as relative to this module.
            global __package__
            del __package__
            exec_in(f.read(), globals(), globals())
except SystemExit as e:
    logging.basicConfig()
    gen_log.info("Script exited with status %s", e.code)
except Exception as e:
    logging.basicConfig()
    gen_log.warning("Script exited with uncaught exception", exc_info=True)
    # If an exception occurred at import time, the file with the error
    # never made it into sys.modules and so we won't know to watch it.
    # Just to make sure we've covered everything, walk the stack trace
    # from the exception and watch every file.
    for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
        watch(filename)
    if isinstance(e, SyntaxError):
        # SyntaxErrors are special: their innermost stack frame is fake
        # so extract_tb won't see it and we have to get the filename
        # from the exception object.
        watch(e.filename)
else:
    logging.basicConfig()
    gen_log.info("Script exited normally")
# restore sys.argv so subsequent executions will include autoreload
sys.argv = original_argv
if mode == "module":
    # runpy did a fake import of the module as __main__, but now it's
    # no longer in sys.modules. Figure out where it is and watch it.
    loader = pkgutil.get_loader(module)
    if loader is not None:
        watch(loader.get_filename())  # type: ignore
wait()
<SYSTEM_TASK:> Partition the ``addrinfo`` list by address family. <END_TASK> <USER_TASK:> Description: def split( addrinfo: List[Tuple] ) -> Tuple[ List[Tuple[socket.AddressFamily, Tuple]], List[Tuple[socket.AddressFamily, Tuple]], ]: """Partition the ``addrinfo`` list by address family. Returns two lists. The first list contains the first entry from ``addrinfo`` and all others with the same family, and the second list contains all other addresses (normally one list will be AF_INET and the other AF_INET6, although non-standard resolvers may return additional families). """
primary = []
secondary = []
primary_af = addrinfo[0][0]
for af, addr in addrinfo:
    if af == primary_af:
        primary.append((af, addr))
    else:
        secondary.append((af, addr))
return primary, secondary
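A hedged example with hand-written addrinfo-style tuples (real entries would come from a resolver; the addresses are illustrative):

import socket

addrinfo = [
    (socket.AF_INET, ("93.184.216.34", 80)),
    (socket.AF_INET6, ("2606:2800:220:1:248:1893:25c8:1946", 80)),
    (socket.AF_INET, ("93.184.216.35", 80)),
]
primary, secondary = split(addrinfo)
# primary   -> the two AF_INET entries (same family as the first entry)
# secondary -> the AF_INET6 entry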
<SYSTEM_TASK:> Connect to the given host and port. <END_TASK> <USER_TASK:> Description: async def connect( self, host: str, port: int, af: socket.AddressFamily = socket.AF_UNSPEC, ssl_options: Union[Dict[str, Any], ssl.SSLContext] = None, max_buffer_size: int = None, source_ip: str = None, source_port: int = None, timeout: Union[float, datetime.timedelta] = None, ) -> IOStream: """Connect to the given host and port. Asynchronously returns an `.IOStream` (or `.SSLIOStream` if ``ssl_options`` is not None). Using the ``source_ip`` kwarg, one can specify the source IP address to use when establishing the connection. In case the user needs to resolve and use a specific interface, it has to be handled outside of Tornado as this depends very much on the platform. Raises `TimeoutError` if the input future does not complete before ``timeout``, which may be specified in any form allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time relative to `.IOLoop.time`) Similarly, when the user requires a certain source port, it can be specified using the ``source_port`` arg. .. versionchanged:: 4.5 Added the ``source_ip`` and ``source_port`` arguments. .. versionchanged:: 5.0 Added the ``timeout`` argument. """
if timeout is not None:
    if isinstance(timeout, numbers.Real):
        timeout = IOLoop.current().time() + timeout
    elif isinstance(timeout, datetime.timedelta):
        timeout = IOLoop.current().time() + timeout.total_seconds()
    else:
        raise TypeError("Unsupported timeout %r" % timeout)
if timeout is not None:
    addrinfo = await gen.with_timeout(
        timeout, self.resolver.resolve(host, port, af)
    )
else:
    addrinfo = await self.resolver.resolve(host, port, af)
connector = _Connector(
    addrinfo,
    functools.partial(
        self._create_stream,
        max_buffer_size,
        source_ip=source_ip,
        source_port=source_port,
    ),
)
af, addr, stream = await connector.start(connect_timeout=timeout)
# TODO: For better performance we could cache the (af, addr)
# information here and re-use it on subsequent connections to
# the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
if ssl_options is not None:
    if timeout is not None:
        stream = await gen.with_timeout(
            timeout,
            stream.start_tls(
                False, ssl_options=ssl_options, server_hostname=host
            ),
        )
    else:
        stream = await stream.start_tls(
            False, ssl_options=ssl_options, server_hostname=host
        )
return stream
<SYSTEM_TASK:> Close all open connections and asynchronously wait for them to finish. <END_TASK> <USER_TASK:> Description: async def close_all_connections(self) -> None: """Close all open connections and asynchronously wait for them to finish. This method is used in combination with `~.TCPServer.stop` to support clean shutdowns (especially for unittests). Typical usage would call ``stop()`` first to stop accepting new connections, then ``await close_all_connections()`` to wait for existing connections to finish. This method does not currently close open websocket connections. Note that this method is a coroutine and must be called with ``await``. """
while self._connections: # Peek at an arbitrary element of the set conn = next(iter(self._connections)) await conn.close()
<SYSTEM_TASK:> Rewrite the ``remote_ip`` and ``protocol`` fields. <END_TASK> <USER_TASK:> Description: def _apply_xheaders(self, headers: httputil.HTTPHeaders) -> None: """Rewrite the ``remote_ip`` and ``protocol`` fields."""
# Squid uses X-Forwarded-For, others use X-Real-Ip
ip = headers.get("X-Forwarded-For", self.remote_ip)
# Skip trusted downstream hosts in X-Forwarded-For list
for ip in (cand.strip() for cand in reversed(ip.split(","))):
    if ip not in self.trusted_downstream:
        break
ip = headers.get("X-Real-Ip", ip)
if netutil.is_valid_ip(ip):
    self.remote_ip = ip
# AWS uses X-Forwarded-Proto
proto_header = headers.get(
    "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol)
)
if proto_header:
    # use only the last proto entry if there is more than one
    # TODO: support trusting multiple layers of proxied protocol
    proto_header = proto_header.split(",")[-1].strip()
    if proto_header in ("http", "https"):
        self.protocol = proto_header
<SYSTEM_TASK:> Undo changes from `_apply_xheaders`. <END_TASK> <USER_TASK:> Description: def _unapply_xheaders(self) -> None: """Undo changes from `_apply_xheaders`. Xheaders are per-request so they should not leak to the next request on the same connection. """
self.remote_ip = self._orig_remote_ip self.protocol = self._orig_protocol
<SYSTEM_TASK:> Sets the default locale. <END_TASK> <USER_TASK:> Description: def set_default_locale(code: str) -> None: """Sets the default locale. The default locale is assumed to be the language used for all strings in the system. The translations loaded from disk are mappings from the default locale to the destination locale. Consequently, you don't need to create a translation file for the default locale. """
global _default_locale global _supported_locales _default_locale = code _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
<SYSTEM_TASK:> Loads translations from CSV files in a directory. <END_TASK> <USER_TASK:> Description: def load_translations(directory: str, encoding: str = None) -> None: """Loads translations from CSV files in a directory. Translations are strings with optional Python-style named placeholders (e.g., ``My name is %(name)s``) and their associated translations. The directory should have translation files of the form ``LOCALE.csv``, e.g. ``es_GT.csv``. The CSV files should have two or three columns: string, translation, and an optional plural indicator. Plural indicators should be one of "plural" or "singular". A given string can have both singular and plural forms. For example ``%(name)s liked this`` may have a different verb conjugation depending on whether %(name)s is one name or a list of names. There should be two rows in the CSV file for that string, one with plural indicator "singular", and one "plural". For strings with no verbs that would change on translation, simply use "unknown" or the empty string (or don't include the column at all). The file is read using the `csv` module in the default "excel" dialect. In this format there should not be spaces after the commas. If no ``encoding`` parameter is given, the encoding will be detected automatically (among UTF-8 and UTF-16) if the file contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM is present. Example translation ``es_LA.csv``:: "I love you","Te amo" "%(name)s liked this","A %(name)s les gustó esto","plural" "%(name)s liked this","A %(name)s le gustó esto","singular" .. versionchanged:: 4.3 Added ``encoding`` parameter. Added support for BOM-based encoding detection, UTF-16, and UTF-8-with-BOM. """
global _translations
global _supported_locales
_translations = {}
for path in os.listdir(directory):
    if not path.endswith(".csv"):
        continue
    locale, extension = path.split(".")
    if not re.match("[a-z]+(_[A-Z]+)?$", locale):
        gen_log.error(
            "Unrecognized locale %r (path: %s)",
            locale,
            os.path.join(directory, path),
        )
        continue
    full_path = os.path.join(directory, path)
    if encoding is None:
        # Try to autodetect encoding based on the BOM.
        with open(full_path, "rb") as bf:
            data = bf.read(len(codecs.BOM_UTF16_LE))
        if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
            encoding = "utf-16"
        else:
            # utf-8-sig is "utf-8 with optional BOM". It's discouraged
            # in most cases but is common with CSV files because Excel
            # cannot read utf-8 files without a BOM.
            encoding = "utf-8-sig"
    # python 3: csv.reader requires a file open in text mode.
    # Specify an encoding to avoid dependence on $LANG environment variable.
    with open(full_path, encoding=encoding) as f:
        _translations[locale] = {}
        for i, row in enumerate(csv.reader(f)):
            if not row or len(row) < 2:
                continue
            row = [escape.to_unicode(c).strip() for c in row]
            english, translation = row[:2]
            if len(row) > 2:
                plural = row[2] or "unknown"
            else:
                plural = "unknown"
            if plural not in ("plural", "singular", "unknown"):
                gen_log.error(
                    "Unrecognized plural indicator %r in %s line %d",
                    plural,
                    path,
                    i + 1,
                )
                continue
            _translations[locale].setdefault(plural, {})[english] = translation
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
<SYSTEM_TASK:> Loads translations from `gettext`'s locale tree <END_TASK> <USER_TASK:> Description: def load_gettext_translations(directory: str, domain: str) -> None: """Loads translations from `gettext`'s locale tree Locale tree is similar to system's ``/usr/share/locale``, like:: {directory}/{lang}/LC_MESSAGES/{domain}.mo Three steps are required to have your app translated: 1. Generate POT translation file:: xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc 2. Merge against existing POT file:: msgmerge old.po mydomain.po > new.po 3. Compile:: msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo """
global _translations
global _supported_locales
global _use_gettext
_translations = {}
for lang in os.listdir(directory):
    if lang.startswith("."):
        continue  # skip .svn, etc
    if os.path.isfile(os.path.join(directory, lang)):
        continue
    try:
        os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
        _translations[lang] = gettext.translation(
            domain, directory, languages=[lang]
        )
    except Exception as e:
        gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
        continue
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
_use_gettext = True
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
<SYSTEM_TASK:> Returns the closest match for the given locale code. <END_TASK> <USER_TASK:> Description: def get_closest(cls, *locale_codes: str) -> "Locale": """Returns the closest match for the given locale code."""
for code in locale_codes: if not code: continue code = code.replace("-", "_") parts = code.split("_") if len(parts) > 2: continue elif len(parts) == 2: code = parts[0].lower() + "_" + parts[1].upper() if code in _supported_locales: return cls.get(code) if parts[0].lower() in _supported_locales: return cls.get(parts[0].lower()) return cls.get(_default_locale)
<SYSTEM_TASK:> Returns the Locale for the given locale code. <END_TASK> <USER_TASK:> Description: def get(cls, code: str) -> "Locale": """Returns the Locale for the given locale code. If it is not supported, we raise an exception. """
if code not in cls._cache: assert code in _supported_locales translations = _translations.get(code, None) if translations is None: locale = CSVLocale(code, {}) # type: Locale elif _use_gettext: locale = GettextLocale(code, translations) else: locale = CSVLocale(code, translations) cls._cache[code] = locale return cls._cache[code]
<SYSTEM_TASK:> Returns the translation for the given message for this locale. <END_TASK> <USER_TASK:> Description: def translate( self, message: str, plural_message: str = None, count: int = None ) -> str: """Returns the translation for the given message for this locale. If ``plural_message`` is given, you must also provide ``count``. We return ``plural_message`` when ``count != 1``, and we return the singular form for the given message when ``count == 1``. """
raise NotImplementedError()
<SYSTEM_TASK:> Formats the given date as a day of week. <END_TASK> <USER_TASK:> Description: def format_day( self, date: datetime.datetime, gmt_offset: int = 0, dow: bool = True ) -> str: """Formats the given date as a day of week. Example: "Monday, January 22". You can remove the day of week with ``dow=False``. """
local_date = date - datetime.timedelta(minutes=gmt_offset) _ = self.translate if dow: return _("%(weekday)s, %(month_name)s %(day)s") % { "month_name": self._months[local_date.month - 1], "weekday": self._weekdays[local_date.weekday()], "day": str(local_date.day), } else: return _("%(month_name)s %(day)s") % { "month_name": self._months[local_date.month - 1], "day": str(local_date.day), }
<SYSTEM_TASK:> Returns a comma-separated list for the given list of parts. <END_TASK> <USER_TASK:> Description: def list(self, parts: Any) -> str: """Returns a comma-separated list for the given list of parts. The format is, e.g., "A, B and C", "A and B" or just "A" for lists of size 1. """
_ = self.translate if len(parts) == 0: return "" if len(parts) == 1: return parts[0] comma = u" \u0648 " if self.code.startswith("fa") else u", " return _("%(commas)s and %(last)s") % { "commas": comma.join(parts[:-1]), "last": parts[len(parts) - 1], }
<SYSTEM_TASK:> Returns a comma-separated number for the given integer. <END_TASK> <USER_TASK:> Description: def friendly_number(self, value: int) -> str: """Returns a comma-separated number for the given integer."""
if self.code not in ("en", "en_US"): return str(value) s = str(value) parts = [] while s: parts.append(s[-3:]) s = s[:-3] return ",".join(reversed(parts))
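For example, assuming "en_US" is among the supported locales (it is the default locale unless overridden):

en = Locale.get("en_US")
en.friendly_number(1234567)  # '1,234,567'
en.friendly_number(1000)     # '1,000'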
<SYSTEM_TASK:> Allows to set context for translation, accepts plural forms. <END_TASK> <USER_TASK:> Description: def pgettext( self, context: str, message: str, plural_message: str = None, count: int = None ) -> str: """Allows to set context for translation, accepts plural forms. Usage example:: pgettext("law", "right") pgettext("good", "right") Plural message example:: pgettext("organization", "club", "clubs", len(clubs)) pgettext("stick", "club", "clubs", len(clubs)) To generate POT file with context, add following options to step 1 of `load_gettext_translations` sequence:: xgettext [basic options] --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3 .. versionadded:: 4.2 """
if plural_message is not None:
    assert count is not None
    msgs_with_ctxt = (
        "%s%s%s" % (context, CONTEXT_SEPARATOR, message),
        "%s%s%s" % (context, CONTEXT_SEPARATOR, plural_message),
        count,
    )
    result = self.ngettext(*msgs_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = self.ngettext(message, plural_message, count)
    return result
else:
    msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
    result = self.gettext(msg_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = message
    return result
<SYSTEM_TASK:> None-safe wrapper around url_unescape to handle unmatched optional <END_TASK> <USER_TASK:> Description: def _unquote_or_none(s: Optional[str]) -> Optional[bytes]: # noqa: F811 """None-safe wrapper around url_unescape to handle unmatched optional groups correctly. Note that args are passed as bytes so the handler can decide what encoding to use. """
if s is None: return s return url_unescape(s, encoding=None, plus=False)
<SYSTEM_TASK:> Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate` <END_TASK> <USER_TASK:> Description: def find_handler( self, request: httputil.HTTPServerRequest, **kwargs: Any ) -> Optional[httputil.HTTPMessageDelegate]: """Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate` that can serve the request. Routing implementations may pass additional kwargs to extend the routing logic. :arg httputil.HTTPServerRequest request: current HTTP request. :arg kwargs: additional keyword arguments passed by routing implementation. :returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to process the request. """
raise NotImplementedError()
<SYSTEM_TASK:> Appends new rules to the router. <END_TASK> <USER_TASK:> Description: def add_rules(self, rules: _RuleList) -> None: """Appends new rules to the router. :arg rules: a list of Rule instances (or tuples of arguments, which are passed to Rule constructor). """
for rule in rules:
    if isinstance(rule, (tuple, list)):
        assert len(rule) in (2, 3, 4)
        if isinstance(rule[0], basestring_type):
            rule = Rule(PathMatches(rule[0]), *rule[1:])
        else:
            rule = Rule(*rule)
    self.rules.append(self.process_rule(rule))
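A hedged sketch of the accepted tuple forms; the target objects here are hypothetical, and per `get_target_delegate` below a target may be another `Router`, an `HTTPServerConnectionDelegate`, or a callable:

router = RuleRouter()
router.add_rules([
    (PathMatches(r"/app.*"), app_router),            # 2-tuple: matcher, target
    (r"/api.*", api_router, {"version": 2}),         # 3-tuple adds target kwargs
    Rule(PathMatches(r"/health"), health_callable),  # prebuilt Rule instances also work
])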
<SYSTEM_TASK:> Returns an instance of `~.httputil.HTTPMessageDelegate` for a <END_TASK> <USER_TASK:> Description: def get_target_delegate( self, target: Any, request: httputil.HTTPServerRequest, **target_params: Any ) -> Optional[httputil.HTTPMessageDelegate]: """Returns an instance of `~.httputil.HTTPMessageDelegate` for a Rule's target. This method is called by `~.find_handler` and can be extended to provide additional target types. :arg target: a Rule's target. :arg httputil.HTTPServerRequest request: current request. :arg target_params: additional parameters that can be useful for `~.httputil.HTTPMessageDelegate` creation. """
if isinstance(target, Router): return target.find_handler(request, **target_params) elif isinstance(target, httputil.HTTPServerConnectionDelegate): assert request.connection is not None return target.start_request(request.server_connection, request.connection) elif callable(target): assert request.connection is not None return _CallableAdapter( partial(target, **target_params), request.connection ) return None
<SYSTEM_TASK:> Matches current instance against the request. <END_TASK> <USER_TASK:> Description: def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: """Matches current instance against the request. :arg httputil.HTTPServerRequest request: current HTTP request :returns: a dict of parameters to be passed to the target handler (for example, ``handler_kwargs``, ``path_args``, ``path_kwargs`` can be passed for proper `~.web.RequestHandler` instantiation). An empty dict is a valid (and common) return value to indicate a match when the argument-passing features are not used. ``None`` must be returned to indicate that there is no match."""
raise NotImplementedError()
<SYSTEM_TASK:> Download the page at `url` and parse it for links. <END_TASK> <USER_TASK:> Description: async def get_links_from_url(url): """Download the page at `url` and parse it for links. Returned links have had the fragment after `#` removed, and have been made absolute so, e.g. the URL 'gen.html#tornado.gen.coroutine' becomes 'http://www.tornadoweb.org/en/stable/gen.html'. """
response = await httpclient.AsyncHTTPClient().fetch(url) print("fetched %s" % url) html = response.body.decode(errors="ignore") return [urljoin(url, remove_fragment(new_url)) for new_url in get_links(html)]
<SYSTEM_TASK:> Returns a list of messages newer than the given cursor. <END_TASK> <USER_TASK:> Description: def get_messages_since(self, cursor): """Returns a list of messages newer than the given cursor. ``cursor`` should be the ``id`` of the last message received. """
results = [] for msg in reversed(self.cache): if msg["id"] == cursor: break results.append(msg) results.reverse() return results
<SYSTEM_TASK:> Imports an object by name. <END_TASK> <USER_TASK:> Description: def import_object(name: str) -> Any: """Imports an object by name. ``import_object('x')`` is equivalent to ``import x``. ``import_object('x.y.z')`` is equivalent to ``from x.y import z``. >>> import tornado.escape >>> import_object('tornado.escape') is tornado.escape True >>> import_object('tornado.escape.utf8') is tornado.escape.utf8 True >>> import_object('tornado') is tornado True >>> import_object('tornado.missing_module') Traceback (most recent call last): ... ImportError: No module named missing_module """
if name.count(".") == 0: return __import__(name) parts = name.split(".") obj = __import__(".".join(parts[:-1]), fromlist=[parts[-1]]) try: return getattr(obj, parts[-1]) except AttributeError: raise ImportError("No module named %s" % parts[-1])
<SYSTEM_TASK:> Decompress a chunk, returning newly-available data. <END_TASK> <USER_TASK:> Description: def decompress(self, value: bytes, max_length: int = 0) -> bytes: """Decompress a chunk, returning newly-available data. Some data may be buffered for later processing; `flush` must be called when there is no more input data to ensure that all data was processed. If ``max_length`` is given, some input data may be left over in ``unconsumed_tail``; you must retrieve this value and pass it back to a future call to `decompress` if it is not empty. """
return self.decompressobj.decompress(value, max_length)
<SYSTEM_TASK:> Apply to `RequestHandler` subclasses to enable streaming body support. <END_TASK> <USER_TASK:> Description: def stream_request_body(cls: Type[RequestHandler]) -> Type[RequestHandler]: """Apply to `RequestHandler` subclasses to enable streaming body support. This decorator implies the following changes: * `.HTTPServerRequest.body` is undefined, and body arguments will not be included in `RequestHandler.get_argument`. * `RequestHandler.prepare` is called when the request headers have been read instead of after the entire body has been read. * The subclass must define a method ``data_received(self, data):``, which will be called zero or more times as data is available. Note that if the request has an empty body, ``data_received`` may not be called. * ``prepare`` and ``data_received`` may return Futures (such as via ``@gen.coroutine``, in which case the next method will not be called until those futures have completed. * The regular HTTP method (``post``, ``put``, etc) will be called after the entire body has been read. See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_ for example usage. """
if not issubclass(cls, RequestHandler):
    raise TypeError("expected subclass of RequestHandler, got %r", cls)
cls._stream_request_body = True
return cls
<SYSTEM_TASK:> Use this decorator to remove trailing slashes from the request path. <END_TASK> <USER_TASK:> Description: def removeslash( method: Callable[..., Optional[Awaitable[None]]] ) -> Callable[..., Optional[Awaitable[None]]]: """Use this decorator to remove trailing slashes from the request path. For example, a request to ``/foo/`` would redirect to ``/foo`` with this decorator. Your request handler mapping should use a regular expression like ``r'/foo/*'`` in conjunction with using the decorator. """
@functools.wraps(method)
def wrapper(  # type: ignore
    self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
    if self.request.path.endswith("/"):
        if self.request.method in ("GET", "HEAD"):
            uri = self.request.path.rstrip("/")
            if uri:  # don't try to redirect '/' to ''
                if self.request.query:
                    uri += "?" + self.request.query
                self.redirect(uri, permanent=True)
                return None
        else:
            raise HTTPError(404)
    return method(self, *args, **kwargs)

return wrapper
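Typical usage, matching the docstring's suggestion of an ``r'/foo/*'`` route (the handler name is illustrative):

class FooHandler(RequestHandler):
    @removeslash
    def get(self):
        self.write("no trailing slash here")

app = Application([(r"/foo/*", FooHandler)])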
<SYSTEM_TASK:> Decorate methods with this to require that the user be logged in. <END_TASK> <USER_TASK:> Description: def authenticated( method: Callable[..., Optional[Awaitable[None]]] ) -> Callable[..., Optional[Awaitable[None]]]: """Decorate methods with this to require that the user be logged in. If the user is not logged in, they will be redirected to the configured `login url <RequestHandler.get_login_url>`. If you configure a login url with a query parameter, Tornado will assume you know what you're doing and use it as-is. If not, it will add a `next` parameter so the login page knows where to send you once you're logged in. """
@functools.wraps(method)
def wrapper(  # type: ignore
    self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
    if not self.current_user:
        if self.request.method in ("GET", "HEAD"):
            url = self.get_login_url()
            if "?" not in url:
                if urllib.parse.urlsplit(url).scheme:
                    # if login url is absolute, make next absolute too
                    next_url = self.request.full_url()
                else:
                    assert self.request.uri is not None
                    next_url = self.request.uri
                url += "?" + urlencode(dict(next=next_url))
            self.redirect(url)
            return None
        raise HTTPError(403)
    return method(self, *args, **kwargs)

return wrapper
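A minimal sketch of the decorator in use; the handler, `login_url`, and `cookie_secret` values are illustrative, and the user-lookup logic (`get_current_user`) is omitted:

class ProfileHandler(RequestHandler):
    @authenticated
    def get(self):
        self.write("hello %s" % self.current_user)

app = Application(
    [(r"/profile", ProfileHandler)],
    login_url="/login",
    cookie_secret="change-me",
)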
<SYSTEM_TASK:> Called in async handlers if the client closed the connection. <END_TASK> <USER_TASK:> Description: def on_connection_close(self) -> None: """Called in async handlers if the client closed the connection. Override this to clean up resources associated with long-lived connections. Note that this method is called only if the connection was closed during asynchronous processing; if you need to do cleanup after every request override `on_finish` instead. Proxies may keep a connection open for a time (perhaps indefinitely) after the client has gone away, so this method may not be called promptly after the end user closes their connection. """
if _has_stream_request_body(self.__class__): if not self.request._body_future.done(): self.request._body_future.set_exception(iostream.StreamClosedError()) self.request._body_future.exception()
<SYSTEM_TASK:> Resets all headers and content for this response. <END_TASK> <USER_TASK:> Description: def clear(self) -> None: """Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders(
    {
        "Server": "TornadoServer/%s" % tornado.version,
        "Content-Type": "text/html; charset=UTF-8",
        "Date": httputil.format_timestamp(time.time()),
    }
)
self.set_default_headers()
self._write_buffer = []  # type: List[bytes]
self._status_code = 200
self._reason = httputil.responses[200]
<SYSTEM_TASK:> Sets the status code for our response. <END_TASK> <USER_TASK:> Description: def set_status(self, status_code: int, reason: str = None) -> None: """Sets the status code for our response. :arg int status_code: Response status code. :arg str reason: Human-readable reason phrase describing the status code. If ``None``, it will be filled in from `http.client.responses` or "Unknown". .. versionchanged:: 5.0 No longer validates that the response code is in `http.client.responses`. """
self._status_code = status_code if reason is not None: self._reason = escape.native_str(reason) else: self._reason = httputil.responses.get(status_code, "Unknown")
<SYSTEM_TASK:> Sets the given response header name and value. <END_TASK> <USER_TASK:> Description: def set_header(self, name: str, value: _HeaderTypes) -> None: """Sets the given response header name and value. All header values are converted to strings (`datetime` objects are formatted according to the HTTP specification for the ``Date`` header). """
self._headers[name] = self._convert_header_value(value)
<SYSTEM_TASK:> Adds the given response header and value. <END_TASK> <USER_TASK:> Description: def add_header(self, name: str, value: _HeaderTypes) -> None: """Adds the given response header and value. Unlike `set_header`, `add_header` may be called multiple times to return multiple values for the same header. """
self._headers.add(name, self._convert_header_value(value))
<SYSTEM_TASK:> Clears an outgoing header, undoing a previous `set_header` call. <END_TASK> <USER_TASK:> Description: def clear_header(self, name: str) -> None: """Clears an outgoing header, undoing a previous `set_header` call. Note that this method does not apply to multi-valued headers set by `add_header`. """
if name in self._headers: del self._headers[name]
<SYSTEM_TASK:> Returns a list of the arguments with the given name. <END_TASK> <USER_TASK:> Description: def get_arguments(self, name: str, strip: bool = True) -> List[str]: """Returns a list of the arguments with the given name. If the argument is not present, returns an empty list. This method searches both the query and body arguments. """
# Make sure `get_arguments` isn't accidentally being called with a
# positional argument that's assumed to be a default (like in
# `get_argument`.)
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
<SYSTEM_TASK:> Returns the value of the argument with the given name <END_TASK> <USER_TASK:> Description: def get_body_argument( self, name: str, default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT, strip: bool = True, ) -> Optional[str]: """Returns the value of the argument with the given name from the request body. If default is not provided, the argument is considered to be required, and we raise a `MissingArgumentError` if it is missing. If the argument appears in the url more than once, we return the last value. .. versionadded:: 3.2 """
return self._get_argument(name, default, self.request.body_arguments, strip)
<SYSTEM_TASK:> Returns a list of the body arguments with the given name. <END_TASK> <USER_TASK:> Description: def get_body_arguments(self, name: str, strip: bool = True) -> List[str]: """Returns a list of the body arguments with the given name. If the argument is not present, returns an empty list. .. versionadded:: 3.2 """
return self._get_arguments(name, self.request.body_arguments, strip)
<SYSTEM_TASK:> Returns the value of the argument with the given name <END_TASK> <USER_TASK:> Description: def get_query_argument( self, name: str, default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT, strip: bool = True, ) -> Optional[str]: """Returns the value of the argument with the given name from the request query string. If default is not provided, the argument is considered to be required, and we raise a `MissingArgumentError` if it is missing. If the argument appears in the url more than once, we return the last value. .. versionadded:: 3.2 """
return self._get_argument(name, default, self.request.query_arguments, strip)
<SYSTEM_TASK:> Returns a list of the query arguments with the given name. <END_TASK> <USER_TASK:> Description: def get_query_arguments(self, name: str, strip: bool = True) -> List[str]: """Returns a list of the query arguments with the given name. If the argument is not present, returns an empty list. .. versionadded:: 3.2 """
return self._get_arguments(name, self.request.query_arguments, strip)
<SYSTEM_TASK:> Decodes an argument from the request. <END_TASK> <USER_TASK:> Description: def decode_argument(self, value: bytes, name: str = None) -> str: """Decodes an argument from the request. The argument has been percent-decoded and is now a byte string. By default, this method decodes the argument as utf-8 and returns a unicode string, but this may be overridden in subclasses. This method is used as a filter for both `get_argument()` and for values extracted from the url and passed to `get()`/`post()`/etc. The name of the argument is provided if known, but may be None (e.g. for unnamed groups in the url regex). """
try: return _unicode(value) except UnicodeDecodeError: raise HTTPError( 400, "Invalid unicode in %s: %r" % (name or "url", value[:40]) )
<SYSTEM_TASK:> Returns the value of the request cookie with the given name. <END_TASK> <USER_TASK:> Description: def get_cookie(self, name: str, default: str = None) -> Optional[str]: """Returns the value of the request cookie with the given name. If the named cookie is not present, returns ``default``. This method only returns cookies that were present in the request. It does not see the outgoing cookies set by `set_cookie` in this handler. """
if self.request.cookies is not None and name in self.request.cookies: return self.request.cookies[name].value return default
<SYSTEM_TASK:> Deletes the cookie with the given name. <END_TASK> <USER_TASK:> Description: def clear_cookie(self, name: str, path: str = "/", domain: str = None) -> None: """Deletes the cookie with the given name. Due to limitations of the cookie protocol, you must pass the same path and domain to clear a cookie as were used when that cookie was set (but there is no way to find out on the server side which values were used for a given cookie). Similar to `set_cookie`, the effect of this method will not be seen until the following request. """
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365) self.set_cookie(name, value="", path=path, expires=expires, domain=domain)
<SYSTEM_TASK:> Deletes all the cookies the user sent with this request. <END_TASK> <USER_TASK:> Description: def clear_all_cookies(self, path: str = "/", domain: str = None) -> None: """Deletes all the cookies the user sent with this request. See `clear_cookie` for more information on the path and domain parameters. Similar to `set_cookie`, the effect of this method will not be seen until the following request. .. versionchanged:: 3.2 Added the ``path`` and ``domain`` parameters. """
for name in self.request.cookies: self.clear_cookie(name, path=path, domain=domain)
<SYSTEM_TASK:> Signs and timestamps a cookie so it cannot be forged. <END_TASK> <USER_TASK:> Description: def set_secure_cookie( self, name: str, value: Union[str, bytes], expires_days: int = 30, version: int = None, **kwargs: Any ) -> None: """Signs and timestamps a cookie so it cannot be forged. You must specify the ``cookie_secret`` setting in your Application to use this method. It should be a long, random sequence of bytes to be used as the HMAC secret for the signature. To read a cookie set with this method, use `get_secure_cookie()`. Note that the ``expires_days`` parameter sets the lifetime of the cookie in the browser, but is independent of the ``max_age_days`` parameter to `get_secure_cookie`. Secure cookies may contain arbitrary byte values, not just unicode strings (unlike regular cookies) Similar to `set_cookie`, the effect of this method will not be seen until the following request. .. versionchanged:: 3.2.1 Added the ``version`` argument. Introduced cookie version 2 and made it the default. """
self.set_cookie( name, self.create_signed_value(name, value, version=version), expires_days=expires_days, **kwargs )
<SYSTEM_TASK:> Signs and timestamps a string so it cannot be forged. <END_TASK> <USER_TASK:> Description: def create_signed_value( self, name: str, value: Union[str, bytes], version: int = None ) -> bytes: """Signs and timestamps a string so it cannot be forged. Normally used via set_secure_cookie, but provided as a separate method for non-cookie uses. To decode a value not stored as a cookie use the optional value argument to get_secure_cookie. .. versionchanged:: 3.2.1 Added the ``version`` argument. Introduced cookie version 2 and made it the default. """
self.require_setting("cookie_secret", "secure cookies") secret = self.application.settings["cookie_secret"] key_version = None if isinstance(secret, dict): if self.application.settings.get("key_version") is None: raise Exception("key_version setting must be used for secret_key dicts") key_version = self.application.settings["key_version"] return create_signed_value( secret, name, value, version=version, key_version=key_version )
<SYSTEM_TASK:> Returns the given signed cookie if it validates, or None. <END_TASK> <USER_TASK:> Description: def get_secure_cookie( self, name: str, value: str = None, max_age_days: int = 31, min_version: int = None, ) -> Optional[bytes]: """Returns the given signed cookie if it validates, or None. The decoded cookie value is returned as a byte string (unlike `get_cookie`). Similar to `get_cookie`, this method only returns cookies that were present in the request. It does not see outgoing cookies set by `set_secure_cookie` in this handler. .. versionchanged:: 3.2.1 Added the ``min_version`` argument. Introduced cookie version 2; both versions 1 and 2 are accepted by default. """
self.require_setting("cookie_secret", "secure cookies") if value is None: value = self.get_cookie(name) return decode_signed_value( self.application.settings["cookie_secret"], name, value, max_age_days=max_age_days, min_version=min_version, )
<SYSTEM_TASK:> Returns the signing key version of the secure cookie. <END_TASK> <USER_TASK:> Description: def get_secure_cookie_key_version( self, name: str, value: str = None ) -> Optional[int]: """Returns the signing key version of the secure cookie. The version is returned as int. """
self.require_setting("cookie_secret", "secure cookies") if value is None: value = self.get_cookie(name) if value is None: return None return get_signature_key_version(value)
<SYSTEM_TASK:> Writes the given chunk to the output buffer. <END_TASK> <USER_TASK:> Description: def write(self, chunk: Union[str, bytes, dict]) -> None: """Writes the given chunk to the output buffer. To write the output to the network, use the `flush()` method below. If the given chunk is a dictionary, we write it as JSON and set the Content-Type of the response to be ``application/json``. (if you want to send JSON as a different ``Content-Type``, call ``set_header`` *after* calling ``write()``). Note that lists are not converted to JSON because of a potential cross-site security vulnerability. All JSON output should be wrapped in a dictionary. More details at http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and https://github.com/facebook/tornado/issues/1009 """
if self._finished:
    raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
    message = "write() only accepts bytes, unicode, and dict objects"
    if isinstance(chunk, list):
        message += (
            ". Lists not accepted for security reasons; see "
            + "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"  # noqa: E501
        )
    raise TypeError(message)
if isinstance(chunk, dict):
    chunk = escape.json_encode(chunk)
    self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
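For example, passing a dict produces a JSON response (the handler name is illustrative):

class StatusHandler(RequestHandler):
    def get(self):
        # write() JSON-encodes the dict and sets
        # Content-Type: application/json; charset=UTF-8
        self.write({"status": "ok", "items": [1, 2, 3]})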
<SYSTEM_TASK:> Renders the template with the given arguments as the response. <END_TASK> <USER_TASK:> Description: def render(self, template_name: str, **kwargs: Any) -> "Future[None]": """Renders the template with the given arguments as the response. ``render()`` calls ``finish()``, so no other output methods can be called after it. Returns a `.Future` with the same semantics as the one returned by `finish`. Awaiting this `.Future` is optional. .. versionchanged:: 5.1 Now returns a `.Future` instead of ``None``. """
if self._finished:
    raise RuntimeError("Cannot render() after finish()")
html = self.render_string(template_name, **kwargs)

# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
    embed_part = module.embedded_javascript()
    if embed_part:
        js_embed.append(utf8(embed_part))
    file_part = module.javascript_files()
    if file_part:
        if isinstance(file_part, (unicode_type, bytes)):
            js_files.append(_unicode(file_part))
        else:
            js_files.extend(file_part)
    embed_part = module.embedded_css()
    if embed_part:
        css_embed.append(utf8(embed_part))
    file_part = module.css_files()
    if file_part:
        if isinstance(file_part, (unicode_type, bytes)):
            css_files.append(_unicode(file_part))
        else:
            css_files.extend(file_part)
    head_part = module.html_head()
    if head_part:
        html_heads.append(utf8(head_part))
    body_part = module.html_body()
    if body_part:
        html_bodies.append(utf8(body_part))

if js_files:
    # Maintain order of JavaScript files given by modules
    js = self.render_linked_js(js_files)
    sloc = html.rindex(b"</body>")
    html = html[:sloc] + utf8(js) + b"\n" + html[sloc:]
if js_embed:
    js_bytes = self.render_embed_js(js_embed)
    sloc = html.rindex(b"</body>")
    html = html[:sloc] + js_bytes + b"\n" + html[sloc:]
if css_files:
    css = self.render_linked_css(css_files)
    hloc = html.index(b"</head>")
    html = html[:hloc] + utf8(css) + b"\n" + html[hloc:]
if css_embed:
    css_bytes = self.render_embed_css(css_embed)
    hloc = html.index(b"</head>")
    html = html[:hloc] + css_bytes + b"\n" + html[hloc:]
if html_heads:
    hloc = html.index(b"</head>")
    html = html[:hloc] + b"".join(html_heads) + b"\n" + html[hloc:]
if html_bodies:
    hloc = html.index(b"</body>")
    html = html[:hloc] + b"".join(html_bodies) + b"\n" + html[hloc:]
return self.finish(html)
<SYSTEM_TASK:> Default method used to render the final js links for the rendered webpage. <END_TASK> <USER_TASK:> Description: def render_linked_js(self, js_files: Iterable[str]) -> str: """Default method used to render the final js links for the rendered webpage. Override this method in a sub-classed controller to change the output. """
paths = [] unique_paths = set() # type: Set[str] for path in js_files: if not is_absolute(path): path = self.static_url(path) if path not in unique_paths: paths.append(path) unique_paths.add(path) return "".join( '<script src="' + escape.xhtml_escape(p) + '" type="text/javascript"></script>' for p in paths )
<SYSTEM_TASK:> Default method used to render the final embedded js for the rendered webpage. <END_TASK> <USER_TASK:> Description: def render_embed_js(self, js_embed: Iterable[bytes]) -> bytes: """Default method used to render the final embedded js for the rendered webpage. Override this method in a sub-classed controller to change the output. """
return ( b'<script type="text/javascript">\n//<![CDATA[\n' + b"\n".join(js_embed) + b"\n//]]>\n</script>" )
<SYSTEM_TASK:> Default method used to render the final css links for the rendered webpage. <END_TASK> <USER_TASK:> Description: def render_linked_css(self, css_files: Iterable[str]) -> str: """Default method used to render the final css links for the rendered webpage. Override this method in a sub-classed controller to change the output. """
paths = [] unique_paths = set() # type: Set[str] for path in css_files: if not is_absolute(path): path = self.static_url(path) if path not in unique_paths: paths.append(path) unique_paths.add(path) return "".join( '<link href="' + escape.xhtml_escape(p) + '" ' 'type="text/css" rel="stylesheet"/>' for p in paths )
<SYSTEM_TASK:> Default method used to render the final embedded css for the rendered webpage. <END_TASK> <USER_TASK:> Description: def render_embed_css(self, css_embed: Iterable[bytes]) -> bytes: """Default method used to render the final embedded css for the rendered webpage. Override this method in a sub-classed controller to change the output. """
return b'<style type="text/css">\n' + b"\n".join(css_embed) + b"\n</style>"
<SYSTEM_TASK:> Generate the given template with the given arguments. <END_TASK> <USER_TASK:> Description: def render_string(self, template_name: str, **kwargs: Any) -> bytes: """Generate the given template with the given arguments. We return the generated byte string (in utf8). To generate and write a template as a response, use render() above. """
# If no template_path is specified, use the path of the calling file template_path = self.get_template_path() if not template_path: frame = sys._getframe(0) web_file = frame.f_code.co_filename while frame.f_code.co_filename == web_file: frame = frame.f_back assert frame.f_code.co_filename is not None template_path = os.path.dirname(frame.f_code.co_filename) with RequestHandler._template_loader_lock: if template_path not in RequestHandler._template_loaders: loader = self.create_template_loader(template_path) RequestHandler._template_loaders[template_path] = loader else: loader = RequestHandler._template_loaders[template_path] t = loader.load(template_name) namespace = self.get_template_namespace() namespace.update(kwargs) return t.generate(**namespace)
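A sketch showing ``render_string()`` used for a fragment (template name and data are hypothetical):

import tornado.web

class DigestHandler(tornado.web.RequestHandler):
    def get(self):
        # render_string() returns utf-8 bytes without finishing the request,
        # so the rendered fragment can be embedded in a larger response.
        fragment = self.render_string("sidebar.html", items=["a", "b", "c"])
        self.write(b"<html><body>" + fragment + b"</body></html>")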
<SYSTEM_TASK:> Returns a dictionary to be used as the default template namespace. <END_TASK> <USER_TASK:> Description: def get_template_namespace(self) -> Dict[str, Any]: """Returns a dictionary to be used as the default template namespace. May be overridden by subclasses to add or modify values. The results of this method will be combined with additional defaults in the `tornado.template` module and keyword arguments to `render` or `render_string`. """
namespace = dict( handler=self, request=self.request, current_user=self.current_user, locale=self.locale, _=self.locale.translate, pgettext=self.locale.pgettext, static_url=self.static_url, xsrf_form_html=self.xsrf_form_html, reverse_url=self.reverse_url, ) namespace.update(self.ui) return namespace
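One possible override, adding a value on top of the defaults (the extra ``now`` variable is an assumption for illustration):

import datetime
import tornado.web

class BaseHandler(tornado.web.RequestHandler):
    def get_template_namespace(self):
        # Start from the default namespace and expose an extra value that
        # templates rendered by this handler can reference as {{ now }}.
        namespace = super().get_template_namespace()
        namespace["now"] = datetime.datetime.utcnow()
        return namespace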
<SYSTEM_TASK:> Returns a new template loader for the given path. <END_TASK> <USER_TASK:> Description: def create_template_loader(self, template_path: str) -> template.BaseLoader: """Returns a new template loader for the given path. May be overridden by subclasses. By default returns a directory-based loader on the given path, using the ``autoescape`` and ``template_whitespace`` application settings. If a ``template_loader`` application setting is supplied, uses that instead. """
settings = self.application.settings if "template_loader" in settings: return settings["template_loader"] kwargs = {} if "autoescape" in settings: # autoescape=None means "no escaping", so we have to be sure # to only pass this kwarg if the user asked for it. kwargs["autoescape"] = settings["autoescape"] if "template_whitespace" in settings: kwargs["whitespace"] = settings["template_whitespace"] return template.Loader(template_path, **kwargs)
<SYSTEM_TASK:> Flushes the current output buffer to the network. <END_TASK> <USER_TASK:> Description: def flush(self, include_footers: bool = False) -> "Future[None]": """Flushes the current output buffer to the network. The returned `.Future` can be used for flow control: it resolves once all flushed data has been written to the socket. .. versionchanged:: 4.0 Now returns a `.Future` if no callback is given. .. versionchanged:: 6.0 The ``callback`` argument was removed. """
assert self.request.connection is not None chunk = b"".join(self._write_buffer) self._write_buffer = [] if not self._headers_written: self._headers_written = True for transform in self._transforms: assert chunk is not None self._status_code, self._headers, chunk = transform.transform_first_chunk( self._status_code, self._headers, chunk, include_footers ) # Ignore the chunk and only write the headers for HEAD requests if self.request.method == "HEAD": chunk = b"" # Finalize the cookie headers (which have been stored in a side # object so an outgoing cookie could be overwritten before it # is sent). if hasattr(self, "_new_cookie"): for cookie in self._new_cookie.values(): self.add_header("Set-Cookie", cookie.OutputString(None)) start_line = httputil.ResponseStartLine("", self._status_code, self._reason) return self.request.connection.write_headers( start_line, self._headers, chunk ) else: for transform in self._transforms: chunk = transform.transform_chunk(chunk, include_footers) # Ignore the chunk and only write the headers for HEAD requests if self.request.method != "HEAD": return self.request.connection.write(chunk) else: future = Future() # type: Future[None] future.set_result(None) return future
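A streaming sketch built on ``flush()`` (route and timing are made up); awaiting the returned Future provides the flow control described above:

import asyncio
import tornado.web

class StreamHandler(tornado.web.RequestHandler):
    async def get(self):
        for i in range(5):
            self.write("chunk %d\n" % i)
            # The await resolves once the flushed data has been written
            # to the socket, so the loop cannot outrun the client.
            await self.flush()
            await asyncio.sleep(1)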
<SYSTEM_TASK:> Sends the given HTTP error code to the browser. <END_TASK> <USER_TASK:> Description: def send_error(self, status_code: int = 500, **kwargs: Any) -> None: """Sends the given HTTP error code to the browser. If `flush()` has already been called, it is not possible to send an error, so this method will simply terminate the response. If output has been written but not yet flushed, it will be discarded and replaced with the error page. Override `write_error()` to customize the error page that is returned. Additional keyword arguments are passed through to `write_error`. """
if self._headers_written: gen_log.error("Cannot send error response after headers written") if not self._finished: # If we get an error between writing headers and finishing, # we are unlikely to be able to finish due to a # Content-Length mismatch. Try anyway to release the # socket. try: self.finish() except Exception: gen_log.error("Failed to flush partial response", exc_info=True) return self.clear() reason = kwargs.get("reason") if "exc_info" in kwargs: exception = kwargs["exc_info"][1] if isinstance(exception, HTTPError) and exception.reason: reason = exception.reason self.set_status(status_code, reason=reason) try: self.write_error(status_code, **kwargs) except Exception: app_log.error("Uncaught exception in write_error", exc_info=True) if not self._finished: self.finish()
<SYSTEM_TASK:> Override to implement custom error pages. <END_TASK> <USER_TASK:> Description: def write_error(self, status_code: int, **kwargs: Any) -> None: """Override to implement custom error pages. ``write_error`` may call `write`, `render`, `set_header`, etc to produce output as usual. If this error was caused by an uncaught exception (including HTTPError), an ``exc_info`` triple will be available as ``kwargs["exc_info"]``. Note that this exception may not be the "current" exception for purposes of methods like ``sys.exc_info()`` or ``traceback.format_exc``. """
if self.settings.get("serve_traceback") and "exc_info" in kwargs: # in debug mode, try to send a traceback self.set_header("Content-Type", "text/plain") for line in traceback.format_exception(*kwargs["exc_info"]): self.write(line) self.finish() else: self.finish( "<html><title>%(code)d: %(message)s</title>" "<body>%(code)d: %(message)s</body></html>" % {"code": status_code, "message": self._reason} )
<SYSTEM_TASK:> The locale for the current session. <END_TASK> <USER_TASK:> Description: def locale(self) -> tornado.locale.Locale: """The locale for the current session. Determined by either `get_user_locale`, which you can override to set the locale based on, e.g., a user preference stored in a database, or `get_browser_locale`, which uses the ``Accept-Language`` header. .. versionchanged:: 4.1 Added a property setter. """
if not hasattr(self, "_locale"): loc = self.get_user_locale() if loc is not None: self._locale = loc else: self._locale = self.get_browser_locale() assert self._locale return self._locale
<SYSTEM_TASK:> The authenticated user for this request. <END_TASK> <USER_TASK:> Description: def current_user(self) -> Any: """The authenticated user for this request. This is set in one of two ways: * A subclass may override `get_current_user()`, which will be called automatically the first time ``self.current_user`` is accessed. `get_current_user()` will only be called once per request, and is cached for future access:: def get_current_user(self): user_cookie = self.get_secure_cookie("user") if user_cookie: return json.loads(user_cookie) return None * It may be set as a normal variable, typically from an overridden `prepare()`:: @gen.coroutine def prepare(self): user_id_cookie = self.get_secure_cookie("user_id") if user_id_cookie: self.current_user = yield load_user(user_id_cookie) Note that `prepare()` may be a coroutine while `get_current_user()` may not, so the latter form is necessary if loading the user requires asynchronous operations. The user object may be any type of the application's choosing. """
if not hasattr(self, "_current_user"): self._current_user = self.get_current_user() return self._current_user
<SYSTEM_TASK:> Read or generate the xsrf token in its raw form. <END_TASK> <USER_TASK:> Description: def _get_raw_xsrf_token(self) -> Tuple[Optional[int], bytes, float]: """Read or generate the xsrf token in its raw form. The raw_xsrf_token is a tuple containing: * version: the version of the cookie from which this token was read, or None if we generated a new token in this request. * token: the raw token data; random (non-ascii) bytes. * timestamp: the time this token was generated (will not be accurate for version 1 cookies) """
if not hasattr(self, "_raw_xsrf_token"): cookie = self.get_cookie("_xsrf") if cookie: version, token, timestamp = self._decode_xsrf_token(cookie) else: version, token, timestamp = None, None, None if token is None: version = None token = os.urandom(16) timestamp = time.time() assert token is not None assert timestamp is not None self._raw_xsrf_token = (version, token, timestamp) return self._raw_xsrf_token
<SYSTEM_TASK:> Convert a cookie string into the tuple form returned by _get_raw_xsrf_token. <END_TASK> <USER_TASK:> Description: def _decode_xsrf_token( self, cookie: str ) -> Tuple[Optional[int], Optional[bytes], Optional[float]]: """Convert a cookie string into the tuple form returned by _get_raw_xsrf_token. """
try: m = _signed_value_version_re.match(utf8(cookie)) if m: version = int(m.group(1)) if version == 2: _, mask_str, masked_token, timestamp_str = cookie.split("|") mask = binascii.a2b_hex(utf8(mask_str)) token = _websocket_mask(mask, binascii.a2b_hex(utf8(masked_token))) timestamp = int(timestamp_str) return version, token, timestamp else: # Treat unknown versions as not present instead of failing. raise Exception("Unknown xsrf cookie version") else: version = 1 try: token = binascii.a2b_hex(utf8(cookie)) except (binascii.Error, TypeError): token = utf8(cookie) # We don't have a usable timestamp in older versions. timestamp = int(time.time()) return (version, token, timestamp) except Exception: # Catch exceptions and return nothing instead of failing. gen_log.debug("Uncaught exception in _decode_xsrf_token", exc_info=True) return None, None, None
<SYSTEM_TASK:> Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument. <END_TASK> <USER_TASK:> Description: def check_xsrf_cookie(self) -> None: """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument. To prevent cross-site request forgery, we set an ``_xsrf`` cookie and include the same value as a non-cookie field with all ``POST`` requests. If the two do not match, we reject the form submission as a potential forgery. The ``_xsrf`` value may be set as either a form field named ``_xsrf`` or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken`` (the latter is accepted for compatibility with Django). See http://en.wikipedia.org/wiki/Cross-site_request_forgery .. versionchanged:: 3.2.2 Added support for cookie version 2. Both versions 1 and 2 are supported. """
# Prior to release 1.1.1, this check was ignored if the HTTP header # ``X-Requested-With: XMLHTTPRequest`` was present. This exception # has been shown to be insecure and has been removed. For more # information please see # http://www.djangoproject.com/weblog/2011/feb/08/security/ # http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails token = ( self.get_argument("_xsrf", None) or self.request.headers.get("X-Xsrftoken") or self.request.headers.get("X-Csrftoken") ) if not token: raise HTTPError(403, "'_xsrf' argument missing from POST") _, token, _ = self._decode_xsrf_token(token) _, expected_token, _ = self._get_raw_xsrf_token() if not token: raise HTTPError(403, "'_xsrf' argument has invalid format") if not hmac.compare_digest(utf8(token), utf8(expected_token)): raise HTTPError(403, "XSRF cookie does not match POST argument")
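Enabling the check happens through application settings; a sketch (handler, route, and secret are placeholders):

import tornado.web

class MessageHandler(tornado.web.RequestHandler):  # hypothetical handler
    def post(self):
        self.write({"saved": True})

application = tornado.web.Application(
    [(r"/new_message", MessageHandler)],
    cookie_secret="__CHANGE_ME__",
    xsrf_cookies=True,  # POST/PUT/DELETE now require a matching _xsrf token
)

# Templates then include the token in every form submitted via POST, e.g.:
#   <form action="/new_message" method="post">
#     {% module xsrf_form_html() %}
#     <input type="text" name="message"/>
#   </form>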
<SYSTEM_TASK:> Returns a static URL for the given relative static file path. <END_TASK> <USER_TASK:> Description: def static_url(self, path: str, include_host: bool = None, **kwargs: Any) -> str: """Returns a static URL for the given relative static file path. This method requires you set the ``static_path`` setting in your application (which specifies the root directory of your static files). This method returns a versioned url (by default appending ``?v=<signature>``), which allows the static files to be cached indefinitely. This can be disabled by passing ``include_version=False`` (in the default implementation; other static file implementations are not required to support this, but they may support other options). By default this method returns URLs relative to the current host, but if ``include_host`` is true the URL returned will be absolute. If this handler has an ``include_host`` attribute, that value will be used as the default for all `static_url` calls that do not pass ``include_host`` as a keyword argument. """
self.require_setting("static_path", "static_url") get_url = self.settings.get( "static_handler_class", StaticFileHandler ).make_static_url if include_host is None: include_host = getattr(self, "include_host", False) if include_host: base = self.request.protocol + "://" + self.request.host else: base = "" return base + get_url(self.settings, path, **kwargs)
<SYSTEM_TASK:> Computes the etag header to be used for this request. <END_TASK> <USER_TASK:> Description: def compute_etag(self) -> Optional[str]: """Computes the etag header to be used for this request. By default uses a hash of the content written so far. May be overridden to provide custom etag implementations, or may return None to disable tornado's default etag support. """
hasher = hashlib.sha1() for part in self._write_buffer: hasher.update(part) return '"%s"' % hasher.hexdigest()
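For example, an override that returns None to opt a handler out of etag support, as the docstring permits:

import tornado.web

class NoEtagHandler(tornado.web.RequestHandler):
    def compute_etag(self):
        # Disables the automatic Etag header and If-None-Match/304 handling
        # for responses produced by this handler.
        return None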
<SYSTEM_TASK:> Checks the ``Etag`` header against the request's ``If-None-Match``. <END_TASK> <USER_TASK:> Description: def check_etag_header(self) -> bool: """Checks the ``Etag`` header against the request's ``If-None-Match``. Returns ``True`` if the request's Etag matches and a 304 should be returned. For example:: self.set_etag_header() if self.check_etag_header(): self.set_status(304) return This method is called automatically when the request is finished, but may be called earlier for applications that override `compute_etag` and want to do an early check for ``If-None-Match`` before completing the request. The ``Etag`` header should be set (perhaps with `set_etag_header`) before calling this method. """
computed_etag = utf8(self._headers.get("Etag", "")) # Find all weak and strong etag values from If-None-Match header # because RFC 7232 allows multiple etag values in a single header. etags = re.findall( br'\*|(?:W/)?"[^"]*"', utf8(self.request.headers.get("If-None-Match", "")) ) if not computed_etag or not etags: return False match = False if etags[0] == b"*": match = True else: # Use a weak comparison when comparing entity-tags. def val(x: bytes) -> bytes: return x[2:] if x.startswith(b"W/") else x for etag in etags: if val(etag) == val(computed_etag): match = True break return match
<SYSTEM_TASK:> Executes this request with the given output transforms. <END_TASK> <USER_TASK:> Description: async def _execute( self, transforms: List["OutputTransform"], *args: bytes, **kwargs: bytes ) -> None: """Executes this request with the given output transforms."""
self._transforms = transforms try: if self.request.method not in self.SUPPORTED_METHODS: raise HTTPError(405) self.path_args = [self.decode_argument(arg) for arg in args] self.path_kwargs = dict( (k, self.decode_argument(v, name=k)) for (k, v) in kwargs.items() ) # If XSRF cookies are turned on, reject form submissions without # the proper cookie if self.request.method not in ( "GET", "HEAD", "OPTIONS", ) and self.application.settings.get("xsrf_cookies"): self.check_xsrf_cookie() result = self.prepare() if result is not None: result = await result if self._prepared_future is not None: # Tell the Application we've finished with prepare() # and are ready for the body to arrive. future_set_result_unless_cancelled(self._prepared_future, None) if self._finished: return if _has_stream_request_body(self.__class__): # In streaming mode request.body is a Future that signals # the body has been completely received. The Future has no # result; the data has been passed to self.data_received # instead. try: await self.request._body_future except iostream.StreamClosedError: return method = getattr(self, self.request.method.lower()) result = method(*self.path_args, **self.path_kwargs) if result is not None: result = await result if self._auto_finish and not self._finished: self.finish() except Exception as e: try: self._handle_request_exception(e) except Exception: app_log.error("Exception in exception handler", exc_info=True) finally: # Unset result to avoid circular references result = None if self._prepared_future is not None and not self._prepared_future.done(): # In case we failed before setting _prepared_future, do it # now (to unblock the HTTP server). Note that this is not # in a finally block to avoid GC issues prior to Python 3.4. self._prepared_future.set_result(None)
<SYSTEM_TASK:> Override to customize logging of uncaught exceptions. <END_TASK> <USER_TASK:> Description: def log_exception( self, typ: "Optional[Type[BaseException]]", value: Optional[BaseException], tb: Optional[TracebackType], ) -> None: """Override to customize logging of uncaught exceptions. By default logs instances of `HTTPError` as warnings without stack traces (on the ``tornado.general`` logger), and all other exceptions as errors with stack traces (on the ``tornado.application`` logger). .. versionadded:: 3.1 """
if isinstance(value, HTTPError): if value.log_message: format = "%d %s: " + value.log_message args = [value.status_code, self._request_summary()] + list(value.args) gen_log.warning(format, *args) else: app_log.error( # type: ignore "Uncaught exception %s\n%r", self._request_summary(), self.request, exc_info=(typ, value, tb), )
<SYSTEM_TASK:> Starts an HTTP server for this application on the given port. <END_TASK> <USER_TASK:> Description: def listen(self, port: int, address: str = "", **kwargs: Any) -> HTTPServer: """Starts an HTTP server for this application on the given port. This is a convenience alias for creating an `.HTTPServer` object and calling its listen method. Keyword arguments not supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the `.HTTPServer` constructor. For advanced uses (e.g. multi-process mode), do not use this method; create an `.HTTPServer` and call its `.TCPServer.bind`/`.TCPServer.start` methods directly. Note that after calling this method you still need to call ``IOLoop.current().start()`` to start the server. Returns the `.HTTPServer` object. .. versionchanged:: 4.3 Now returns the `.HTTPServer` object. """
server = HTTPServer(self, **kwargs) server.listen(port, address) return server
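The usual single-process startup sequence the docstring describes (port and handler are placeholders):

import tornado.ioloop
import tornado.web

class MainHandler(tornado.web.RequestHandler):  # hypothetical handler
    def get(self):
        self.write("Hello, world")

if __name__ == "__main__":
    app = tornado.web.Application([(r"/", MainHandler)])
    app.listen(8888)
    # listen() only creates and binds the HTTPServer; the IOLoop must still run.
    tornado.ioloop.IOLoop.current().start()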
<SYSTEM_TASK:> Appends the given handlers to our handler list. <END_TASK> <USER_TASK:> Description: def add_handlers(self, host_pattern: str, host_handlers: _RuleList) -> None: """Appends the given handlers to our handler list. Host patterns are processed sequentially in the order they were added. All matching patterns will be considered. """
host_matcher = HostMatches(host_pattern) rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers)) self.default_router.rules.insert(-1, rule) if self.default_host is not None: self.wildcard_router.add_rules( [(DefaultHostMatches(self, host_matcher.host_pattern), host_handlers)] )
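A virtual-host sketch (hostnames and handlers are placeholders):

import tornado.web

class MainHandler(tornado.web.RequestHandler):  # hypothetical handlers
    def get(self):
        self.write("main site")

class ApiStatusHandler(tornado.web.RequestHandler):
    def get(self):
        self.write({"ok": True})

app = tornado.web.Application([(r"/", MainHandler)])
# Requests whose Host header matches api.example.com use the added handlers.
app.add_handlers(r"api\.example\.com", [(r"/v1/status", ApiStatusHandler)])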
<SYSTEM_TASK:> Returns a `~.httputil.HTTPMessageDelegate` that can serve a request for the application and a `RequestHandler` subclass. <END_TASK> <USER_TASK:> Description: def get_handler_delegate( self, request: httputil.HTTPServerRequest, target_class: Type[RequestHandler], target_kwargs: Dict[str, Any] = None, path_args: List[bytes] = None, path_kwargs: Dict[str, bytes] = None, ) -> "_HandlerDelegate": """Returns a `~.httputil.HTTPMessageDelegate` that can serve a request for the application and the given `RequestHandler` subclass. :arg httputil.HTTPServerRequest request: current HTTP request. :arg RequestHandler target_class: a `RequestHandler` class. :arg dict target_kwargs: keyword arguments for ``target_class`` constructor. :arg list path_args: positional arguments for ``target_class`` HTTP method that will be executed while handling a request (``get``, ``post`` or any other). :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method. """
return _HandlerDelegate( self, request, target_class, target_kwargs, path_args, path_kwargs )
<SYSTEM_TASK:> Returns a URL path for handler named ``name`` <END_TASK> <USER_TASK:> Description: def reverse_url(self, name: str, *args: Any) -> str: """Returns a URL path for handler named ``name`` The handler must be added to the application as a named `URLSpec`. Args will be substituted for capturing groups in the `URLSpec` regex. They will be converted to strings if necessary, encoded as utf8, and url-escaped. """
reversed_url = self.default_router.reverse_url(name, *args) if reversed_url is not None: return reversed_url raise KeyError("%s not found in named urls" % name)
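A sketch with a named `URLSpec` (handler name and pattern are placeholders):

from tornado.web import Application, RequestHandler, url

class UserHandler(RequestHandler):  # hypothetical handler
    def get(self, user_id):
        self.write("user %s" % user_id)

application = Application([
    url(r"/user/([0-9]+)", UserHandler, name="user"),
])

# application.reverse_url("user", 42) -> "/user/42"
# Inside a handler, the same lookup is available as self.reverse_url("user", 42).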