text_prompt: string (length 100-17.7k)
code_prompt: string (length 7-9.86k)
<SYSTEM_TASK:> Stops listening for new connections. <END_TASK> <USER_TASK:> Description: def stop(self) -> None: """Stops listening for new connections. Requests currently in progress may still continue after the server is stopped. """
if self._stopped:
    return
self._stopped = True
for fd, sock in self._sockets.items():
    assert sock.fileno() == fd
    # Unregister socket from IOLoop
    self._handlers.pop(fd)()
    sock.close()
<SYSTEM_TASK:> Put an item into the queue, perhaps waiting until there is room. <END_TASK> <USER_TASK:> Description: def put( self, item: _T, timeout: Union[float, datetime.timedelta] = None ) -> "Future[None]": """Put an item into the queue, perhaps waiting until there is room. Returns a Future, which raises `tornado.util.TimeoutError` after a timeout. ``timeout`` may be a number denoting a time (on the same scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time. """
future = Future()  # type: Future[None]
try:
    self.put_nowait(item)
except QueueFull:
    self._putters.append((item, future))
    _set_timeout(future, timeout)
else:
    future.set_result(None)
return future
<SYSTEM_TASK:> Remove and return an item from the queue without blocking. <END_TASK> <USER_TASK:> Description: def get_nowait(self) -> _T: """Remove and return an item from the queue without blocking. Return an item if one is immediately available, else raise `QueueEmpty`. """
self._consume_expired()
if self._putters:
    assert self.full(), "queue not full, why are putters waiting?"
    item, putter = self._putters.popleft()
    self.__put_internal(item)
    future_set_result_unless_cancelled(putter, None)
    return self._get()
elif self.qsize():
    return self._get()
else:
    raise QueueEmpty
<SYSTEM_TASK:> Block until all items in the queue are processed. <END_TASK> <USER_TASK:> Description: def join(self, timeout: Union[float, datetime.timedelta] = None) -> Awaitable[None]: """Block until all items in the queue are processed. Returns an awaitable, which raises `tornado.util.TimeoutError` after a timeout. """
return self._finished.wait(timeout)
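A minimal producer/consumer sketch tying put(), get() and join() together with tornado.queues.Queue; the queue size and item values are illustrative only.

from tornado.ioloop import IOLoop
from tornado.queues import Queue

async def main():
    q = Queue(maxsize=2)

    async def consumer():
        async for item in q:
            try:
                print('consumed', item)
            finally:
                q.task_done()          # lets join() complete

    async def producer():
        for item in range(5):
            await q.put(item)          # waits while the queue is full

    IOLoop.current().spawn_callback(consumer)
    await producer()
    await q.join()                     # resolves once every item is processed

IOLoop.current().run_sync(main)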
<SYSTEM_TASK:> Returns the number of processors on this machine. <END_TASK> <USER_TASK:> Description: def cpu_count() -> int: """Returns the number of processors on this machine."""
if multiprocessing is None:
    return 1
try:
    return multiprocessing.cpu_count()
except NotImplementedError:
    pass
try:
    return os.sysconf("SC_NPROCESSORS_CONF")
except (AttributeError, ValueError):
    pass
gen_log.error("Could not detect number of processors; assuming 1")
return 1
<SYSTEM_TASK:> Starts multiple worker processes. <END_TASK> <USER_TASK:> Description: def fork_processes(num_processes: Optional[int], max_restarts: int = None) -> int: """Starts multiple worker processes. If ``num_processes`` is None or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If ``num_processes`` is given and > 0, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the ``autoreload=True`` option to `tornado.web.Application` which defaults to True when ``debug=True``). When using multiple processes, no IOLoops can be created or referenced until after the call to ``fork_processes``. In each child process, ``fork_processes`` returns its *task id*, a number between 0 and ``num_processes``. Processes that exit abnormally (due to a signal or non-zero exit status) are restarted with the same id (up to ``max_restarts`` times). In the parent process, ``fork_processes`` returns None if all child processes have exited normally, but will otherwise only exit by throwing an exception. max_restarts defaults to 100. Availability: Unix """
if max_restarts is None: max_restarts = 100 global _task_id assert _task_id is None if num_processes is None or num_processes <= 0: num_processes = cpu_count() gen_log.info("Starting %d processes", num_processes) children = {} def start_child(i: int) -> Optional[int]: pid = os.fork() if pid == 0: # child process _reseed_random() global _task_id _task_id = i return i else: children[pid] = i return None for i in range(num_processes): id = start_child(i) if id is not None: return id num_restarts = 0 while children: try: pid, status = os.wait() except OSError as e: if errno_from_exception(e) == errno.EINTR: continue raise if pid not in children: continue id = children.pop(pid) if os.WIFSIGNALED(status): gen_log.warning( "child %d (pid %d) killed by signal %d, restarting", id, pid, os.WTERMSIG(status), ) elif os.WEXITSTATUS(status) != 0: gen_log.warning( "child %d (pid %d) exited with status %d, restarting", id, pid, os.WEXITSTATUS(status), ) else: gen_log.info("child %d (pid %d) exited normally", id, pid) continue num_restarts += 1 if num_restarts > max_restarts: raise RuntimeError("Too many child restarts, giving up") new_id = start_child(id) if new_id is not None: return new_id # All child processes exited cleanly, so exit the master process # instead of just returning to right after the call to # fork_processes (which will probably just start up another IOLoop # unless the caller checks the return value). sys.exit(0)
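A hedged sketch of the usual multi-process startup this function supports (bind sockets before forking, then start one IOLoop per child); the port and handler list are placeholders, and the pattern is Unix-only.

import tornado.httpserver
import tornado.ioloop
import tornado.netutil
import tornado.process
import tornado.web

app = tornado.web.Application()                # add handlers here
sockets = tornado.netutil.bind_sockets(8888)   # bind before forking
tornado.process.fork_processes(0)              # 0 -> one child per CPU
server = tornado.httpserver.HTTPServer(app)
server.add_sockets(sockets)
tornado.ioloop.IOLoop.current().start()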
<SYSTEM_TASK:> Runs ``callback`` when this process exits. <END_TASK> <USER_TASK:> Description: def set_exit_callback(self, callback: Callable[[int], None]) -> None: """Runs ``callback`` when this process exits. The callback takes one argument, the return code of the process. This method uses a ``SIGCHLD`` handler, which is a global setting and may conflict if you have other libraries trying to handle the same signal. If you are using more than one ``IOLoop`` it may be necessary to call `Subprocess.initialize` first to designate one ``IOLoop`` to run the signal handlers. In many cases a close callback on the stdout or stderr streams can be used as an alternative to an exit callback if the signal handler is causing a problem. Availability: Unix """
self._exit_callback = callback
Subprocess.initialize()
Subprocess._waiting[self.pid] = self
Subprocess._try_cleanup_process(self.pid)
<SYSTEM_TASK:> Returns a `.Future` which resolves when the process exits. <END_TASK> <USER_TASK:> Description: def wait_for_exit(self, raise_error: bool = True) -> "Future[int]": """Returns a `.Future` which resolves when the process exits. Usage:: ret = yield proc.wait_for_exit() This is a coroutine-friendly alternative to `set_exit_callback` (and a replacement for the blocking `subprocess.Popen.wait`). By default, raises `subprocess.CalledProcessError` if the process has a non-zero exit status. Use ``wait_for_exit(raise_error=False)`` to suppress this behavior and return the exit status without raising. .. versionadded:: 4.2 Availability: Unix """
future = Future()  # type: Future[int]

def callback(ret: int) -> None:
    if ret != 0 and raise_error:
        # Unfortunately we don't have the original args any more.
        future_set_exception_unless_cancelled(
            future, CalledProcessError(ret, "unknown")
        )
    else:
        future_set_result_unless_cancelled(future, ret)

self.set_exit_callback(callback)
return future
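An assumed usage sketch (Unix-only) pairing Subprocess with wait_for_exit(); the command line is just an example.

from tornado.ioloop import IOLoop
from tornado.process import Subprocess

async def run_ls():
    proc = Subprocess(["ls", "-l"], stdout=Subprocess.STREAM)
    output = await proc.stdout.read_until_close()
    ret = await proc.wait_for_exit()   # raises CalledProcessError on non-zero exit
    return ret, output

IOLoop.current().run_sync(run_ls)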
<SYSTEM_TASK:> Initializes the ``SIGCHLD`` handler. <END_TASK> <USER_TASK:> Description: def initialize(cls) -> None: """Initializes the ``SIGCHLD`` handler. The signal handler is run on an `.IOLoop` to avoid locking issues. Note that the `.IOLoop` used for signal handling need not be the same one used by individual Subprocess objects (as long as the ``IOLoops`` are each running in separate threads). .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. Availability: Unix """
if cls._initialized:
    return
io_loop = ioloop.IOLoop.current()
cls._old_sigchld = signal.signal(
    signal.SIGCHLD,
    lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup),
)
cls._initialized = True
<SYSTEM_TASK:> Called by libcurl when it wants to change the file descriptors <END_TASK> <USER_TASK:> Description: def _handle_socket(self, event: int, fd: int, multi: Any, data: bytes) -> None: """Called by libcurl when it wants to change the file descriptors it cares about. """
event_map = { pycurl.POLL_NONE: ioloop.IOLoop.NONE, pycurl.POLL_IN: ioloop.IOLoop.READ, pycurl.POLL_OUT: ioloop.IOLoop.WRITE, pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE, } if event == pycurl.POLL_REMOVE: if fd in self._fds: self.io_loop.remove_handler(fd) del self._fds[fd] else: ioloop_event = event_map[event] # libcurl sometimes closes a socket and then opens a new # one using the same FD without giving us a POLL_NONE in # between. This is a problem with the epoll IOLoop, # because the kernel can tell when a socket is closed and # removes it from the epoll automatically, causing future # update_handler calls to fail. Since we can't tell when # this has happened, always use remove and re-add # instead of update. if fd in self._fds: self.io_loop.remove_handler(fd) self.io_loop.add_handler(fd, self._handle_events, ioloop_event) self._fds[fd] = ioloop_event
<SYSTEM_TASK:> Called by libcurl to schedule a timeout. <END_TASK> <USER_TASK:> Description: def _set_timeout(self, msecs: int) -> None: """Called by libcurl to schedule a timeout."""
if self._timeout is not None:
    self.io_loop.remove_timeout(self._timeout)
self._timeout = self.io_loop.add_timeout(
    self.io_loop.time() + msecs / 1000.0, self._handle_timeout
)
<SYSTEM_TASK:> Called by IOLoop when there is activity on one of our <END_TASK> <USER_TASK:> Description: def _handle_events(self, fd: int, events: int) -> None: """Called by IOLoop when there is activity on one of our file descriptors. """
action = 0 if events & ioloop.IOLoop.READ: action |= pycurl.CSELECT_IN if events & ioloop.IOLoop.WRITE: action |= pycurl.CSELECT_OUT while True: try: ret, num_handles = self._multi.socket_action(fd, action) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests()
<SYSTEM_TASK:> Called by IOLoop when the requested timeout has passed. <END_TASK> <USER_TASK:> Description: def _handle_timeout(self) -> None: """Called by IOLoop when the requested timeout has passed."""
self._timeout = None while True: try: ret, num_handles = self._multi.socket_action(pycurl.SOCKET_TIMEOUT, 0) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() # In theory, we shouldn't have to do this because curl will # call _set_timeout whenever the timeout changes. However, # sometimes after _handle_timeout we will need to reschedule # immediately even though nothing has changed from curl's # perspective. This is because when socket_action is # called with SOCKET_TIMEOUT, libcurl decides internally which # timeouts need to be processed by using a monotonic clock # (where available) while tornado uses python's time.time() # to decide when timeouts have occurred. When those clocks # disagree on elapsed time (as they will whenever there is an # NTP adjustment), tornado might call _handle_timeout before # libcurl is ready. After each timeout, resync the scheduled # timeout with libcurl's current state. new_timeout = self._multi.timeout() if new_timeout >= 0: self._set_timeout(new_timeout)
<SYSTEM_TASK:> Called by IOLoop periodically to ask libcurl to process any <END_TASK> <USER_TASK:> Description: def _handle_force_timeout(self) -> None: """Called by IOLoop periodically to ask libcurl to process any events it may have forgotten about. """
while True:
    try:
        ret, num_handles = self._multi.socket_all()
    except pycurl.error as e:
        ret = e.args[0]
    if ret != pycurl.E_CALL_MULTI_PERFORM:
        break
self._finish_pending_requests()
<SYSTEM_TASK:> Process any requests that were completed by the last <END_TASK> <USER_TASK:> Description: def _finish_pending_requests(self) -> None: """Process any requests that were completed by the last call to multi.socket_action. """
while True:
    num_q, ok_list, err_list = self._multi.info_read()
    for curl in ok_list:
        self._finish(curl)
    for curl, errnum, errmsg in err_list:
        self._finish(curl, errnum, errmsg)
    if num_q == 0:
        break
self._process_queue()
<SYSTEM_TASK:> Starts the mock S3 server on the given port at the given path. <END_TASK> <USER_TASK:> Description: def start(port, root_directory, bucket_depth): """Starts the mock S3 server on the given port at the given path."""
application = S3Application(root_directory, bucket_depth)
http_server = httpserver.HTTPServer(application)
http_server.listen(port)
ioloop.IOLoop.current().start()
<SYSTEM_TASK:> Executes a request, returning an `HTTPResponse`. <END_TASK> <USER_TASK:> Description: def fetch( self, request: Union["HTTPRequest", str], **kwargs: Any ) -> "HTTPResponse": """Executes a request, returning an `HTTPResponse`. The request may be either a string URL or an `HTTPRequest` object. If it is a string, we construct an `HTTPRequest` using any additional kwargs: ``HTTPRequest(request, **kwargs)`` If an error occurs during the fetch, we raise an `HTTPError` unless the ``raise_error`` keyword argument is set to False. """
response = self._io_loop.run_sync(
    functools.partial(self._async_client.fetch, request, **kwargs)
)
return response
<SYSTEM_TASK:> Destroys this HTTP client, freeing any file descriptors used. <END_TASK> <USER_TASK:> Description: def close(self) -> None: """Destroys this HTTP client, freeing any file descriptors used. This method is **not needed in normal use** due to the way that `AsyncHTTPClient` objects are transparently reused. ``close()`` is generally only necessary when either the `.IOLoop` is also being closed, or the ``force_instance=True`` argument was used when creating the `AsyncHTTPClient`. No other methods may be called on the `AsyncHTTPClient` after ``close()``. """
if self._closed: return self._closed = True if self._instance_cache is not None: cached_val = self._instance_cache.pop(self.io_loop, None) # If there's an object other than self in the instance # cache for our IOLoop, something has gotten mixed up. A # value of None appears to be possible when this is called # from a destructor (HTTPClient.__del__) as the weakref # gets cleared before the destructor runs. if cached_val is not None and cached_val is not self: raise RuntimeError("inconsistent AsyncHTTPClient cache")
<SYSTEM_TASK:> Executes a request, asynchronously returning an `HTTPResponse`. <END_TASK> <USER_TASK:> Description: def fetch( self, request: Union[str, "HTTPRequest"], raise_error: bool = True, **kwargs: Any ) -> Awaitable["HTTPResponse"]: """Executes a request, asynchronously returning an `HTTPResponse`. The request may be either a string URL or an `HTTPRequest` object. If it is a string, we construct an `HTTPRequest` using any additional kwargs: ``HTTPRequest(request, **kwargs)`` This method returns a `.Future` whose result is an `HTTPResponse`. By default, the ``Future`` will raise an `HTTPError` if the request returned a non-200 response code (other errors may also be raised if the server could not be contacted). Instead, if ``raise_error`` is set to False, the response will always be returned regardless of the response code. If a ``callback`` is given, it will be invoked with the `HTTPResponse`. In the callback interface, `HTTPError` is not automatically raised. Instead, you must check the response's ``error`` attribute or call its `~HTTPResponse.rethrow` method. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead. The ``raise_error=False`` argument only affects the `HTTPError` raised when a non-200 response code is used, instead of suppressing all errors. """
if self._closed: raise RuntimeError("fetch() called on closed AsyncHTTPClient") if not isinstance(request, HTTPRequest): request = HTTPRequest(url=request, **kwargs) else: if kwargs: raise ValueError( "kwargs can't be used if request is an HTTPRequest object" ) # We may modify this (to add Host, Accept-Encoding, etc), # so make sure we don't modify the caller's object. This is also # where normal dicts get converted to HTTPHeaders objects. request.headers = httputil.HTTPHeaders(request.headers) request_proxy = _RequestProxy(request, self.defaults) future = Future() # type: Future[HTTPResponse] def handle_response(response: "HTTPResponse") -> None: if response.error: if raise_error or not response._error_is_response_code: future_set_exception_unless_cancelled(future, response.error) return future_set_result_unless_cancelled(future, response) self.fetch_impl(cast(HTTPRequest, request_proxy), handle_response) return future
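An illustrative coroutine-style fetch (the URLs are placeholders) showing both the default raise_error behaviour and the raise_error=False variant.

from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.ioloop import IOLoop

async def demo():
    client = AsyncHTTPClient()
    try:
        resp = await client.fetch("https://example.com/")
        print(resp.code, len(resp.body))
    except HTTPError as e:
        print("request failed:", e)
    # Or keep non-2xx responses instead of raising:
    resp = await client.fetch("https://example.com/missing", raise_error=False)
    print(resp.code)

IOLoop.current().run_sync(demo)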
<SYSTEM_TASK:> Configures the `AsyncHTTPClient` subclass to use. <END_TASK> <USER_TASK:> Description: def configure( cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any ) -> None: """Configures the `AsyncHTTPClient` subclass to use. ``AsyncHTTPClient()`` actually creates an instance of a subclass. This method may be called with either a class object or the fully-qualified name of such a class (or ``None`` to use the default, ``SimpleAsyncHTTPClient``) If additional keyword arguments are given, they will be passed to the constructor of each subclass instance created. The keyword argument ``max_clients`` determines the maximum number of simultaneous `~AsyncHTTPClient.fetch()` operations that can execute in parallel on each `.IOLoop`. Additional arguments may be supported depending on the implementation class in use. Example:: AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") """
super(AsyncHTTPClient, cls).configure(impl, **kwargs)
<SYSTEM_TASK:> Cleanup unused transports. <END_TASK> <USER_TASK:> Description: def _cleanup(self) -> None: """Cleanup unused transports."""
if self._cleanup_handle: self._cleanup_handle.cancel() now = self._loop.time() timeout = self._keepalive_timeout if self._conns: connections = {} deadline = now - timeout for key, conns in self._conns.items(): alive = [] for proto, use_time in conns: if proto.is_connected(): if use_time - deadline < 0: transport = proto.transport proto.close() if (key.is_ssl and not self._cleanup_closed_disabled): self._cleanup_closed_transports.append( transport) else: alive.append((proto, use_time)) if alive: connections[key] = alive self._conns = connections if self._conns: self._cleanup_handle = helpers.weakref_handle( self, '_cleanup', timeout, self._loop)
<SYSTEM_TASK:> Double confirmation for transport close. <END_TASK> <USER_TASK:> Description: def _cleanup_closed(self) -> None: """Double confirmation for transport close. Some broken ssl servers may leave socket open without proper close. """
if self._cleanup_closed_handle: self._cleanup_closed_handle.cancel() for transport in self._cleanup_closed_transports: if transport is not None: transport.abort() self._cleanup_closed_transports = [] if not self._cleanup_closed_disabled: self._cleanup_closed_handle = helpers.weakref_handle( self, '_cleanup_closed', self._cleanup_closed_period, self._loop)
<SYSTEM_TASK:> Return number of available connections taking into account <END_TASK> <USER_TASK:> Description: def _available_connections(self, key: 'ConnectionKey') -> int: """ Return the number of available connections, taking into account the limit, limit_per_host and the connection key. A return value of less than 1 means that no connections are available. """
if self._limit: # total calc available connections available = self._limit - len(self._acquired) # check limit per host if (self._limit_per_host and available > 0 and key in self._acquired_per_host): acquired = self._acquired_per_host.get(key) assert acquired is not None available = self._limit_per_host - len(acquired) elif self._limit_per_host and key in self._acquired_per_host: # check limit per host acquired = self._acquired_per_host.get(key) assert acquired is not None available = self._limit_per_host - len(acquired) else: available = 1 return available
<SYSTEM_TASK:> Get from pool or create new connection. <END_TASK> <USER_TASK:> Description: async def connect(self, req: 'ClientRequest', traces: List['Trace'], timeout: 'ClientTimeout') -> Connection: """Get from pool or create new connection."""
key = req.connection_key available = self._available_connections(key) # Wait if there are no available connections. if available <= 0: fut = self._loop.create_future() # This connection will now count towards the limit. waiters = self._waiters[key] waiters.append(fut) if traces: for trace in traces: await trace.send_connection_queued_start() try: await fut except BaseException as e: # remove a waiter even if it was cancelled, normally it's # removed when it's notified try: waiters.remove(fut) except ValueError: # fut may no longer be in list pass raise e finally: if not waiters: try: del self._waiters[key] except KeyError: # the key was evicted before. pass if traces: for trace in traces: await trace.send_connection_queued_end() proto = self._get(key) if proto is None: placeholder = cast(ResponseHandler, _TransportPlaceholder()) self._acquired.add(placeholder) self._acquired_per_host[key].add(placeholder) if traces: for trace in traces: await trace.send_connection_create_start() try: proto = await self._create_connection(req, traces, timeout) if self._closed: proto.close() raise ClientConnectionError("Connector is closed.") except BaseException: if not self._closed: self._acquired.remove(placeholder) self._drop_acquired_per_host(key, placeholder) self._release_waiter() raise else: if not self._closed: self._acquired.remove(placeholder) self._drop_acquired_per_host(key, placeholder) if traces: for trace in traces: await trace.send_connection_create_end() else: if traces: for trace in traces: await trace.send_connection_reuseconn() self._acquired.add(proto) self._acquired_per_host[key].add(proto) return Connection(self, key, proto, self._loop)
<SYSTEM_TASK:> Iterates over all waiters until it finds one that is not finished and <END_TASK> <USER_TASK:> Description: def _release_waiter(self) -> None: """ Iterates over all waiters until it finds one that is not finished and belongs to a host that has available connections. """
if not self._waiters: return # Having the dict keys ordered this avoids to iterate # at the same order at each call. queues = list(self._waiters.keys()) random.shuffle(queues) for key in queues: if self._available_connections(key) < 1: continue waiters = self._waiters[key] while waiters: waiter = waiters.popleft() if not waiter.done(): waiter.set_result(None) return
<SYSTEM_TASK:> Create connection. <END_TASK> <USER_TASK:> Description: async def _create_connection(self, req: 'ClientRequest', traces: List['Trace'], timeout: 'ClientTimeout') -> ResponseHandler: """Create connection. Has same keyword arguments as BaseEventLoop.create_connection. """
if req.proxy:
    _, proto = await self._create_proxy_connection(
        req, traces, timeout)
else:
    _, proto = await self._create_direct_connection(
        req, traces, timeout)
return proto
<SYSTEM_TASK:> Logic to get the correct SSL context <END_TASK> <USER_TASK:> Description: def _get_ssl_context(self, req: 'ClientRequest') -> Optional[SSLContext]: """Logic to get the correct SSL context 0. if req.ssl is false, return None 1. if ssl_context is specified in req, use it 2. if _ssl_context is specified in self, use it 3. otherwise: 1. if verify_ssl is not specified in req, use self.ssl_context (will generate a default context according to self.verify_ssl) 2. if verify_ssl is True in req, generate a default SSL context 3. if verify_ssl is False in req, generate a SSL context that won't verify """
if req.is_ssl(): if ssl is None: # pragma: no cover raise RuntimeError('SSL is not supported.') sslcontext = req.ssl if isinstance(sslcontext, ssl.SSLContext): return sslcontext if sslcontext is not None: # not verified or fingerprinted return self._make_ssl_context(False) sslcontext = self._ssl if isinstance(sslcontext, ssl.SSLContext): return sslcontext if sslcontext is not None: # not verified or fingerprinted return self._make_ssl_context(False) return self._make_ssl_context(True) else: return None
<SYSTEM_TASK:> Returns this jar's cookies filtered by their attributes. <END_TASK> <USER_TASK:> Description: def filter_cookies(self, request_url: URL=URL()) -> 'BaseCookie[str]': """Returns this jar's cookies filtered by their attributes."""
self._do_expiration() request_url = URL(request_url) filtered = SimpleCookie() hostname = request_url.raw_host or "" is_not_secure = request_url.scheme not in ("https", "wss") for cookie in self: name = cookie.key domain = cookie["domain"] # Send shared cookies if not domain: filtered[name] = cookie.value continue if not self._unsafe and is_ip_address(hostname): continue if (domain, name) in self._host_only_cookies: if domain != hostname: continue elif not self._is_domain_match(domain, hostname): continue if not self._is_path_match(request_url.path, cookie["path"]): continue if is_not_secure and cookie["secure"]: continue # It's critical we use the Morsel so the coded_value # (based on cookie version) is preserved mrsl_val = cast('Morsel[str]', cookie.get(cookie.key, Morsel())) mrsl_val.set(cookie.key, cookie.value, cookie.coded_value) filtered[name] = mrsl_val return filtered
<SYSTEM_TASK:> Implements domain matching adhering to RFC 6265. <END_TASK> <USER_TASK:> Description: def _is_domain_match(domain: str, hostname: str) -> bool: """Implements domain matching adhering to RFC 6265."""
if hostname == domain:
    return True
if not hostname.endswith(domain):
    return False
non_matching = hostname[:-len(domain)]
if not non_matching.endswith("."):
    return False
return not is_ip_address(hostname)
<SYSTEM_TASK:> Implements path matching adhering to RFC 6265. <END_TASK> <USER_TASK:> Description: def _is_path_match(req_path: str, cookie_path: str) -> bool: """Implements path matching adhering to RFC 6265."""
if not req_path.startswith("/"):
    req_path = "/"
if req_path == cookie_path:
    return True
if not req_path.startswith(cookie_path):
    return False
if cookie_path.endswith("/"):
    return True
non_matching = req_path[len(cookie_path):]
return non_matching.startswith("/")
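Rough expectations for the two matchers above. They are private static helpers of aiohttp's CookieJar, so calling them directly is only done here to illustrate the RFC 6265 rules they encode.

from aiohttp import CookieJar

assert CookieJar._is_domain_match("example.com", "www.example.com")
assert not CookieJar._is_domain_match("example.com", "badexample.com")
assert CookieJar._is_path_match("/docs/web/", "/docs/")
assert not CookieJar._is_path_match("/web/", "/docs/")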
<SYSTEM_TASK:> Parser is used with StreamParser for incremental protocol parsing. <END_TASK> <USER_TASK:> Description: def my_protocol_parser(out, buf): """Parser is used with StreamParser for incremental protocol parsing. Parser is a generator function, but it is not a coroutine. Usually parsers are implemented as a state machine. more details in asyncio/parsers.py existing parsers: * HTTP protocol parsers asyncio/http/protocol.py * websocket parser asyncio/http/websocket.py """
while True: tp = yield from buf.read(5) if tp in (MSG_PING, MSG_PONG): # skip line yield from buf.skipuntil(b'\r\n') out.feed_data(Message(tp, None)) elif tp == MSG_STOP: out.feed_data(Message(tp, None)) elif tp == MSG_TEXT: # read text text = yield from buf.readuntil(b'\r\n') out.feed_data(Message(tp, text.strip().decode('utf-8'))) else: raise ValueError('Unknown protocol prefix.')
<SYSTEM_TASK:> Clone itself, replacing some attributes. <END_TASK> <USER_TASK:> Description: def clone(self, *, method: str=sentinel, rel_url: StrOrURL=sentinel, headers: LooseHeaders=sentinel, scheme: str=sentinel, host: str=sentinel, remote: str=sentinel) -> 'BaseRequest': """Clone itself, replacing some attributes. Creates and returns a new instance of the Request object. If no parameters are given, an exact copy is returned. If a parameter is not passed, the value from the current request object is reused. """
if self._read_bytes: raise RuntimeError("Cannot clone request " "after reading its content") dct = {} # type: Dict[str, Any] if method is not sentinel: dct['method'] = method if rel_url is not sentinel: new_url = URL(rel_url) dct['url'] = new_url dct['path'] = str(new_url) if headers is not sentinel: # a copy semantic dct['headers'] = CIMultiDictProxy(CIMultiDict(headers)) dct['raw_headers'] = tuple((k.encode('utf-8'), v.encode('utf-8')) for k, v in headers.items()) message = self._message._replace(**dct) kwargs = {} if scheme is not sentinel: kwargs['scheme'] = scheme if host is not sentinel: kwargs['host'] = host if remote is not sentinel: kwargs['remote'] = remote return self.__class__( message, self._payload, self._protocol, self._payload_writer, self._task, self._loop, client_max_size=self._client_max_size, state=self._state.copy(), **kwargs)
<SYSTEM_TASK:> Hostname of the request. <END_TASK> <USER_TASK:> Description: def host(self) -> str: """Hostname of the request. Hostname is resolved in this order: - overridden value by .clone(host=new_host) call. - HOST HTTP header - socket.getfqdn() value """
host = self._message.headers.get(hdrs.HOST)
if host is not None:
    return host
else:
    return socket.getfqdn()
<SYSTEM_TASK:> Remote IP of client initiated HTTP request. <END_TASK> <USER_TASK:> Description: def remote(self) -> Optional[str]: """Remote IP of client initiated HTTP request. The IP is resolved in this order: - overridden value by .clone(remote=new_remote) call. - peername of opened socket """
if isinstance(self._transport_peername, (list, tuple)):
    return self._transport_peername[0]
else:
    return self._transport_peername
<SYSTEM_TASK:> The value of If-Modified-Since HTTP header, or None. <END_TASK> <USER_TASK:> Description: def if_modified_since(self) -> Optional[datetime.datetime]: """The value of If-Modified-Since HTTP header, or None. This header is represented as a `datetime` object. """
return self._http_date(self.headers.get(hdrs.IF_MODIFIED_SINCE))
<SYSTEM_TASK:> The value of If-Unmodified-Since HTTP header, or None. <END_TASK> <USER_TASK:> Description: def if_unmodified_since(self) -> Optional[datetime.datetime]: """The value of If-Unmodified-Since HTTP header, or None. This header is represented as a `datetime` object. """
return self._http_date(self.headers.get(hdrs.IF_UNMODIFIED_SINCE))
<SYSTEM_TASK:> The value of If-Range HTTP header, or None. <END_TASK> <USER_TASK:> Description: def if_range(self) -> Optional[datetime.datetime]: """The value of If-Range HTTP header, or None. This header is represented as a `datetime` object. """
return self._http_date(self.headers.get(hdrs.IF_RANGE))
<SYSTEM_TASK:> Return True if request's HTTP BODY can be read, False otherwise. <END_TASK> <USER_TASK:> Description: def has_body(self) -> bool: """Return True if request's HTTP BODY can be read, False otherwise."""
warnings.warn(
    "Deprecated, use .can_read_body #2005",
    DeprecationWarning, stacklevel=2)
return not self._payload.at_eof()
<SYSTEM_TASK:> Read request body if present. <END_TASK> <USER_TASK:> Description: async def read(self) -> bytes: """Read request body if present. Returns bytes object with full request content. """
if self._read_bytes is None: body = bytearray() while True: chunk = await self._payload.readany() body.extend(chunk) if self._client_max_size: body_size = len(body) if body_size >= self._client_max_size: raise HTTPRequestEntityTooLarge( max_size=self._client_max_size, actual_size=body_size ) if not chunk: break self._read_bytes = bytes(body) return self._read_bytes
<SYSTEM_TASK:> Return BODY as text using encoding from .charset. <END_TASK> <USER_TASK:> Description: async def text(self) -> str: """Return BODY as text using encoding from .charset."""
bytes_body = await self.read()
encoding = self.charset or 'utf-8'
return bytes_body.decode(encoding)
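A minimal aiohttp handler sketch (the route name is made up) showing text() on the incoming request.

from aiohttp import web

async def echo(request: web.Request) -> web.Response:
    body = await request.text()    # decoded with .charset, utf-8 fallback
    return web.Response(text=body)

app = web.Application()
app.router.add_post("/echo", echo)
# web.run_app(app)  # uncomment to serve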
<SYSTEM_TASK:> The worker process is about to exit; we need to clean up everything and <END_TASK> <USER_TASK:> Description: async def shutdown(self, timeout: Optional[float]=15.0) -> None: """The worker process is about to exit; we need to clean up everything and stop accepting requests. This is especially important for keep-alive connections."""
self._force_close = True if self._keepalive_handle is not None: self._keepalive_handle.cancel() if self._waiter: self._waiter.cancel() # wait for handlers with suppress(asyncio.CancelledError, asyncio.TimeoutError): with CeilTimeout(timeout, loop=self._loop): if (self._error_handler is not None and not self._error_handler.done()): await self._error_handler if (self._task_handler is not None and not self._task_handler.done()): await self._task_handler # force-close non-idle handler if self._task_handler is not None: self._task_handler.cancel() if self.transport is not None: self.transport.close() self.transport = None
<SYSTEM_TASK:> Set keep-alive connection mode. <END_TASK> <USER_TASK:> Description: def keep_alive(self, val: bool) -> None: """Set keep-alive connection mode. :param bool val: new state. """
self._keepalive = val
if self._keepalive_handle:
    self._keepalive_handle.cancel()
    self._keepalive_handle = None
<SYSTEM_TASK:> Stop accepting new pipelining messages and close <END_TASK> <USER_TASK:> Description: def close(self) -> None: """Stop accepting new pipelining messages and close the connection once handlers are done processing messages."""
self._close = True
if self._waiter:
    self._waiter.cancel()
<SYSTEM_TASK:> Force close connection <END_TASK> <USER_TASK:> Description: def force_close(self) -> None: """Force close connection"""
self._force_close = True
if self._waiter:
    self._waiter.cancel()
if self.transport is not None:
    self.transport.close()
    self.transport = None
<SYSTEM_TASK:> Returns an asynchronous iterator that yields chunks of size n. <END_TASK> <USER_TASK:> Description: def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]: """Returns an asynchronous iterator that yields chunks of size n. Python-3.5 available for Python 3.5+ only """
return AsyncStreamIterator(lambda: self.read(n))
<SYSTEM_TASK:> rollback reading some data from stream, inserting it to buffer head. <END_TASK> <USER_TASK:> Description: def unread_data(self, data: bytes) -> None: """ rollback reading some data from stream, inserting it to buffer head. """
warnings.warn("unread_data() is deprecated " "and will be removed in future releases (#3260)", DeprecationWarning, stacklevel=2) if not data: return if self._buffer_offset: self._buffer[0] = self._buffer[0][self._buffer_offset:] self._buffer_offset = 0 self._size += len(data) self._cursor -= len(data) self._buffer.appendleft(data) self._eof_counter = 0
<SYSTEM_TASK:> Read not more than n bytes, or the whole buffer if n == -1 <END_TASK> <USER_TASK:> Description: def _read_nowait(self, n: int) -> bytes: """ Read not more than n bytes, or the whole buffer if n == -1 """
chunks = []
while self._buffer:
    chunk = self._read_nowait_chunk(n)
    chunks.append(chunk)
    if n != -1:
        n -= len(chunk)
        if n == 0:
            break
return b''.join(chunks) if chunks else b''
<SYSTEM_TASK:> Sends data to all registered receivers. <END_TASK> <USER_TASK:> Description: async def send(self, *args, **kwargs): """ Sends data to all registered receivers. """
if not self.frozen:
    raise RuntimeError("Cannot send non-frozen signal.")
for receiver in self:
    await receiver(*args, **kwargs)
<SYSTEM_TASK:> Translate log_format into form usable by modulo formatting <END_TASK> <USER_TASK:> Description: def compile_format(self, log_format: str) -> Tuple[str, List[KeyMethod]]: """Translate log_format into form usable by modulo formatting All known atoms will be replaced with %s Also methods for formatting of those atoms will be added to _methods in appropriate order For example we have log_format = "%a %t" This format will be translated to "%s %s" Also contents of _methods will be [self._format_a, self._format_t] These method will be called and results will be passed to translated string format. Each _format_* method receive 'args' which is list of arguments given to self.log Exceptions are _format_e, _format_i and _format_o methods which also receive key name (by functools.partial) """
# list of (key, method) tuples, we don't use an OrderedDict as users # can repeat the same key more than once methods = list() for atom in self.FORMAT_RE.findall(log_format): if atom[1] == '': format_key1 = self.LOG_FORMAT_MAP[atom[0]] m = getattr(AccessLogger, '_format_%s' % atom[0]) key_method = KeyMethod(format_key1, m) else: format_key2 = (self.LOG_FORMAT_MAP[atom[2]], atom[1]) m = getattr(AccessLogger, '_format_%s' % atom[2]) key_method = KeyMethod(format_key2, functools.partial(m, atom[1])) methods.append(key_method) log_format = self.FORMAT_RE.sub(r'%s', log_format) log_format = self.CLEANUP_RE.sub(r'%\1', log_format) return log_format, methods
<SYSTEM_TASK:> Writes chunk of data to a stream. <END_TASK> <USER_TASK:> Description: async def write(self, chunk: bytes, *, drain: bool=True, LIMIT: int=0x10000) -> None: """Writes chunk of data to a stream. write_eof() indicates end of stream. writer can't be used after write_eof() method being called. write() return drain future. """
if self._on_chunk_sent is not None: await self._on_chunk_sent(chunk) if self._compress is not None: chunk = self._compress.compress(chunk) if not chunk: return if self.length is not None: chunk_len = len(chunk) if self.length >= chunk_len: self.length = self.length - chunk_len else: chunk = chunk[:self.length] self.length = 0 if not chunk: return if chunk: if self.chunked: chunk_len_pre = ('%x\r\n' % len(chunk)).encode('ascii') chunk = chunk_len_pre + chunk + b'\r\n' self._write(chunk) if self.buffer_size > LIMIT and drain: self.buffer_size = 0 await self.drain()
<SYSTEM_TASK:> Attempt to load the netrc file from the path specified by the env-var <END_TASK> <USER_TASK:> Description: def netrc_from_env() -> Optional[netrc.netrc]: """Attempt to load the netrc file from the path specified by the env-var NETRC or in the default location in the user's home directory. Returns None if it couldn't be found or fails to parse. """
netrc_env = os.environ.get('NETRC') if netrc_env is not None: netrc_path = Path(netrc_env) else: try: home_dir = Path.home() except RuntimeError as e: # pragma: no cover # if pathlib can't resolve home, it may raise a RuntimeError client_logger.debug('Could not resolve home directory when ' 'trying to look for .netrc file: %s', e) return None netrc_path = home_dir / ( '_netrc' if platform.system() == 'Windows' else '.netrc') try: return netrc.netrc(str(netrc_path)) except netrc.NetrcParseError as e: client_logger.warning('Could not parse .netrc file: %s', e) except OSError as e: # we couldn't read the file (doesn't exist, permissions, etc.) if netrc_env or netrc_path.is_file(): # only warn if the environment wanted us to load it, # or it appears like the default file does actually exist client_logger.warning('Could not read .netrc file: %s', e) return None
<SYSTEM_TASK:> Parses a MIME type into its components. <END_TASK> <USER_TASK:> Description: def parse_mimetype(mimetype: str) -> MimeType: """Parses a MIME type into its components. mimetype is a MIME type string. Returns a MimeType object. Example: >>> parse_mimetype('text/html; charset=utf-8') MimeType(type='text', subtype='html', suffix='', parameters={'charset': 'utf-8'}) """
if not mimetype:
    return MimeType(type='', subtype='', suffix='',
                    parameters=MultiDictProxy(MultiDict()))

parts = mimetype.split(';')
params = MultiDict()  # type: MultiDict[str]
for item in parts[1:]:
    if not item:
        continue
    key, value = cast(Tuple[str, str],
                      item.split('=', 1) if '=' in item else (item, ''))
    params.add(key.lower().strip(), value.strip(' "'))

fulltype = parts[0].strip().lower()
if fulltype == '*':
    fulltype = '*/*'

mtype, stype = (cast(Tuple[str, str], fulltype.split('/', 1))
                if '/' in fulltype else (fulltype, ''))
stype, suffix = (cast(Tuple[str, str], stype.split('+', 1))
                 if '+' in stype else (stype, ''))

return MimeType(type=mtype, subtype=stype, suffix=suffix,
                parameters=MultiDictProxy(params))
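Expected behaviour of parse_mimetype() for a couple of inputs, assuming the helper is imported from aiohttp.helpers where it lives.

from aiohttp.helpers import parse_mimetype

mt = parse_mimetype("text/html; charset=utf-8")
assert (mt.type, mt.subtype) == ("text", "html")
assert mt.parameters["charset"] == "utf-8"

mt = parse_mimetype("application/hal+json")
assert (mt.subtype, mt.suffix) == ("hal", "json")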
<SYSTEM_TASK:> Create a BasicAuth object from an Authorization HTTP header. <END_TASK> <USER_TASK:> Description: def decode(cls, auth_header: str, encoding: str='latin1') -> 'BasicAuth': """Create a BasicAuth object from an Authorization HTTP header."""
try:
    auth_type, encoded_credentials = auth_header.split(' ', 1)
except ValueError:
    raise ValueError('Could not parse authorization header.')

if auth_type.lower() != 'basic':
    raise ValueError('Unknown authorization method %s' % auth_type)

try:
    decoded = base64.b64decode(
        encoded_credentials.encode('ascii'), validate=True
    ).decode(encoding)
except binascii.Error:
    raise ValueError('Invalid base64 encoding.')

try:
    # RFC 2617 HTTP Authentication
    # https://www.ietf.org/rfc/rfc2617.txt
    # the colon must be present, but the username and password may be
    # otherwise blank.
    username, password = decoded.split(':', 1)
except ValueError:
    raise ValueError('Invalid credentials.')

return cls(username, password, encoding=encoding)
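A round-trip sketch for BasicAuth: encode() produces the header value that decode() parses back (the credentials are dummies).

from aiohttp import BasicAuth

header = BasicAuth("user", "pass").encode()   # 'Basic dXNlcjpwYXNz'
auth = BasicAuth.decode(header)
assert auth.login == "user" and auth.password == "pass"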
<SYSTEM_TASK:> Create BasicAuth from url. <END_TASK> <USER_TASK:> Description: def from_url(cls, url: URL, *, encoding: str='latin1') -> Optional['BasicAuth']: """Create BasicAuth from url."""
if not isinstance(url, URL):
    raise TypeError("url should be yarl.URL instance")
if url.user is None:
    return None
return cls(url.user, url.password or '', encoding=encoding)
<SYSTEM_TASK:> Close underlying connector. <END_TASK> <USER_TASK:> Description: async def close(self) -> None: """Close underlying connector. Release all acquired resources. """
if not self.closed:
    if self._connector is not None and self._connector_owner:
        await self._connector.close()
    self._connector = None
<SYSTEM_TASK:> Do URL requoting on redirection handling. <END_TASK> <USER_TASK:> Description: def requote_redirect_url(self, val: bool) -> None: """Do URL requoting on redirection handling."""
warnings.warn("session.requote_redirect_url modification " "is deprecated #2778", DeprecationWarning, stacklevel=2) self._requote_redirect_url = val
<SYSTEM_TASK:> Emits next multipart reader object. <END_TASK> <USER_TASK:> Description: async def next(self) -> Any: """Emits next multipart reader object."""
item = await self.stream.next()
if self.stream.at_eof():
    await self.release()
return item
<SYSTEM_TASK:> Reads body part data. <END_TASK> <USER_TASK:> Description: async def read(self, *, decode: bool=False) -> Any: """Reads body part data. decode: Decodes data according to the encoding method from the Content-Encoding header. If the header is missing, the data remains untouched. """
if self._at_eof:
    return b''
data = bytearray()
while not self._at_eof:
    data.extend((await self.read_chunk(self.chunk_size)))
if decode:
    return self.decode(data)
return data
<SYSTEM_TASK:> Reads body part content chunk of the specified size. <END_TASK> <USER_TASK:> Description: async def read_chunk(self, size: int=chunk_size) -> bytes: """Reads body part content chunk of the specified size. size: chunk size """
if self._at_eof: return b'' if self._length: chunk = await self._read_chunk_from_length(size) else: chunk = await self._read_chunk_from_stream(size) self._read_bytes += len(chunk) if self._read_bytes == self._length: self._at_eof = True if self._at_eof: newline = await self._content.readline() assert newline == self._newline, \ 'reader did not read all the data or it is malformed' return chunk
<SYSTEM_TASK:> Reads the body part line by line. <END_TASK> <USER_TASK:> Description: async def readline(self) -> bytes: """Reads the body part line by line."""
if self._at_eof: return b'' if self._unread: line = self._unread.popleft() else: line = await self._content.readline() if line.startswith(self._boundary): # the very last boundary may not come with \r\n, # so set single rules for everyone sline = line.rstrip(b'\r\n') boundary = self._boundary last_boundary = self._boundary + b'--' # ensure that we read exactly the boundary, not something alike if sline == boundary or sline == last_boundary: self._at_eof = True self._unread.append(line) return b'' else: next_line = await self._content.readline() if next_line.startswith(self._boundary): # strip newline but only once line = line[:-len(self._newline)] self._unread.append(next_line) return line
<SYSTEM_TASK:> Decodes data according the specified Content-Encoding <END_TASK> <USER_TASK:> Description: def decode(self, data: bytes) -> bytes: """Decodes data according the specified Content-Encoding or Content-Transfer-Encoding headers value. """
if CONTENT_TRANSFER_ENCODING in self.headers:
    data = self._decode_content_transfer(data)
if CONTENT_ENCODING in self.headers:
    return self._decode_content(data)
return data
<SYSTEM_TASK:> Returns charset parameter from Content-Type header or default. <END_TASK> <USER_TASK:> Description: def get_charset(self, default: str) -> str: """Returns charset parameter from Content-Type header or default."""
ctype = self.headers.get(CONTENT_TYPE, '')
mimetype = parse_mimetype(ctype)
return mimetype.parameters.get('charset', default)
<SYSTEM_TASK:> Returns the name specified in the Content-Disposition header, or None <END_TASK> <USER_TASK:> Description: def name(self) -> Optional[str]: """Returns the name specified in the Content-Disposition header, or None if it is missing or the header is malformed. """
_, params = parse_content_disposition(
    self.headers.get(CONTENT_DISPOSITION))
return content_disposition_filename(params, 'name')
<SYSTEM_TASK:> Constructs reader instance from HTTP response. <END_TASK> <USER_TASK:> Description: def from_response(cls, response: 'ClientResponse') -> Any: """Constructs reader instance from HTTP response. :param response: :class:`~aiohttp.client.ClientResponse` instance """
obj = cls.response_wrapper_cls(response, cls(response.headers,
                                              response.content))
return obj
<SYSTEM_TASK:> Emits the next multipart body part. <END_TASK> <USER_TASK:> Description: async def next(self) -> Any: """Emits the next multipart body part."""
# So, if we're at BOF, we need to skip till the boundary. if self._at_eof: return await self._maybe_release_last_part() if self._at_bof: await self._read_until_first_boundary() self._at_bof = False else: await self._read_boundary() if self._at_eof: # we just read the last boundary, nothing to do there return self._last_part = await self.fetch_next_part() return self._last_part
<SYSTEM_TASK:> Reads and discards all the body parts up to the final boundary. <END_TASK> <USER_TASK:> Description: async def release(self) -> None: """Reads and discards all the body parts up to the final boundary."""
while not self._at_eof:
    item = await self.next()
    if item is None:
        break
    await item.release()
<SYSTEM_TASK:> Dispatches the response by the `Content-Type` header, returning <END_TASK> <USER_TASK:> Description: def _get_part_reader(self, headers: 'CIMultiDictProxy[str]') -> Any: """Dispatches the response by the `Content-Type` header, returning suitable reader instance. :param dict headers: Response headers """
ctype = headers.get(CONTENT_TYPE, '') mimetype = parse_mimetype(ctype) if mimetype.type == 'multipart': if self.multipart_reader_cls is None: return type(self)(headers, self._content) return self.multipart_reader_cls( headers, self._content, _newline=self._newline ) else: return self.part_reader_cls( self._boundary, headers, self._content, _newline=self._newline )
<SYSTEM_TASK:> Ensures that the last read body part is read completely. <END_TASK> <USER_TASK:> Description: async def _maybe_release_last_part(self) -> None: """Ensures that the last read body part is read completely."""
if self._last_part is not None:
    if not self._last_part.at_eof():
        await self._last_part.release()
    self._unread.extend(self._last_part._unread)
    self._last_part = None
<SYSTEM_TASK:> Wrap boundary parameter value in quotes, if necessary. <END_TASK> <USER_TASK:> Description: def _boundary_value(self) -> str: """Wrap boundary parameter value in quotes, if necessary. Reads self.boundary and returns a unicode string. """
# Refer to RFCs 7231, 7230, 5234. # # parameter = token "=" ( token / quoted-string ) # token = 1*tchar # quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE # qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text # obs-text = %x80-FF # quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" # / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" # / DIGIT / ALPHA # ; any VCHAR, except delimiters # VCHAR = %x21-7E value = self._boundary if re.match(self._valid_tchar_regex, value): return value.decode('ascii') # cannot fail if re.search(self._invalid_qdtext_char_regex, value): raise ValueError("boundary value contains invalid characters") # escape %x5C and %x22 quoted_value_content = value.replace(b'\\', b'\\\\') quoted_value_content = quoted_value_content.replace(b'"', b'\\"') return '"' + quoted_value_content.decode('ascii') + '"'
<SYSTEM_TASK:> Adds a new body part to multipart writer. <END_TASK> <USER_TASK:> Description: def append_payload(self, payload: Payload) -> Payload: """Adds a new body part to multipart writer."""
# compression encoding = payload.headers.get(CONTENT_ENCODING, '').lower() # type: Optional[str] # noqa if encoding and encoding not in ('deflate', 'gzip', 'identity'): raise RuntimeError('unknown content encoding: {}'.format(encoding)) if encoding == 'identity': encoding = None # te encoding te_encoding = payload.headers.get( CONTENT_TRANSFER_ENCODING, '').lower() # type: Optional[str] # noqa if te_encoding not in ('', 'base64', 'quoted-printable', 'binary'): raise RuntimeError('unknown content transfer encoding: {}' ''.format(te_encoding)) if te_encoding == 'binary': te_encoding = None # size size = payload.size if size is not None and not (encoding or te_encoding): payload.headers[CONTENT_LENGTH] = str(size) self._parts.append((payload, encoding, te_encoding)) # type: ignore return payload
<SYSTEM_TASK:> Helper to append form urlencoded part. <END_TASK> <USER_TASK:> Description: def append_form( self, obj: Union[Sequence[Tuple[str, str]], Mapping[str, str]], headers: Optional['MultiMapping[str]']=None ) -> Payload: """Helper to append form urlencoded part."""
assert isinstance(obj, (Sequence, Mapping)) if headers is None: headers = CIMultiDict() if isinstance(obj, Mapping): obj = list(obj.items()) data = urlencode(obj, doseq=True) return self.append_payload( StringPayload(data, headers=headers, content_type='application/x-www-form-urlencoded'))
<SYSTEM_TASK:> Write body. <END_TASK> <USER_TASK:> Description: async def write(self, writer: Any, close_boundary: bool=True) -> None: """Write body."""
if not self._parts: return for part, encoding, te_encoding in self._parts: await writer.write(b'--' + self._boundary + b'\r\n') await writer.write(part._binary_headers) if encoding or te_encoding: w = MultipartPayloadWriter(writer) if encoding: w.enable_compression(encoding) if te_encoding: w.enable_encoding(te_encoding) await part.write(w) # type: ignore await w.write_eof() else: await part.write(writer) await writer.write(b'\r\n') if close_boundary: await writer.write(b'--' + self._boundary + b'--\r\n')
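A sketch of building a multipart body with MultipartWriter and letting ClientSession drive write(); the URL and part contents are placeholders.

import aiohttp

async def upload():
    with aiohttp.MultipartWriter("form-data") as mpwriter:
        mpwriter.append("plain text part")
        mpwriter.append_json({"key": "value"})
    async with aiohttp.ClientSession() as session:
        async with session.post("http://example.com/upload",
                                data=mpwriter) as resp:
            return resp.status

# asyncio.run(upload())  # run under an event loop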
<SYSTEM_TASK:> Support coroutines that yields bytes objects. <END_TASK> <USER_TASK:> Description: async def write_bytes(self, writer: AbstractStreamWriter, conn: 'Connection') -> None: """Support coroutines that yields bytes objects."""
# 100 response if self._continue is not None: await writer.drain() await self._continue protocol = conn.protocol assert protocol is not None try: if isinstance(self.body, payload.Payload): await self.body.write(writer) else: if isinstance(self.body, (bytes, bytearray)): self.body = (self.body,) # type: ignore for chunk in self.body: await writer.write(chunk) # type: ignore await writer.write_eof() except OSError as exc: new_exc = ClientOSError( exc.errno, 'Can not write request body for %s' % self.url) new_exc.__context__ = exc new_exc.__cause__ = exc protocol.set_exception(new_exc) except asyncio.CancelledError as exc: if not conn.closed: protocol.set_exception(exc) except Exception as exc: protocol.set_exception(exc) finally: self._writer = None
<SYSTEM_TASK:> Start response processing. <END_TASK> <USER_TASK:> Description: async def start(self, connection: 'Connection') -> 'ClientResponse': """Start response processing."""
self._closed = False self._protocol = connection.protocol self._connection = connection with self._timer: while True: # read response try: message, payload = await self._protocol.read() # type: ignore # noqa except http.HttpProcessingError as exc: raise ClientResponseError( self.request_info, self.history, status=exc.code, message=exc.message, headers=exc.headers) from exc if (message.code < 100 or message.code > 199 or message.code == 101): break if self._continue is not None: set_result(self._continue, True) self._continue = None # payload eof handler payload.on_eof(self._response_eof) # response status self.version = message.version self.status = message.code self.reason = message.reason # headers self._headers = message.headers # type is CIMultiDictProxy self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes] # payload self.content = payload # cookies for hdr in self.headers.getall(hdrs.SET_COOKIE, ()): try: self.cookies.load(hdr) except CookieError as exc: client_logger.warning( 'Can not load response cookies: %s', exc) return self
<SYSTEM_TASK:> Read response payload and decode. <END_TASK> <USER_TASK:> Description: async def text(self, encoding: Optional[str]=None, errors: str='strict') -> str: """Read response payload and decode."""
if self._body is None:
    await self.read()

if encoding is None:
    encoding = self.get_encoding()

return self._body.decode(encoding, errors=errors)
<SYSTEM_TASK:> Enables automatic chunked transfer encoding. <END_TASK> <USER_TASK:> Description: def enable_chunked_encoding(self, chunk_size: Optional[int]=None) -> None: """Enables automatic chunked transfer encoding."""
self._chunked = True

if hdrs.CONTENT_LENGTH in self._headers:
    raise RuntimeError("You can't enable chunked encoding when "
                       "a content length is set")
if chunk_size is not None:
    warnings.warn('Chunk size is deprecated #1615', DeprecationWarning)
<SYSTEM_TASK:> Set or update response cookie. <END_TASK> <USER_TASK:> Description: def set_cookie(self, name: str, value: str, *, expires: Optional[str]=None, domain: Optional[str]=None, max_age: Optional[Union[int, str]]=None, path: str='/', secure: Optional[str]=None, httponly: Optional[str]=None, version: Optional[str]=None) -> None: """Set or update response cookie. Sets new cookie or updates existent with new value. Also updates only those params which are not None. """
old = self._cookies.get(name) if old is not None and old.coded_value == '': # deleted cookie self._cookies.pop(name, None) self._cookies[name] = value c = self._cookies[name] if expires is not None: c['expires'] = expires elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT': del c['expires'] if domain is not None: c['domain'] = domain if max_age is not None: c['max-age'] = str(max_age) elif 'max-age' in c: del c['max-age'] c['path'] = path if secure is not None: c['secure'] = secure if httponly is not None: c['httponly'] = httponly if version is not None: c['version'] = version
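A hedged example of set_cookie() inside an aiohttp handler; the cookie name and value are placeholders.

from aiohttp import web

async def handler(request: web.Request) -> web.Response:
    resp = web.Response(text="ok")
    resp.set_cookie("session", "abc123", max_age=3600, path="/")
    return resp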
<SYSTEM_TASK:> The value of Last-Modified HTTP header, or None. <END_TASK> <USER_TASK:> Description: def last_modified(self) -> Optional[datetime.datetime]: """The value of Last-Modified HTTP header, or None. This header is represented as a `datetime` object. """
httpdate = self._headers.get(hdrs.LAST_MODIFIED)
if httpdate is not None:
    timetuple = parsedate(httpdate)
    if timetuple is not None:
        return datetime.datetime(*timetuple[:6],
                                 tzinfo=datetime.timezone.utc)
return None
<SYSTEM_TASK:> Add static files view. <END_TASK> <USER_TASK:> Description: def add_static(self, prefix: str, path: PathLike, *, name: Optional[str]=None, expect_handler: Optional[_ExpectHandler]=None, chunk_size: int=256 * 1024, show_index: bool=False, follow_symlinks: bool=False, append_version: bool=False) -> AbstractResource: """Add static files view. prefix - url prefix path - folder with files """
assert prefix.startswith('/')
if prefix.endswith('/'):
    prefix = prefix[:-1]
resource = StaticResource(prefix, path,
                          name=name,
                          expect_handler=expect_handler,
                          chunk_size=chunk_size,
                          show_index=show_index,
                          follow_symlinks=follow_symlinks,
                          append_version=append_version)
self.register_resource(resource)
return resource
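A routing sketch combining add_static() with the add_get() shortcut shown further below; the paths and handler are illustrative.

from aiohttp import web

async def index(request: web.Request) -> web.Response:
    return web.Response(text="hello")

app = web.Application()
app.router.add_get("/", index)
app.router.add_static("/static/", path="./static", show_index=True)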
<SYSTEM_TASK:> Shortcut for add_route with method OPTIONS <END_TASK> <USER_TASK:> Description: def add_options(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute: """ Shortcut for add_route with method OPTIONS """
return self.add_route(hdrs.METH_OPTIONS, path, handler, **kwargs)
<SYSTEM_TASK:> Shortcut for add_route with method GET, if allow_head is true another <END_TASK> <USER_TASK:> Description: def add_get(self, path: str, handler: _WebHandler, *, name: Optional[str]=None, allow_head: bool=True, **kwargs: Any) -> AbstractRoute: """ Shortcut for add_route with method GET, if allow_head is true another route is added allowing head requests to the same endpoint """
resource = self.add_resource(path, name=name)
if allow_head:
    resource.add_route(hdrs.METH_HEAD, handler, **kwargs)
return resource.add_route(hdrs.METH_GET, handler, **kwargs)
<SYSTEM_TASK:> Shortcut for add_route with ANY methods for a class-based view <END_TASK> <USER_TASK:> Description: def add_view(self, path: str, handler: AbstractView, **kwargs: Any) -> AbstractRoute: """ Shortcut for add_route with ANY methods for a class-based view """
return self.add_route(hdrs.METH_ANY, path, handler, **kwargs)
<SYSTEM_TASK:> Append routes to route table. <END_TASK> <USER_TASK:> Description: def add_routes(self, routes: Iterable[AbstractRouteDef]) -> None: """Append routes to route table. Parameter should be a sequence of RouteDef objects. """
for route_def in routes: route_def.register(self)
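An illustrative combination of the two shortcuts above, using aiohttp's class-based views and RouteTableDef (names are examples only):

from aiohttp import web

class UserView(web.View):
    async def get(self):
        return web.json_response({"user": self.request.match_info["name"]})

routes = web.RouteTableDef()

@routes.get("/ping")
async def ping(request):
    return web.Response(text="pong")

app = web.Application()
app.router.add_view("/users/{name}", UserView)   # ANY method -> UserView
app.router.add_routes(routes)                     # append the RouteDef table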
<SYSTEM_TASK:> Parses RFC 5322 headers from a stream. <END_TASK> <USER_TASK:> Description: def parse_headers( self, lines: List[bytes] ) -> Tuple['CIMultiDictProxy[str]', RawHeaders, Optional[bool], Optional[str], bool, bool]: """Parses RFC 5322 headers from a stream. Line continuations are supported. Returns list of header name and value pairs. Header name is in upper case. """
headers, raw_headers = self._headers_parser.parse_headers(lines)
close_conn = None
encoding = None
upgrade = False
chunked = False

# keep-alive
conn = headers.get(hdrs.CONNECTION)
if conn:
    v = conn.lower()
    if v == 'close':
        close_conn = True
    elif v == 'keep-alive':
        close_conn = False
    elif v == 'upgrade':
        upgrade = True

# encoding
enc = headers.get(hdrs.CONTENT_ENCODING)
if enc:
    enc = enc.lower()
    if enc in ('gzip', 'deflate', 'br'):
        encoding = enc

# chunking
te = headers.get(hdrs.TRANSFER_ENCODING)
if te and 'chunked' in te.lower():
    chunked = True

return (headers, raw_headers, close_conn, encoding, upgrade, chunked)
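A minimal, self-contained sketch of the same Connection / Transfer-Encoding decision logic, using a plain dict instead of aiohttp's internal parser classes (so it can be run without instantiating the parser):

def classify(headers: dict):
    # Mirrors the keep-alive / upgrade / chunked flags computed above.
    conn = (headers.get("Connection") or "").lower()
    close_conn = {"close": True, "keep-alive": False}.get(conn)
    upgrade = conn == "upgrade"
    chunked = "chunked" in (headers.get("Transfer-Encoding") or "").lower()
    return close_conn, upgrade, chunked

print(classify({"Connection": "keep-alive", "Transfer-Encoding": "chunked"}))
# (False, False, True)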
<SYSTEM_TASK:> extra info from connection transport <END_TASK> <USER_TASK:> Description: def get_extra_info(self, name: str, default: Any=None) -> Any: """extra info from connection transport"""
conn = self._response.connection
if conn is None:
    return default
transport = conn.transport
if transport is None:
    return default
return transport.get_extra_info(name, default)
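The use of self._response suggests this helper belongs to the client websocket response; under that assumption, a usage sketch (the URL is illustrative) looks like:

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect("wss://echo.websocket.events") as ws:
            # 'peername' is a standard asyncio transport extra; the default
            # is returned once the underlying connection is gone.
            print(ws.get_extra_info("peername", default=("unknown", 0)))

asyncio.run(main())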
<SYSTEM_TASK:> Returns an apparently legit user-agent, if not requested one of a specific <END_TASK> <USER_TASK:> Description: def user_agent(style=None) -> _UserAgent: """Returns an apparently legitimate user-agent string, or one of a
    specific style if requested. Defaults to a Chrome-style User-Agent.
    """
global useragent
if (not useragent) and style:
    useragent = UserAgent()

return useragent[style] if style else DEFAULT_USER_AGENT
<SYSTEM_TASK:> Reloads the response in Chromium, and replaces HTML content <END_TASK> <USER_TASK:> Description: def render(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False): """Reloads the response in Chromium, and replaces HTML content
    with an updated version, with JavaScript executed.

    :param retries: The number of times to retry loading the page in Chromium.
    :param script: JavaScript to execute upon page load (optional).
    :param wait: The number of seconds to wait before loading the page, preventing timeouts (optional).
    :param scrolldown: Integer, if provided, of how many times to page down.
    :param sleep: Integer, if provided, of how long to sleep after the initial render.
    :param reload: If ``False``, content will not be loaded from the browser, but will be provided from memory.
    :param timeout: Number of seconds after which page rendering times out (optional).
    :param keep_page: If ``True``, allows you to interact with the browser page through ``r.html.page``.

    If ``scrolldown`` is specified, the page will scroll down the specified
    number of times, after sleeping the specified amount of time
    (e.g. ``scrolldown=10, sleep=1``).

    If just ``sleep`` is provided, the rendering will wait *n* seconds before returning.

    If ``script`` is specified, it will execute the provided JavaScript at
    runtime. Example:

    .. code-block:: python

        script = \"\"\"
            () => {
                return {
                    width: document.documentElement.clientWidth,
                    height: document.documentElement.clientHeight,
                    deviceScaleFactor: window.devicePixelRatio,
                }
            }
        \"\"\"

    Returns the return value of the executed ``script``, if any is provided:

    .. code-block:: python

        >>> r.html.render(script=script)
        {'width': 800, 'height': 600, 'deviceScaleFactor': 1}

    Warning: the first time you run this method, it will download
    Chromium into your home directory (``~/.pyppeteer``).
    """
self.browser = self.session.browser  # Automatically create an event loop and browser
content = None

# Automatically set reload to False if the example URL is being used.
if self.url == DEFAULT_URL:
    reload = False

for i in range(retries):
    if not content:
        try:
            content, result, page = self.session.loop.run_until_complete(
                self._async_render(url=self.url, script=script, sleep=sleep,
                                   wait=wait, content=self.html, reload=reload,
                                   scrolldown=scrolldown, timeout=timeout,
                                   keep_page=keep_page))
        except TypeError:
            pass
    else:
        break

if not content:
    raise MaxRetries("Unable to render the page. Try increasing timeout")

html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING),
            default_encoding=DEFAULT_ENCODING)
self.__dict__.update(html.__dict__)
self.page = page
return result
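Typical requests-html usage of this method (assumes network access and a Chromium download on first run; the URL is illustrative):

from requests_html import HTMLSession

session = HTMLSession()
r = session.get("https://example.org/")
r.html.render(sleep=1, scrolldown=2)   # executes JavaScript, then updates r.html
print(r.html.find("title", first=True).text)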
<SYSTEM_TASK:> If a browser was created close it first. <END_TASK> <USER_TASK:> Description: def close(self): """ If a browser was created close it first. """
if hasattr(self, "_browser"):
    self.loop.run_until_complete(self._browser.close())
super().close()
<SYSTEM_TASK:> Partial original request func and run it in a thread. <END_TASK> <USER_TASK:> Description: def request(self, *args, **kwargs): """ Partial original request func and run it in a thread. """
func = partial(super().request, *args, **kwargs)
return self.loop.run_in_executor(self.thread_pool, func)
<SYSTEM_TASK:> Pass in all the coroutines you want to run, it will wrap each one <END_TASK> <USER_TASK:> Description: def run(self, *coros): """ Pass in all the coroutines you want to run. Each one is wrapped
        in a task, run, and awaited for its result. Returns a list with all
        results, in the same order the coros were passed in.
        """
tasks = [
    asyncio.ensure_future(coro()) for coro in coros
]
done, _ = self.loop.run_until_complete(asyncio.wait(tasks))
# Collect results from the original task list (asyncio.wait returns an
# unordered set) so results line up with the order coros were passed in.
return [t.result() for t in tasks]
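An illustrative AsyncHTMLSession workflow using this run() helper (URLs and coroutine names are examples):

from requests_html import AsyncHTMLSession

asession = AsyncHTMLSession()

async def get_python():
    return await asession.get("https://python.org/")

async def get_rust():
    return await asession.get("https://www.rust-lang.org/")

# run() takes the coroutine functions themselves and returns their results.
results = asession.run(get_python, get_rust)
print([r.status_code for r in results])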
<SYSTEM_TASK:> Distributed Synchronous SGD Example <END_TASK> <USER_TASK:> Description: def run(params): """ Distributed Synchronous SGD Example """
rank = dist.get_rank()
torch.manual_seed(1234)
train_set, bsz = partition_dataset()
model = Net()
optimizer = optim.SGD(model.parameters(),
                      lr=params['learning_rate'],
                      momentum=params['momentum'])

num_batches = ceil(len(train_set.dataset) / float(bsz))
total_loss = 0.0
for epoch in range(3):
    epoch_loss = 0.0
    for data, target in train_set:
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        epoch_loss += loss.item()
        loss.backward()
        average_gradients(model)
        optimizer.step()
    #logger.debug('Rank: ', rank, ', epoch: ', epoch, ': ', epoch_loss / num_batches)
    if rank == 0:
        nni.report_intermediate_result(epoch_loss / num_batches)
    total_loss += (epoch_loss / num_batches)

total_loss /= 3
logger.debug('Final loss: {}'.format(total_loss))
if rank == 0:
    nni.report_final_result(total_loss)
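The loop above calls average_gradients(model), which is not shown in this snippet. A minimal sketch of such a helper, assuming the standard torch.distributed setup this example uses (all-reduce the gradients and divide by world size):

import torch.distributed as dist

def average_gradients(model):
    # Synchronously average gradients across all workers before the
    # optimizer step, so every rank applies the same update.
    world_size = float(dist.get_world_size())
    for param in model.parameters():
        if param.grad is not None:
            dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
            param.grad.data /= world_size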
<SYSTEM_TASK:> Computes gradient of the Lovasz extension w.r.t sorted errors <END_TASK> <USER_TASK:> Description: def lovasz_grad(gt_sorted): """ Computes gradient of the Lovasz extension w.r.t sorted errors See Alg. 1 in paper """
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1:  # cover 1-pixel case
    jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
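A tiny worked example (values are illustrative): the input is the ground-truth presence vector sorted by descending prediction error, and the output gives the per-position increments of the Lovasz extension of the Jaccard loss.

import torch

gt_sorted = torch.tensor([1, 1, 0, 1])
grad = lovasz_grad(gt_sorted)
# Roughly tensor([0.3333, 0.3333, 0.0833, 0.2500]); its dot product with the
# sorted errors gives the Lovasz hinge/softmax surrogate loss.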
<SYSTEM_TASK:> Flattens predictions in the batch <END_TASK> <USER_TASK:> Description: def flatten_probas(probas, labels, ignore=None): """ Flattens predictions in the batch """
B, C, H, W = probas.size()
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)  # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
    return probas, labels
valid = (labels != ignore)
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels
<SYSTEM_TASK:> nanmean compatible with generators. <END_TASK> <USER_TASK:> Description: def mean(l, ignore_nan=False, empty=0): """ nanmean compatible with generators. """
l = iter(l)
if ignore_nan:
    l = ifilterfalse(np.isnan, l)
try:
    n = 1
    acc = next(l)
except StopIteration:
    if empty == 'raise':
        raise ValueError('Empty mean')
    return empty
for n, v in enumerate(l, 2):
    acc += v
if n == 1:
    return acc
return acc / n
<SYSTEM_TASK:> Change json to search space in hyperopt. <END_TASK> <USER_TASK:> Description: def json2space(in_x, name=ROOT): """
    Change json to search space in hyperopt.

    Parameters
    ----------
    in_x : dict/list/str/int/float
        The part of json.
    name : str
        name could be ROOT, TYPE, VALUE or INDEX.
    """
out_y = copy.deepcopy(in_x)
if isinstance(in_x, dict):
    if TYPE in in_x.keys():
        _type = in_x[TYPE]
        name = name + '-' + _type
        _value = json2space(in_x[VALUE], name=name)
        if _type == 'choice':
            out_y = eval('hp.hp.' + _type)(name, _value)
        else:
            if _type in ['loguniform', 'qloguniform']:
                _value[:2] = np.log(_value[:2])
            out_y = eval('hp.hp.' + _type)(name, *_value)
    else:
        out_y = dict()
        for key in in_x.keys():
            out_y[key] = json2space(in_x[key], name + '[%s]' % str(key))
elif isinstance(in_x, list):
    out_y = list()
    for i, x_i in enumerate(in_x):
        out_y.append(json2space(x_i, name + '[%d]' % i))
else:
    logger.info('in_x is not a dict or a list in json2space function %s', str(in_x))
return out_y
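An illustrative call, assuming the usual NNI convention in which the TYPE and VALUE constants map to the '_type' and '_value' keys of the search-space JSON (the parameter names are examples):

search_space = {
    "learning_rate": {"_type": "loguniform", "_value": [1e-5, 1e-1]},
    "optimizer": {"_type": "choice", "_value": ["adam", "sgd"]},
}
hyperopt_space = json2space(search_space)
# -> {'learning_rate': hyperopt loguniform over [log(1e-5), log(1e-1)],
#     'optimizer': hyperopt choice over ['adam', 'sgd']}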
<SYSTEM_TASK:> Update search space definition in tuner by search_space in parameters. <END_TASK> <USER_TASK:> Description: def update_search_space(self, search_space): """
    Update the search space definition in the tuner from search_space in parameters.

    Will be called when the experiment is first set up or when the search space
    is updated in the WebUI.

    Parameters
    ----------
    search_space : dict
    """
self.json = search_space

search_space_instance = json2space(self.json)
rstate = np.random.RandomState()
trials = hp.Trials()
domain = hp.Domain(None,
                   search_space_instance,
                   pass_expr_memo_ctrl=None)
algorithm = self._choose_tuner(self.algorithm_name)
self.rval = hp.FMinIter(algorithm,
                        domain,
                        trials,
                        max_evals=-1,
                        rstate=rstate,
                        verbose=0)
self.rval.catch_eval_exceptions = False
<SYSTEM_TASK:> Record an observation of the objective function <END_TASK> <USER_TASK:> Description: def receive_trial_result(self, parameter_id, parameters, value): """
    Record an observation of the objective function.

    Parameters
    ----------
    parameter_id : int
    parameters : dict
    value : dict/float
        If value is a dict, it should have a "default" key.
        value is the final metric of the trial.
    """
reward = extract_scalar_reward(value)
# restore the parameters that contain '_index'
if parameter_id not in self.total_data:
    raise RuntimeError('Received parameter_id not in total_data.')
params = self.total_data[parameter_id]

if self.optimize_mode is OptimizeMode.Maximize:
    reward = -reward

rval = self.rval
domain = rval.domain
trials = rval.trials

new_id = len(trials)

rval_specs = [None]
rval_results = [domain.new_result()]
rval_miscs = [dict(tid=new_id, cmd=domain.cmd, workdir=domain.workdir)]

vals = params
idxs = dict()

out_y = dict()
json2vals(self.json, vals, out_y)
vals = out_y
for key in domain.params:
    if key in [VALUE, INDEX]:
        continue
    if key not in vals or vals[key] is None or vals[key] == []:
        idxs[key] = vals[key] = []
    else:
        idxs[key] = [new_id]
        vals[key] = [vals[key]]

self.miscs_update_idxs_vals(rval_miscs,
                            idxs,
                            vals,
                            idxs_map={new_id: new_id},
                            assert_all_vals_used=False)

trial = trials.new_trial_docs([new_id], rval_specs, rval_results,
                              rval_miscs)[0]
trial['result'] = {'loss': reward, 'status': 'ok'}
trial['state'] = hp.JOB_STATE_DONE
trials.insert_trial_docs([trial])
trials.refresh()
<SYSTEM_TASK:> Unpack the idxs-vals format into the list of dictionaries that is <END_TASK> <USER_TASK:> Description: def miscs_update_idxs_vals(self, miscs, idxs, vals, assert_all_vals_used=True, idxs_map=None): """
    Unpack the idxs-vals format into the list of dictionaries that is `misc`.

    Parameters
    ----------
    idxs_map : dict
        idxs_map is a dictionary of id->id mappings so that the misc['idxs']
        can contain different numbers than the idxs argument.
    """
if idxs_map is None:
    idxs_map = {}

assert set(idxs.keys()) == set(vals.keys())

misc_by_id = {m['tid']: m for m in miscs}
for m in miscs:
    m['idxs'] = dict([(key, []) for key in idxs])
    m['vals'] = dict([(key, []) for key in idxs])

for key in idxs:
    assert len(idxs[key]) == len(vals[key])
    for tid, val in zip(idxs[key], vals[key]):
        tid = idxs_map.get(tid, tid)
        if assert_all_vals_used or tid in misc_by_id:
            misc_by_id[tid]['idxs'][key] = [tid]
            misc_by_id[tid]['vals'][key] = [val]
<SYSTEM_TASK:> get suggestion from hyperopt <END_TASK> <USER_TASK:> Description: def get_suggestion(self, random_search=False): """Get a suggestion from hyperopt.

    Parameters
    ----------
    random_search : bool
        flag to indicate random search or not (default: {False})

    Returns
    ----------
    total_params : dict
        parameter suggestion
    """
rval = self.rval
trials = rval.trials
algorithm = rval.algo
new_ids = rval.trials.new_trial_ids(1)
rval.trials.refresh()
random_state = rval.rstate.randint(2**31 - 1)
if random_search:
    new_trials = hp.rand.suggest(new_ids, rval.domain, trials, random_state)
else:
    new_trials = algorithm(new_ids, rval.domain, trials, random_state)
rval.trials.refresh()
vals = new_trials[0]['misc']['vals']
parameter = dict()
for key in vals:
    try:
        parameter[key] = vals[key][0].item()
    except (KeyError, IndexError):
        parameter[key] = None

# remove '_index' from json2parameter and save params-id
total_params = json2parameter(self.json, parameter)
return total_params
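A hedged sketch of how a tuner might call get_suggestion() when NNI asks for a new trial; generate_parameters is the standard NNI Tuner hook, and split_index is assumed here to be the NNI utility that strips the '_index' bookkeeping keys before handing parameters to the trial:

def generate_parameters(self, parameter_id, **kwargs):
    total_params = self.get_suggestion(random_search=False)
    # Remember what was handed out so receive_trial_result can look it up.
    self.total_data[parameter_id] = total_params
    params = split_index(total_params)
    return params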
<SYSTEM_TASK:> Build char embedding network for the QA model. <END_TASK> <USER_TASK:> Description: def build_char_states(self, char_embed, is_training, reuse, char_ids, char_lengths): """Build char embedding network for the QA model."""
max_char_length = self.cfg.max_char_length

inputs = dropout(tf.nn.embedding_lookup(char_embed, char_ids),
                 self.cfg.dropout,
                 is_training)
inputs = tf.reshape(
    inputs, shape=[max_char_length, -1, self.cfg.char_embed_dim])
char_lengths = tf.reshape(char_lengths, shape=[-1])
with tf.variable_scope('char_encoding', reuse=reuse):
    cell_fw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
    cell_bw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
    _, (left_right, right_left) = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=cell_fw,
        cell_bw=cell_bw,
        sequence_length=char_lengths,
        inputs=inputs,
        time_major=True,
        dtype=tf.float32
    )

left_right = tf.reshape(left_right, shape=[-1, self.cfg.char_embed_dim])
right_left = tf.reshape(right_left, shape=[-1, self.cfg.char_embed_dim])
states = tf.concat([left_right, right_left], axis=1)
out_shape = tf.shape(char_ids)[1:3]
out_shape = tf.concat([out_shape, tf.constant(
    value=[self.cfg.char_embed_dim * 2], dtype=tf.int32)], axis=0)
return tf.reshape(states, shape=out_shape)