desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def rebuild_auth(self, prepared_request, response):
    """Strip credentials when a redirect crosses hosts, then re-resolve auth.

    Removing the Authorization header avoids leaking credentials to a
    different host; netrc-based auth is re-applied for the new URL where
    the environment allows, so credentials are not silently lost.
    """
    headers = prepared_request.headers
    url = prepared_request.url

    # Drop the Authorization header when the redirect target lives on a
    # different host than the request that produced it.
    if 'Authorization' in headers:
        original_host = urlparse(response.request.url).hostname
        redirect_host = urlparse(url).hostname
        if original_host != redirect_host:
            del headers['Authorization']

    # Re-resolve authentication for the redirected URL where possible.
    new_auth = get_netrc_auth(url) if self.trust_env else None
    if new_auth is not None:
        prepared_request.prepare_auth(new_auth)
def rebuild_proxies(self, prepared_request, proxies):
    """Re-evaluate the proxy configuration for a redirect target.

    If the new URL is covered by NO_PROXY the proxy configuration is
    stripped; otherwise missing scheme keys are restored from the
    environment. The Proxy-Authorization header is rebuilt when the
    selected proxy URL carries credentials.

    :returns: the new proxies dictionary.
    """
    headers = prepared_request.headers
    url = prepared_request.url
    scheme = urlparse(url).scheme
    new_proxies = proxies.copy() if proxies is not None else {}

    if self.trust_env and not should_bypass_proxies(url):
        environ_proxies = get_environ_proxies(url)
        candidate = environ_proxies.get(scheme)
        if candidate:
            # Only fill in a missing entry; never override an explicit one.
            new_proxies.setdefault(scheme, environ_proxies[scheme])

    # Any previous proxy auth no longer applies to the (possibly new) proxy.
    if 'Proxy-Authorization' in headers:
        del headers['Proxy-Authorization']

    try:
        username, password = get_auth_from_url(new_proxies[scheme])
    except KeyError:
        username, password = None, None

    if username and password:
        headers['Proxy-Authorization'] = _basic_auth_str(username, password)

    return new_proxies
def prepare_request(self, request):
    """Construct a :class:`PreparedRequest <PreparedRequest>` for transmission.

    Merges settings from the given :class:`Request <Request>` with this
    session's settings and returns the prepared request.

    :param request: :class:`Request` instance to prepare with this
        session's settings.
    """
    cookies = request.cookies or {}
    if not isinstance(cookies, cookielib.CookieJar):
        cookies = cookiejar_from_dict(cookies)

    # Session cookies first, request-level cookies layered on top.
    merged_cookies = merge_cookies(
        merge_cookies(RequestsCookieJar(), self.cookies), cookies)

    # Fall back to netrc only when neither request nor session set auth.
    auth = request.auth
    if self.trust_env and not auth and not self.auth:
        auth = get_netrc_auth(request.url)

    p = PreparedRequest()
    p.prepare(
        method=request.method.upper(),
        url=request.url,
        files=request.files,
        data=request.data,
        json=request.json,
        headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
        params=merge_setting(request.params, self.params),
        auth=merge_setting(auth, self.auth),
        cookies=merged_cookies,
        hooks=merge_hooks(request.hooks, self.hooks),
    )
    return p
def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None):
    """Construct a :class:`Request <Request>`, prepare it, and send it.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) query-string dict or bytes.
    :param data: (optional) dict, bytes, or file-like body.
    :param json: (optional) json body.
    :param headers: (optional) dict of HTTP headers.
    :param cookies: (optional) dict or CookieJar.
    :param files: (optional) dict of 'filename': file-like objects for
        multipart upload.
    :param auth: (optional) auth tuple or callable.
    :param timeout: (optional) float or (connect, read) tuple of seconds.
    :param allow_redirects: (optional) defaults to True.
    :param proxies: (optional) dict mapping protocol (or protocol+host)
        to proxy URL.
    :param stream: (optional) whether to defer downloading the body.
    :param verify: (optional) verify SSL certs; may be a CA bundle path.
    :param cert: (optional) client cert path, or ('cert', 'key') pair.
    :returns: :class:`Response <Response>` object.
    """
    req = Request(
        method=method.upper(),
        url=url,
        headers=headers,
        files=files,
        data=data or {},
        json=json,
        params=params or {},
        auth=auth,
        cookies=cookies,
        hooks=hooks,
    )
    prep = self.prepare_request(req)

    proxies = proxies or {}
    # Environment settings (proxies, CA bundles, ...) are merged last.
    settings = self.merge_environment_settings(prep.url, proxies, stream, verify, cert)
    send_kwargs = {'timeout': timeout, 'allow_redirects': allow_redirects}
    send_kwargs.update(settings)
    return self.send(prep, **send_kwargs)
def get(self, url, **kwargs):
    """Send a GET request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param kwargs: optional arguments that ``request`` takes.
    """
    # GETs follow redirects unless the caller says otherwise.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
    """Send an OPTIONS request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param kwargs: optional arguments that ``request`` takes.
    """
    # OPTIONS follows redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
    """Send a HEAD request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param kwargs: optional arguments that ``request`` takes.
    """
    # Unlike GET, HEAD does not follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
    """Send a POST request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) dict, bytes, or file-like body.
    :param json: (optional) json body.
    :param kwargs: optional arguments that ``request`` takes.
    """
    return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
    """Send a PUT request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) dict, bytes, or file-like body.
    :param kwargs: optional arguments that ``request`` takes.
    """
    return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
    """Send a PATCH request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) dict, bytes, or file-like body.
    :param kwargs: optional arguments that ``request`` takes.
    """
    return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
    """Send a DELETE request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param kwargs: optional arguments that ``request`` takes.
    """
    return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
    """Send a given PreparedRequest.

    Fills in session-level defaults (stream/verify/cert/proxies),
    short-circuits known permanent redirects via ``self.redirect_cache``,
    dispatches through the matching adapter, runs response hooks,
    persists cookies, and optionally resolves redirects.
    """
    # Session defaults only apply when the caller did not override them.
    kwargs.setdefault('stream', self.stream)
    kwargs.setdefault('verify', self.verify)
    kwargs.setdefault('cert', self.cert)
    kwargs.setdefault('proxies', self.proxies)
    if (not isinstance(request, PreparedRequest)):
        raise ValueError('You can only send PreparedRequests.')
    # Resolve cached permanent redirects up front; ``checked_urls`` guards
    # against cycles in the redirect cache.
    checked_urls = set()
    while (request.url in self.redirect_cache):
        checked_urls.add(request.url)
        new_url = self.redirect_cache.get(request.url)
        if (new_url in checked_urls):
            break
        request.url = new_url
    allow_redirects = kwargs.pop('allow_redirects', True)
    stream = kwargs.get('stream')
    hooks = request.hooks
    adapter = self.get_adapter(url=request.url)
    # ``elapsed`` covers only the adapter round-trip, not hooks/redirects.
    start = datetime.utcnow()
    r = adapter.send(request, **kwargs)
    r.elapsed = (datetime.utcnow() - start)
    r = dispatch_hook('response', hooks, r, **kwargs)
    # Persist cookies from every hop of any redirect chain.
    if r.history:
        for resp in r.history:
            extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
    extract_cookies_to_jar(self.cookies, request, r.raw)
    # resolve_redirects is a lazy generator; only consume it when
    # redirects are allowed.
    gen = self.resolve_redirects(r, request, **kwargs)
    history = ([resp for resp in gen] if allow_redirects else [])
    if history:
        # The last response becomes ``r``; earlier hops become its history.
        history.insert(0, r)
        r = history.pop()
        r.history = history
    if (not stream):
        # Eagerly consume the body so the connection can be released.
        r.content
    return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
    """Merge per-request settings with environment and session settings.

    :returns: dict with ``verify``, ``proxies``, ``stream`` and ``cert``.
    """
    if self.trust_env:
        env_proxies = get_environ_proxies(url) or {}
        for scheme, proxy in env_proxies.items():
            proxies.setdefault(scheme, proxy)
        # Honour CA-bundle environment variables unless the caller
        # explicitly disabled or configured verification.
        if verify is True or verify is None:
            verify = (os.environ.get('REQUESTS_CA_BUNDLE')
                      or os.environ.get('CURL_CA_BUNDLE'))
    return {
        'verify': merge_setting(verify, self.verify),
        'proxies': merge_setting(proxies, self.proxies),
        'stream': merge_setting(stream, self.stream),
        'cert': merge_setting(cert, self.cert),
    }
def get_adapter(self, url):
    """Return the connection adapter registered for ``url``.

    :raises InvalidSchema: when no mounted prefix matches the URL.
    """
    lowered = url.lower()
    for prefix, adapter in self.adapters.items():
        if lowered.startswith(prefix):
            return adapter
    raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
    """Close every mounted adapter, and with them the session."""
    for adapter in self.adapters.values():
        adapter.close()
def mount(self, prefix, adapter):
    """Register a connection adapter under ``prefix``.

    Adapters are kept ordered so longer (more specific) prefixes are
    matched before shorter ones.
    """
    self.adapters[prefix] = adapter
    shorter_keys = [key for key in self.adapters if len(key) < len(prefix)]
    # Re-inserting shorter keys pushes them behind the new prefix.
    for key in shorter_keys:
        self.adapters[key] = self.adapters.pop(key)
def lower_items(self):
    """Like iteritems(), but with all lowercase keys.

    ``_store`` maps lowercase key -> (original key, value); only the
    value half of the pair is exposed here.
    """
    return (
        (lowerkey, pair[1])
        for lowerkey, pair in self._store.items()
    )
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
    """Initialize a urllib3 PoolManager.

    Not for user code; exposed for HTTPAdapter subclasses.

    :param connections: number of urllib3 connection pools to cache.
    :param maxsize: maximum number of connections to save in the pool.
    :param block: block when no free connections are available.
    :param pool_kwargs: extra keyword arguments for the PoolManager.
    """
    # Remembered so proxy managers can mirror these settings.
    self._pool_connections = connections
    self._pool_maxsize = maxsize
    self._pool_block = block
    self.poolmanager = PoolManager(
        num_pools=connections, maxsize=maxsize, block=block,
        strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
    """Return the urllib3 ProxyManager for ``proxy``, creating and caching
    it on first use.

    Not for user code; exposed for HTTPAdapter subclasses.

    :param proxy: the proxy URL.
    :param proxy_kwargs: extra keyword arguments for the ProxyManager.
    :returns: ProxyManager
    """
    if proxy not in self.proxy_manager:
        self.proxy_manager[proxy] = proxy_from_url(
            proxy,
            proxy_headers=self.proxy_headers(proxy),
            num_pools=self._pool_connections,
            maxsize=self._pool_maxsize,
            block=self._pool_block,
            **proxy_kwargs)
    return self.proxy_manager[proxy]
def cert_verify(self, conn, url, verify, cert):
    """Configure certificate verification on a urllib3 connection.

    Not for user code; exposed for HTTPAdapter subclasses.

    :param conn: the urllib3 connection object.
    :param url: the requested URL.
    :param verify: whether to verify; may be a CA bundle path.
    :param cert: the client SSL certificate (path or (cert, key) pair).
    """
    if url.lower().startswith('https') and verify:
        cert_loc = None
        # ``verify`` may be a CA bundle path rather than a boolean.
        if verify is not True:
            cert_loc = verify
        if not cert_loc:
            cert_loc = DEFAULT_CA_BUNDLE_PATH
        if not cert_loc:
            raise Exception('Could not find a suitable SSL CA certificate bundle.')
        conn.cert_reqs = 'CERT_REQUIRED'
        # A directory of certs vs. a single bundle file.
        if os.path.isdir(cert_loc):
            conn.ca_cert_dir = cert_loc
        else:
            conn.ca_certs = cert_loc
    else:
        conn.cert_reqs = 'CERT_NONE'
        conn.ca_certs = None
        conn.ca_cert_dir = None
    if cert:
        if isinstance(cert, basestring):
            conn.cert_file = cert
        else:
            conn.cert_file = cert[0]
            conn.key_file = cert[1]
def build_response(self, req, resp):
    """Build a requests :class:`Response <requests.Response>` from a
    urllib3 response.

    Not for user code; exposed for HTTPAdapter subclasses.

    :param req: the :class:`PreparedRequest <PreparedRequest>` that
        generated the response.
    :param resp: the urllib3 response object.
    """
    response = Response()
    # Fall back to sensible defaults where urllib3 gives us nothing.
    response.status_code = getattr(resp, 'status', None)
    response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
    response.encoding = get_encoding_from_headers(response.headers)
    response.raw = resp
    response.reason = response.raw.reason
    response.url = req.url.decode('utf-8') if isinstance(req.url, bytes) else req.url
    extract_cookies_to_jar(response.cookies, req, resp)
    # Give the response a reference back to its request and adapter.
    response.request = req
    response.connection = self
    return response
def get_connection(self, url, proxies=None):
    """Return a urllib3 connection pool for ``url``, proxied when configured.

    Not for user code; exposed for HTTPAdapter subclasses.

    :param url: the URL to connect to.
    :param proxies: (optional) Requests-style proxies dictionary.
    """
    proxy = select_proxy(url, proxies)
    if proxy:
        proxy = prepend_scheme_if_needed(proxy, 'http')
        manager = self.proxy_manager_for(proxy)
        return manager.connection_from_url(url)
    # Re-serialize the parsed URL so equivalent URLs share a pool.
    canonical = urlparse(url).geturl()
    return self.poolmanager.connection_from_url(canonical)
def close(self):
    """Dispose of internal state.

    Currently this just clears the PoolManager, which closes all pooled
    connections.
    """
    self.poolmanager.clear()
def request_url(self, request, proxies):
    """Return the URL to put on the request line.

    Through a plain-HTTP proxy the full URL must be sent; otherwise only
    the path portion is used. Not for user code; exposed for HTTPAdapter
    subclasses.

    :param request: the :class:`PreparedRequest <PreparedRequest>` being sent.
    :param proxies: dict of schemes (or scheme+host) to proxy URLs.
    """
    proxy = select_proxy(request.url, proxies)
    scheme = urlparse(request.url).scheme
    if proxy and scheme != 'https':
        # Absolute form, stripped of fragment and userinfo.
        return urldefragauth(request.url)
    return request.path_url
def add_headers(self, request, **kwargs):
    """Add any headers needed by the connection.

    Deliberately a no-op as of v2.0; subclasses of
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>` may override it.

    :param request: the :class:`PreparedRequest <PreparedRequest>` to
        add headers to.
    :param kwargs: the keyword arguments from the call to send().
    """
def proxy_headers(self, proxy):
    """Return the headers to send to the proxy itself.

    Currently only Proxy-Authorization, built from credentials embedded
    in the proxy URL. Not for user code; exposed for HTTPAdapter
    subclasses.

    :param proxy: the url of the proxy being used for this request.
    """
    headers = {}
    username, password = get_auth_from_url(proxy)
    if username and password:
        headers['Proxy-Authorization'] = _basic_auth_str(username, password)
    return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
    """Send a PreparedRequest object. Returns a Response object.

    :param request: the :class:`PreparedRequest <PreparedRequest>` being sent.
    :param stream: (optional) whether to stream the request content.
    :param timeout: (optional) float or (connect, read) tuple of seconds.
    :param verify: (optional) whether to verify SSL certificates.
    :param cert: (optional) user-provided SSL certificate.
    :param proxies: (optional) proxies dictionary to apply to the request.
    :raises ValueError: for a malformed timeout tuple.
    :raises ConnectionError, ConnectTimeout, ReadTimeout, RetryError,
        ProxyError, SSLError: translated from urllib3 failures.
    """
    conn = self.get_connection(request.url, proxies)
    self.cert_verify(conn, request.url, verify, cert)
    url = self.request_url(request, proxies)
    self.add_headers(request)

    # A body without a Content-Length must be sent chunked.
    chunked = not (request.body is None or 'Content-Length' in request.headers)

    if isinstance(timeout, tuple):
        try:
            connect, read = timeout
            timeout = TimeoutSauce(connect=connect, read=read)
        except ValueError:
            err = 'Invalid timeout {0}. Pass a (connect, read) timeout tuple, or a single float to set both timeouts to the same value'.format(timeout)
            raise ValueError(err)
    else:
        timeout = TimeoutSauce(connect=timeout, read=timeout)

    try:
        if not chunked:
            resp = conn.urlopen(
                method=request.method,
                url=url,
                body=request.body,
                headers=request.headers,
                redirect=False,
                assert_same_host=False,
                preload_content=False,
                decode_content=False,
                retries=self.max_retries,
                timeout=timeout)
        else:
            # Hand-rolled chunked transfer over the low-level connection.
            if hasattr(conn, 'proxy_pool'):
                conn = conn.proxy_pool
            low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
            try:
                low_conn.putrequest(request.method, url, skip_accept_encoding=True)
                for header, value in request.headers.items():
                    low_conn.putheader(header, value)
                low_conn.endheaders()
                # BUG FIX: the chunk delimiters must be bytes — sending a
                # str here raises TypeError on Python 3 sockets.
                for i in request.body:
                    low_conn.send(hex(len(i))[2:].encode('utf-8'))
                    low_conn.send(b'\r\n')
                    low_conn.send(i)
                    low_conn.send(b'\r\n')
                low_conn.send(b'0\r\n\r\n')
                try:
                    # Python 2's httplib supported ``buffering``.
                    r = low_conn.getresponse(buffering=True)
                except TypeError:
                    r = low_conn.getresponse()
                resp = HTTPResponse.from_httplib(
                    r,
                    pool=conn,
                    connection=low_conn,
                    preload_content=False,
                    decode_content=False)
            except:
                # Deliberately bare: close the raw connection on ANY
                # failure (including KeyboardInterrupt), then re-raise.
                low_conn.close()
                raise
    except (ProtocolError, socket.error) as err:
        raise ConnectionError(err, request=request)
    except MaxRetryError as e:
        if isinstance(e.reason, ConnectTimeoutError):
            # A NewConnectionError is more general than a timeout.
            if not isinstance(e.reason, NewConnectionError):
                raise ConnectTimeout(e, request=request)
        if isinstance(e.reason, ResponseError):
            raise RetryError(e, request=request)
        raise ConnectionError(e, request=request)
    except ClosedPoolError as e:
        raise ConnectionError(e, request=request)
    except _ProxyError as e:
        raise ProxyError(e)
    except (_SSLError, _HTTPError) as e:
        if isinstance(e, _SSLError):
            raise SSLError(e, request=request)
        elif isinstance(e, ReadTimeoutError):
            raise ReadTimeout(e, request=request)
        else:
            raise
    return self.build_response(request, resp)
def is_package(self, fullname):
    """Return True if the named module is a package (i.e. has ``__path__``).

    Needed to produce correct spec objects with Python 3.4 (see PEP 451).
    """
    module = self.__get_module(fullname)
    return hasattr(module, '__path__')
def get_code(self, fullname):
    """Return None. Required when is_package is implemented.

    The module is still looked up for its side effects.
    """
    self.__get_module(fullname)
    return None
def clear(self):
    """Delete every key in the backing database. Use with caution!"""
    for key in self.conn.keys():
        self.conn.delete(key)
def prepare_response(self, request, cached):
    """Verify the Vary headers match and rebuild a real urllib3
    HTTPResponse from the cached payload.

    Returns None when the cached entry cannot serve this request.
    """
    vary = cached.get('vary', {})
    # A '*' Vary can never be satisfied.
    if '*' in vary:
        return
    for header, expected in vary.items():
        if request.headers.get(header, None) != expected:
            return

    body_raw = cached['response'].pop('body')
    try:
        body = io.BytesIO(body_raw)
    except TypeError:
        # Older cache entries may have stored the body as text.
        body = io.BytesIO(body_raw.encode('utf8'))

    return HTTPResponse(body=body, preload_content=False, **cached['response'])
def warning(self, response):
    """Return a 1xx Warning header value describing the cache adjustment.

    The response is provided so subclasses can emit warnings like 113
    (http://tools.ietf.org/html/rfc7234#section-5.5.4) for responses
    over 24 hours old; this base implementation always reports stale.
    """
    return '110 - "Response is Stale"'
def update_headers(self, response):
    """Return headers to merge into the response.

    NOTE: implementations SHOULD include a Warning header to signify
    the response was adjusted by the client cache; this base
    implementation adds nothing.
    """
    return {}
def send(self, request, **kw):
    """Send a request, serving GETs from cache when possible.

    On a cache miss, conditional headers (e.g. If-None-Match) are
    attached before deferring to the underlying transport.
    """
    if request.method == 'GET':
        cached_response = self.controller.cached_request(request)
        if cached_response:
            return self.build_response(request, cached_response, from_cache=True)
        # No usable entry: try to revalidate instead of refetching.
        request.headers.update(self.controller.conditional_headers(request))
    return super(CacheControlAdapter, self).send(request, **kw)
def build_response(self, request, response, from_cache=False):
    """Build a response by making a request or using the cache.

    This ends up calling send and returning a potentially cached
    response. Handles 304 revalidation, 301 caching, heuristic
    freshness, deferred body caching, and invalidation by unsafe
    methods.
    """
    if ((not from_cache) and (request.method == 'GET')):
        if (response.status == 304):
            # Revalidation hit: refresh the stored entry and serve it.
            cached_response = self.controller.update_cached_response(request, response)
            if (cached_response is not response):
                from_cache = True
            # Drain and release the 304 so its connection is reusable.
            response.read(decode_content=False)
            response.release_conn()
            response = cached_response
        elif (response.status == 301):
            # Permanent redirects are cached immediately.
            self.controller.cache_response(request, response)
        else:
            if self.heuristic:
                response = self.heuristic.apply(response)
            # Wrap the body so the response is cached only once it has
            # been fully consumed by the caller.
            response._fp = CallbackFileWrapper(response._fp, functools.partial(self.controller.cache_response, request, response))
    resp = super(CacheControlAdapter, self).build_response(request, response)
    # Successful unsafe methods invalidate any cached entry for the URL.
    if ((request.method in self.invalidating_methods) and resp.ok):
        cache_url = self.controller.cache_url(request.url)
        self.cache.delete(cache_url)
    resp.from_cache = from_cache
    return resp
@classmethod
def _urlnorm(cls, uri):
    """Normalize ``uri`` into a safe cache key.

    Lowercases scheme and authority, defaults an empty path to '/',
    keeps the query, and drops the fragment. Absolute URIs only.
    """
    scheme, authority, path, query, fragment = parse_uri(uri)
    if not scheme or not authority:
        raise Exception('Only absolute URIs are allowed. uri = %s' % uri)
    scheme = scheme.lower()
    authority = authority.lower()
    if not path:
        path = '/'
    request_uri = '?'.join([path, query]) if query else path
    return scheme + '://' + authority + request_uri
def parse_cache_control(self, headers):
    """Parse the Cache-Control header into a directive dictionary.

    Directives with a value map to their (lowercased) value; bare
    directives map to 1. A bare directive overrides a valued duplicate.
    """
    retval = {}
    cc_header = 'Cache-Control' if 'Cache-Control' in headers else 'cache-control'
    if cc_header in headers:
        parts = headers[cc_header].split(',')
        # Valued directives first; bare duplicates then take precedence.
        valued = dict(
            tuple(piece.strip().lower() for piece in part.split('=', 1))
            for part in parts if '=' in part
        )
        bare = dict((part.strip().lower(), 1) for part in parts if '=' not in part)
        valued.update(bare)
        retval = valued
    return retval
def cached_request(self, request):
    """Return a cached response if it exists in the cache, otherwise
    return False.

    Applies request directives (no-cache, max-age, min-fresh), serves
    cached 301s unconditionally, and compares the entry's freshness
    lifetime against its current age.
    """
    cache_url = self.cache_url(request.url)
    logger.debug('Looking up "%s" in the cache', cache_url)
    cc = self.parse_cache_control(request.headers)
    # Request-level directives can forbid cache usage outright.
    if ('no-cache' in cc):
        logger.debug('Request header has "no-cache", cache bypassed')
        return False
    if (('max-age' in cc) and (cc['max-age'] == 0)):
        logger.debug('Request header has "max_age" as 0, cache bypassed')
        return False
    cache_data = self.cache.get(cache_url)
    if (cache_data is None):
        logger.debug('No cache entry available')
        return False
    resp = self.serializer.loads(request, cache_data)
    if (not resp):
        logger.warning('Cache entry deserialization failed, entry ignored')
        return False
    # Permanent redirects are served regardless of freshness.
    if (resp.status == 301):
        msg = 'Returning cached "301 Moved Permanently" response (ignoring date and etag information)'
        logger.debug(msg)
        return resp
    headers = CaseInsensitiveDict(resp.headers)
    if ((not headers) or ('date' not in headers)):
        if ('etag' not in headers):
            # Without a date or etag the entry can never be validated.
            logger.debug('Purging cached response: no date or etag')
            self.cache.delete(cache_url)
        logger.debug('Ignoring cached response: no date')
        return False
    now = time.time()
    date = calendar.timegm(parsedate_tz(headers['date']))
    current_age = max(0, (now - date))
    logger.debug('Current age based on date: %i', current_age)
    resp_cc = self.parse_cache_control(headers)
    freshness_lifetime = 0
    # Freshness: response max-age beats Expires; a request max-age
    # overrides both.
    if (('max-age' in resp_cc) and resp_cc['max-age'].isdigit()):
        freshness_lifetime = int(resp_cc['max-age'])
        logger.debug('Freshness lifetime from max-age: %i', freshness_lifetime)
    elif ('expires' in headers):
        expires = parsedate_tz(headers['expires'])
        if (expires is not None):
            expire_time = (calendar.timegm(expires) - date)
            freshness_lifetime = max(0, expire_time)
            logger.debug('Freshness lifetime from expires: %i', freshness_lifetime)
    if ('max-age' in cc):
        try:
            freshness_lifetime = int(cc['max-age'])
            logger.debug('Freshness lifetime from request max-age: %i', freshness_lifetime)
        except ValueError:
            freshness_lifetime = 0
    if ('min-fresh' in cc):
        try:
            min_fresh = int(cc['min-fresh'])
        except ValueError:
            min_fresh = 0
        # min-fresh inflates the current age so near-stale entries are
        # skipped.
        current_age += min_fresh
        logger.debug('Adjusted current age from min-fresh: %i', current_age)
    if (freshness_lifetime > current_age):
        logger.debug('The response is "fresh", returning cached response')
        logger.debug('%i > %i', freshness_lifetime, current_age)
        return resp
    if ('etag' not in headers):
        logger.debug('The cached response is "stale" with no etag, purging')
        self.cache.delete(cache_url)
        return False
    # NOTE(review): stale-with-etag falls through returning None (falsy);
    # callers then revalidate via conditional headers.
def cache_response(self, request, response, body=None):
    """Algorithm for caching requests. This assumes a requests Response
    object.

    Skips uncacheable statuses and truncated bodies, honors no-store on
    either side (including purging stale entries), and stores entries
    keyed on etag, permanent-redirect status, max-age, or Expires.
    """
    # Only cache statuses whose semantics we understand.
    cacheable_status_codes = [200, 203, 300, 301]
    if (response.status not in cacheable_status_codes):
        logger.debug('Status code %s not in %s', response.status, cacheable_status_codes)
        return
    response_headers = CaseInsensitiveDict(response.headers)
    # A body shorter than the declared Content-Length is truncated —
    # never cache it.
    if ((body is not None) and ('content-length' in response_headers) and response_headers['content-length'].isdigit() and (int(response_headers['content-length']) != len(body))):
        return
    cc_req = self.parse_cache_control(request.headers)
    cc = self.parse_cache_control(response_headers)
    cache_url = self.cache_url(request.url)
    logger.debug('Updating cache with response from "%s"', cache_url)
    # no-store on either side forbids caching and purges any old entry.
    no_store = False
    if cc.get('no-store'):
        no_store = True
        logger.debug('Response header has "no-store"')
    if cc_req.get('no-store'):
        no_store = True
        logger.debug('Request header has "no-store"')
    if (no_store and self.cache.get(cache_url)):
        logger.debug('Purging existing cache entry to honor "no-store"')
        self.cache.delete(cache_url)
    if (self.cache_etags and ('etag' in response_headers)):
        logger.debug('Caching due to etag')
        self.cache.set(cache_url, self.serializer.dumps(request, response, body=body))
    elif (response.status == 301):
        # Permanent redirects are stored without a body.
        logger.debug('Caching permanant redirect')
        self.cache.set(cache_url, self.serializer.dumps(request, response))
    elif ('date' in response_headers):
        # Only freshness-datable responses are cached beyond this point.
        if (cc and cc.get('max-age')):
            if (int(cc['max-age']) > 0):
                logger.debug('Caching b/c date exists and max-age > 0')
                self.cache.set(cache_url, self.serializer.dumps(request, response, body=body))
        elif ('expires' in response_headers):
            if response_headers['expires']:
                logger.debug('Caching b/c of expires header')
                self.cache.set(cache_url, self.serializer.dumps(request, response, body=body))
def update_cached_response(self, request, response):
    """Merge a 304's fresh headers into the cached entry and re-store it.

    Should only be called after sending an ETag and receiving a 304.
    Returns the refreshed cached response, or the 304 itself when no
    cached entry exists.
    """
    cache_url = self.cache_url(request.url)
    cached_response = self.serializer.loads(request, self.cache.get(cache_url))
    if not cached_response:
        return response

    # Content-Length of the 304 describes its (empty) body, not ours.
    excluded_headers = ['content-length']
    fresh_headers = dict(
        (name, value)
        for name, value in response.headers.items()
        if name.lower() not in excluded_headers
    )
    cached_response.headers.update(fresh_headers)
    cached_response.status = 200

    self.cache.set(cache_url, self.serializer.dumps(request, cached_response))
    return cached_response
def __iter__(self):
    """Pump the tokenizer state machine lazily.

    Each ``self.state()`` call may queue tokens; stream errors are
    yielded (as ParseError tokens) before any queued tokens, and the
    generator pauses after each yield until the next token is requested.
    """
    self.tokenQueue = deque([])
    while self.state():
        # Stream errors take priority over real tokens.
        while self.stream.errors:
            (yield {u'type': tokenTypes[u'ParseError'], u'data': self.stream.errors.pop(0)})
        while self.tokenQueue:
            (yield self.tokenQueue.popleft())
def consumeNumberEntity(self, isHex):
    """Return U+FFFD or the character for a numeric entity.

    Consumes digits (hex when ``isHex``), discards a trailing ';' if
    present, and queues ParseError tokens for illegal code points or a
    missing semicolon.
    """
    allowed = digits
    radix = 10
    if isHex:
        allowed = hexDigits
        radix = 16
    # Consume every available digit.
    charStack = []
    c = self.stream.char()
    while ((c in allowed) and (c is not EOF)):
        charStack.append(c)
        c = self.stream.char()
    charAsInt = int(u''.join(charStack), radix)
    if (charAsInt in replacementCharacters):
        # Spec-mandated (Windows-1252 style) replacements.
        char = replacementCharacters[charAsInt]
        self.tokenQueue.append({u'type': tokenTypes[u'ParseError'], u'data': u'illegal-codepoint-for-numeric-entity', u'datavars': {u'charAsInt': charAsInt}})
    elif ((55296 <= charAsInt <= 57343) or (charAsInt > 1114111)):
        # Surrogates and out-of-range code points become U+FFFD.
        char = u'\ufffd'
        self.tokenQueue.append({u'type': tokenTypes[u'ParseError'], u'data': u'illegal-codepoint-for-numeric-entity', u'datavars': {u'charAsInt': charAsInt}})
    else:
        # Valid code point, though some ranges still warrant a parse
        # error (control characters, noncharacters).
        if ((1 <= charAsInt <= 8) or (14 <= charAsInt <= 31) or (127 <= charAsInt <= 159) or (64976 <= charAsInt <= 65007) or (charAsInt in frozenset([11, 65534, 65535, 131070, 131071, 196606, 196607, 262142, 262143, 327678, 327679, 393214, 393215, 458750, 458751, 524286, 524287, 589822, 589823, 655358, 655359, 720894, 720895, 786430, 786431, 851966, 851967, 917502, 917503, 983038, 983039, 1048574, 1048575, 1114110, 1114111]))):
            self.tokenQueue.append({u'type': tokenTypes[u'ParseError'], u'data': u'illegal-codepoint-for-numeric-entity', u'datavars': {u'charAsInt': charAsInt}})
        try:
            char = chr(charAsInt)
        except ValueError:
            # Narrow build: encode as a surrogate pair.
            v = (charAsInt - 65536)
            char = (chr((55296 | (v >> 10))) + chr((56320 | (v & 1023))))
    # Discard a trailing ';'; flag and unget anything else.
    if (c != u';'):
        self.tokenQueue.append({u'type': tokenTypes[u'ParseError'], u'data': u'numeric-entity-without-semicolon'})
        self.stream.unget(c)
    return char
def processEntityInAttribute(self, allowedChar):
    """Consume an entity while inside an attribute value.

    Replaces the need for a dedicated "entityInAttributeValueState".
    """
    self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
    """Queue the current token for emission and return to the data state.

    Tag tokens get their name lowercased when configured; end tags with
    attributes or a self-closing flag additionally queue parse errors.
    """
    token = self.currentToken
    if token[u'type'] in tagTokenTypes:
        if self.lowercaseElementName:
            token[u'name'] = token[u'name'].translate(asciiUpper2Lower)
        if token[u'type'] == tokenTypes[u'EndTag']:
            # End tags may not carry attributes or a self-closing flag.
            if token[u'data']:
                self.tokenQueue.append({u'type': tokenTypes[u'ParseError'], u'data': u'attributes-in-end-tag'})
            if token[u'selfClosing']:
                self.tokenQueue.append({u'type': tokenTypes[u'ParseError'], u'data': u'self-closing-flag-on-end-tag'})
    self.tokenQueue.append(token)
    self.state = self.dataState
def __init__(self, **kwargs):
    """Initialize HTMLSerializer.

    Keyword options (see class documentation for the full list) include
    inject_meta_charset, quote_attr_values, quote_char,
    escape_lt_in_attrs, escape_rcdata, resolve_entities,
    strip_whitespace, minimize_boolean_attributes,
    use_trailing_solidus, space_before_trailing_solidus, sanitize,
    omit_optional_tags and alphabetical_attributes. Unrecognized keys
    are ignored; unspecified options keep their class-level defaults.
    """
    # An explicit quote_char disables automatic best-quote selection.
    if u'quote_char' in kwargs:
        self.use_best_quote_char = False
    for attr in self.options:
        setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
    self.errors = []
    self.strict = False
def __init__(self, name):
    """Create a tree node.

    :param name: the tag name associated with this node.
    """
    self.name = name          # tag name
    self.parent = None        # parent node, or None for the document node
    self.value = None         # text value (text and comment nodes only)
    self.attributes = {}      # attribute name -> value mapping
    self.childNodes = []      # ordered list of child nodes
    self._flags = []          # miscellaneous flags set during tree building
def appendChild(self, node):
    """Append *node* as the last child of this node.

    Abstract; concrete tree implementations must override this.
    """
    raise NotImplementedError
def insertText(self, data, insertBefore=None):
    """Insert *data* as text in this node, before *insertBefore* (or at
    the end of the node's text when *insertBefore* is None).

    Abstract; concrete tree implementations must override this.
    """
    raise NotImplementedError
def insertBefore(self, node, refNode):
    """Insert *node* as a child of this node, before *refNode*.

    Implementations raise ValueError if *refNode* is not a child.
    Abstract; concrete tree implementations must override this.
    """
    raise NotImplementedError
def removeChild(self, node):
    """Remove *node* from this node's children.

    Abstract; concrete tree implementations must override this.
    """
    raise NotImplementedError
def reparentChildren(self, newParent):
    """Move every child of this node onto *newParent*.

    Goes through appendChild so tree implementations that store text
    out-of-band still relocate it correctly; afterwards this node has
    no children.
    """
    for child in self.childNodes:
        newParent.appendChild(child)
    self.childNodes = []
def cloneNode(self):
    """Return a shallow copy: same name and attributes, no parent or
    children.

    Abstract; concrete tree implementations must override this.
    """
    raise NotImplementedError
def hasContent(self):
    """Return True if this node has children or text, False otherwise.

    Abstract; concrete tree implementations must override this.
    """
    raise NotImplementedError
def elementInActiveFormattingElements(self, name):
    """Search the active formatting elements, newest first, for *name*.

    The search stops at the most recent marker; returns the matching
    element, or False if none is found before the marker (or before the
    start of the list).
    """
    for entry in reversed(self.activeFormattingElements):
        if entry == Marker:
            break
        if entry.name == name:
            return entry
    return False
def createElement(self, token):
    """Build (but do not attach) an element for start-tag *token*.

    The token's namespace defaults to the builder's defaultNamespace;
    the token's attribute dict is attached to the new element.
    """
    namespace = token.get(u'namespace', self.defaultNamespace)
    element = self.elementClass(token[u'name'], namespace)
    element.attributes = token[u'data']
    return element
def _setInsertFromTable(self, value):
    """Setter: switch element insertion between the normal path and the
    foster-parenting path used for misnested table content."""
    self._insertFromTable = value
    if value:
        self.insertElement = self.insertElementTable
    else:
        self.insertElement = self.insertElementNormal
def insertElementTable(self, token):
    """Create an element and insert it into the tree, foster-parenting
    it when the current node cannot contain table content."""
    element = self.createElement(token)
    if (self.openElements[(-1)].name not in tableInsertModeElements):
        # Current node accepts arbitrary content; insert normally.
        return self.insertElementNormal(token)
    else:
        # Misnested table content: find the foster parent and splice in.
        (parent, insertBefore) = self.getTableMisnestedNodePosition()
        if (insertBefore is None):
            parent.appendChild(element)
        else:
            parent.insertBefore(element, insertBefore)
        self.openElements.append(element)
        return element
def insertText(self, data, parent=None):
    """Insert character data, foster-parenting it when it would be
    misnested inside a table element."""
    if parent is None:
        parent = self.openElements[(-1)]
    # Short-circuit keeps tableInsertModeElements untouched unless the
    # misnested-table insertion mode is active.
    fosterRequired = self.insertFromTable and (
        self.openElements[(-1)].name in tableInsertModeElements)
    if not fosterRequired:
        parent.insertText(data)
    else:
        (parent, insertBefore) = self.getTableMisnestedNodePosition()
        parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
    """Compute (fosterParent, insertBefore) for a misnested table node.

    *insertBefore* is the sibling to insert in front of, or None to
    append at the end of the foster parent.
    """
    lastTable = None
    for candidate in reversed(self.openElements):
        if candidate.name == u'table':
            lastTable = candidate
            break
    if lastTable is None:
        # Fragment case: the innerHTML root becomes the foster parent.
        return (self.openElements[0], None)
    if lastTable.parent:
        # Insert into the table's parent, immediately before the table.
        return (lastTable.parent, lastTable)
    # Table has no parent yet: use the element just above it on the stack.
    index = self.openElements.index(lastTable)
    return (self.openElements[index - 1], None)
def getDocument(self):
    """Return the document node of the completed tree."""
    return self.document
def getFragment(self):
    """Return the parsed fragment: a fresh fragment node that adopts
    every child of the innerHTML root element."""
    fragment = self.fragmentClass()
    root = self.openElements[0]
    root.reparentChildren(fragment)
    return fragment
'Serialize the subtree of node in the format required by unit tests node - the node from which to start serializing'
def testSerializer(self, node):
raise NotImplementedError
def insertRoot(self, token):
    """Create the document root for the lxml tree.

    libxml2 does not allow the doctype to be altered after parsing, so
    a small document string (doctype plus a placeholder root element) is
    built and parsed up front; the placeholder is then re-tagged to the
    real root element's name.
    """
    docStr = u''
    if self.doctype:
        assert self.doctype.name
        docStr += (u'<!DOCTYPE %s' % self.doctype.name)
        if ((self.doctype.publicId is not None) or (self.doctype.systemId is not None)):
            docStr += (u' PUBLIC "%s" ' % self.infosetFilter.coercePubid((self.doctype.publicId or u'')))
            if self.doctype.systemId:
                sysid = self.doctype.systemId
                # A system id containing both quote kinds cannot be
                # expressed; the single quotes are replaced (lossy).
                if ((sysid.find(u"'") >= 0) and (sysid.find(u'"') >= 0)):
                    warnings.warn(u'DOCTYPE system cannot contain single and double quotes', DataLossWarning)
                    sysid = sysid.replace(u"'", u'U00027')
                if (sysid.find(u"'") >= 0):
                    docStr += (u'"%s"' % sysid)
                else:
                    docStr += (u"'%s'" % sysid)
            else:
                docStr += u"''"
        docStr += u'>'
        if (self.doctype.name != token[u'name']):
            warnings.warn(u'lxml cannot represent doctype with a different name to the root element', DataLossWarning)
    # Placeholder root element; renamed to the real root tag below.
    docStr += u'<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>'
    root = etree.fromstring(docStr)
    # Append any comments seen before the root element.
    for comment_token in self.initial_comments:
        root.addprevious(etree.Comment(comment_token[u'data']))
    self.document = self.documentClass()
    self.document._elementTree = root.getroottree()
    # Rename the placeholder to the real root element (namespace-aware).
    name = token[u'name']
    namespace = token.get(u'namespace', self.defaultNamespace)
    if (namespace is None):
        etree_tag = name
    else:
        etree_tag = (u'{%s}%s' % (namespace, name))
    root.tag = etree_tag
    # Wrap the lxml element and push it onto the open-elements stack.
    root_element = self.elementClass(name, namespace)
    root_element._element = root
    self.document._childNodes.append(root_element)
    self.openElements.append(root_element)
    # From now on comments are inserted inside the tree, not before it.
    self.insertComment = self.insertCommentMain
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer, strict=False, namespaceHTMLElements=True, debug=False):
    """Create an HTML parser.

    :param tree: a treebuilder class controlling the type of tree
        returned; defaults to the 'etree' treebuilder.
    :param tokenizer: class providing the token stream (may be replaced,
        e.g. by a sanitizing tokenizer).
    :param strict: raise an exception on parse errors when True.
    :param namespaceHTMLElements: whether elements get the HTML namespace.
    :param debug: enable phase debugging in getPhases.
    """
    self.strict = strict
    if (tree is None):
        tree = treebuilders.getTreeBuilder(u'etree')
    self.tree = tree(namespaceHTMLElements)
    self.tokenizer_class = tokenizer
    self.errors = []
    # One phase instance per insertion mode, each bound to this parser.
    self.phases = dict([(name, cls(self, self.tree)) for (name, cls) in getPhases(debug).items()])
@property
def documentEncoding(self):
    """The name of the character encoding used to decode the input
    stream, or None if tokenization has not started yet."""
    return (self.tokenizer.stream.charEncoding[0]
            if hasattr(self, u'tokenizer') else None)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
    """Parse an HTML document into a well-formed tree.

    :param stream: file-like object or string containing the HTML.
    :param encoding: if given, used regardless of any BOM or in-document
        declaration (such as a meta element).
    :return: the document node of the resulting tree.
    """
    self._parse(stream, innerHTML=False, encoding=encoding,
                parseMeta=parseMeta, useChardet=useChardet)
    return self.tree.getDocument()
def parseFragment(self, stream, container=u'div', encoding=None, parseMeta=False, useChardet=True):
    """Parse an HTML fragment into a well-formed tree.

    :param container: name of the element whose innerHTML is being set
        (None defaults to 'div').
    :param stream: file-like object or string containing the HTML.
    :param encoding: if given, used regardless of any BOM or in-document
        declaration (such as a meta element).
    :return: the fragment node of the resulting tree.
    """
    self._parse(stream, True, container=container, encoding=encoding)
    return self.tree.getFragment()
def normalizeToken(self, token):
    """Apply HTML5 normalization to a token: for start tags, collapse
    the raw attribute list into a dict in which the FIRST occurrence of
    each attribute name wins (achieved by building the dict from the
    reversed list, so earlier entries overwrite later duplicates)."""
    if token[u'type'] == tokenTypes[u'StartTag']:
        token[u'data'] = dict(token[u'data'][::(-1)])
    return token
def parseRCDataRawtext(self, token, contentType):
    """Generic RCDATA/RAWTEXT parsing: insert the element, switch the
    tokenizer into the matching state, and enter the 'text' phase.

    :param contentType: either u'RCDATA' or u'RAWTEXT'.
    """
    assert contentType in (u'RAWTEXT', u'RCDATA')
    self.tree.insertElement(token)
    self.tokenizer.state = (self.tokenizer.rawtextState
                            if contentType == u'RAWTEXT'
                            else self.tokenizer.rcdataState)
    # Remember where to return to once the text content ends.
    self.originalPhase = self.phase
    self.phase = self.phases[u'text']
def __init__(self, source):
    """Initialise the unicode input stream.

    ``HTMLUnicodeInputStream(source)`` -> normalized stream from
    *source* for use by html5lib; *source* can be a file-like object or
    a string.  Chooses the surrogate-handling strategy appropriate to
    this Python build.
    """
    if (not utils.supports_lone_surrogates):
        # Platform cannot represent lone surrogates in str (e.g. Jython):
        # skip surrogate error reporting/replacement entirely.
        self.reportCharacterErrors = None
        self.replaceCharactersRegexp = None
    elif (len(u'\U0010ffff') == 1):
        # Wide (UCS-4) build: any surrogate code point is invalid.
        self.reportCharacterErrors = self.characterErrorsUCS4
        self.replaceCharactersRegexp = re.compile(eval(u'"[\\uD800-\\uDFFF]"'))
    else:
        # Narrow (UCS-2) build: only unpaired surrogates are invalid.
        self.reportCharacterErrors = self.characterErrorsUCS2
        self.replaceCharactersRegexp = re.compile(eval(u'"([\\uD800-\\uDBFF](?![\\uDC00-\\uDFFF])|(?<![\\uD800-\\uDBFF])[\\uDC00-\\uDFFF])"'))
    self.newLines = [0]  # offsets of line starts, for position reporting
    self.charEncoding = (u'utf-8', u'certain')
    self.dataStream = self.openStream(source)
    self.reset()
def openStream(self, source):
    """Return a file-like object for *source*.

    File-like objects (anything with a ``read`` attribute) pass through
    unchanged; strings are wrapped in a StringIO.
    """
    if hasattr(source, u'read'):
        return source
    return StringIO(source)
def position(self):
    """Return the (1-based line, column) of the current stream position."""
    line, col = self._position(self.chunkOffset)
    return (line + 1, col)
def char(self):
    """Return the next character from the stream, loading a fresh chunk
    when the current one is exhausted; return EOF at end of input."""
    if self.chunkOffset >= self.chunkSize:
        if not self.readChunk():
            return EOF
    offset = self.chunkOffset
    self.chunkOffset = offset + 1
    return self.chunk[offset]
def charsUntil(self, characters, opposite=False):
    """Return the characters from the stream up to (not including) any
    character in *characters* (or, with ``opposite=True``, up to the
    first character NOT in the set), or EOF.

    *characters* must support ``in`` and iteration over its characters.
    """
    # Compiled regexes are cached per (characters, opposite) pair.
    try:
        chars = charsUntilRegEx[(characters, opposite)]
    except KeyError:
        if __debug__:
            for c in characters:
                # The \xNN escape built below only covers ASCII.
                assert (ord(c) < 128)
        regex = u''.join([(u'\\x%02x' % ord(c)) for c in characters])
        if (not opposite):
            regex = (u'^%s' % regex)  # negated character class
        chars = charsUntilRegEx[(characters, opposite)] = re.compile((u'[%s]+' % regex))
    rv = []
    while True:
        # Find the longest matching prefix of the current chunk.
        m = chars.match(self.chunk, self.chunkOffset)
        if (m is None):
            # Nothing matched; stop unless we're exactly at chunk end
            # (in which case a new chunk may continue the run).
            if (self.chunkOffset != self.chunkSize):
                break
        else:
            end = m.end()
            if (end != self.chunkSize):
                # Match stopped mid-chunk: take it and finish.
                rv.append(self.chunk[self.chunkOffset:end])
                self.chunkOffset = end
                break
        # The whole remainder of the chunk matched: take it all and
        # continue with the next chunk (or stop at EOF).
        rv.append(self.chunk[self.chunkOffset:])
        if (not self.readChunk()):
            break
    r = u''.join(rv)
    return r
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
    """Initialise the byte-oriented input stream.

    :param source: file-like object, or a byte string.
    :param encoding: if given, used regardless of any BOM or later
        declaration (such as in a meta element).
    :param parseMeta: look for a <meta> element carrying encoding info.
    :param chardet: allow chardet-based encoding detection.
    """
    self.rawStream = self.openStream(source)
    HTMLUnicodeInputStream.__init__(self, self.rawStream)
    self.charEncoding = (codecName(encoding), u'certain')
    self.numBytesMeta = 512      # bytes scanned for a <meta> declaration
    self.numBytesChardet = 100   # chunk size fed to chardet
    self.defaultEncoding = u'windows-1252'
    # No usable explicit encoding: fall back to detection.
    if (self.charEncoding[0] is None):
        self.charEncoding = self.detectEncoding(parseMeta, chardet)
    self.reset()
def openStream(self, source):
    """Return a seekable binary file-like object for *source*.

    File-like objects (anything with a ``read`` attribute) pass through;
    byte strings are wrapped in a BytesIO.  Streams that do not support
    seek/tell are wrapped in a BufferedStream.
    """
    if hasattr(source, u'read'):
        stream = source
    else:
        stream = BytesIO(source)
    try:
        # Probe for seek/tell support; some streams (e.g. sockets,
        # pipes) raise here and need buffering.
        stream.seek(stream.tell())
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception covers every seek/tell failure.
        stream = BufferedStream(stream)
    return stream
def detectBOM(self):
    """Look for a byte-order mark at the start of rawStream.

    Returns the corresponding encoding name (with the stream positioned
    just past the BOM), or None (with the stream rewound to the start).
    """
    bomDict = {
        codecs.BOM_UTF8: u'utf-8',
        codecs.BOM_UTF16_LE: u'utf-16-le',
        codecs.BOM_UTF16_BE: u'utf-16-be',
        codecs.BOM_UTF32_LE: u'utf-32-le',
        codecs.BOM_UTF32_BE: u'utf-32-be',
    }
    string = self.rawStream.read(4)
    assert isinstance(string, bytes)
    # Try the 3-byte UTF-8 BOM, then 4-byte UTF-32 (which must be
    # checked before its 2-byte UTF-16 prefix), then 2-byte UTF-16.
    encoding = bomDict.get(string[:3])
    seek = 3
    if not encoding:
        encoding = bomDict.get(string)
        seek = 4
        if not encoding:
            encoding = bomDict.get(string[:2])
            seek = 2
    # Position just past the BOM, or back at the start if none found.
    self.rawStream.seek(seek if encoding else 0)
    return encoding
def detectEncodingMeta(self):
    """Return the encoding declared by a <meta> element in the first
    numBytesMeta bytes of the stream, or None; the stream is rewound."""
    buffer = self.rawStream.read(self.numBytesMeta)
    assert isinstance(buffer, bytes)
    parser = EncodingParser(buffer)
    self.rawStream.seek(0)
    encoding = parser.getEncoding()
    if (encoding in (u'utf-16', u'utf-16-be', u'utf-16-le')):
        # A meta-declared UTF-16 is treated as UTF-8 (a UTF-16 document
        # could not have had an ASCII-readable meta element).
        encoding = u'utf-8'
    return encoding
def skip(self, chars=spaceCharactersBytes):
    """Advance past any leading bytes contained in *chars*.

    Returns the first byte NOT in the set (with the position left on
    it), or None when the end of the data is reached.
    """
    p = self.position
    length = len(self)
    while p < length:
        c = self[p:(p + 1)]
        if c not in chars:
            self._position = p
            return c
        p += 1
    self._position = p
    return None
def matchBytes(self, bytes):
    """If the data at the current position starts with *bytes*, consume
    it (advance past the match) and return True; otherwise leave the
    position untouched and return False."""
    p = self.position
    matched = self[p:(p + len(bytes))].startswith(bytes)
    if matched:
        self.position += len(bytes)
    return matched
def jumpTo(self, bytes):
    """Advance to the LAST byte of the next occurrence of *bytes*.

    Returns True on success; raises StopIteration when the sequence
    does not occur (callers treat that as end-of-data).
    """
    offset = self[self.position:].find(bytes)
    if offset <= (-1):
        raise StopIteration
    if self._position == (-1):
        self._position = 0
    self._position += ((offset + len(bytes)) - 1)
    return True
def __init__(self, data):
    """Create an encoding parser.

    :param data: the byte string to scan for an encoding declaration.
    """
    self.data = EncodingBytes(data)
    self.encoding = None  # set by getEncoding() if a declaration is found
def handleComment(self):
    """Skip to the end of the current comment ('-->')."""
    return self.data.jumpTo('-->')
def getAttribute(self):
    """Return a (name, value) pair for the next attribute in the
    stream, if one is found, or None.

    Follows the step numbering of the HTML5 "get an attribute"
    pre-scan algorithm; attribute names and unquoted values are
    lowercased as they are collected.
    """
    data = self.data
    # Step 1: skip whitespace and slashes before the attribute.
    c = data.skip((spaceCharactersBytes | frozenset(['/'])))
    assert ((c is None) or (len(c) == 1))
    # Step 2: end of tag or end of data means no attribute.
    if (c in ('>', None)):
        return None
    # Step 3
    attrName = []
    attrValue = []
    # Step 4: collect the attribute name.
    while True:
        if ((c == '=') and attrName):
            break
        elif (c in spaceCharactersBytes):
            # Step 6: skip whitespace after the name.
            c = data.skip()
            break
        elif (c in ('/', '>')):
            # Name with no value.
            return (''.join(attrName), '')
        elif (c in asciiUppercaseBytes):
            attrName.append(c.lower())
        elif (c is None):
            return None
        else:
            attrName.append(c)
        # Step 5: advance to the next byte.
        c = next(data)
    # Step 7: no '=' means a valueless attribute; back up one byte.
    if (c != '='):
        data.previous()
        return (''.join(attrName), '')
    # Step 8: consume the '='.
    next(data)
    # Step 9: skip whitespace before the value.
    c = data.skip()
    # Step 10: quoted value.
    if (c in ("'", '"')):
        quoteChar = c
        while True:
            c = next(data)
            if (c == quoteChar):
                # Closing quote: consume it and return the pair.
                next(data)
                return (''.join(attrName), ''.join(attrValue))
            elif (c in asciiUppercaseBytes):
                attrValue.append(c.lower())
            else:
                attrValue.append(c)
    elif (c == '>'):
        return (''.join(attrName), '')
    elif (c in asciiUppercaseBytes):
        attrValue.append(c.lower())
    elif (c is None):
        return None
    else:
        attrValue.append(c)
    # Step 11: unquoted value, terminated by whitespace or '>'/'<'.
    while True:
        c = next(data)
        if (c in spacesAngleBrackets):
            return (''.join(attrName), ''.join(attrValue))
        elif (c in asciiUppercaseBytes):
            attrValue.append(c.lower())
        elif (c is None):
            return None
        else:
            attrValue.append(c)
def __init__(self, url=None):
    """Initialise an instance.

    :param url: The URL of the index. If not specified, the URL for
        PyPI is used.
    """
    self.url = (url or DEFAULT_INDEX)
    self.read_configuration()
    # Only plain http(s) URLs without params/query/fragment are valid.
    (scheme, netloc, path, params, query, frag) = urlparse(self.url)
    if (params or query or frag or (scheme not in ('http', 'https'))):
        raise DistlibException(('invalid repository: %s' % self.url))
    self.password_handler = None
    self.ssl_verifier = None
    self.gpg = None
    self.gpg_home = None
    self.rpc_proxy = None
    # Probe for an available GnuPG executable, preferring gpg2.
    with open(os.devnull, 'w') as sink:
        for s in ('gpg2', 'gpg'):
            try:
                # NOTE(review): check_call raises CalledProcessError on a
                # nonzero exit, which is NOT caught here - only OSError
                # (binary missing) is; confirm a failing `gpg --version`
                # propagating is intended.
                rc = subprocess.check_call([s, '--version'], stdout=sink, stderr=sink)
                if (rc == 0):
                    self.gpg = s
                    break
            except OSError:
                pass
def _get_pypirc_command(self):
    """Get the distutils command for interacting with PyPI
    configurations.

    :return: the command.
    """
    # NOTE: distutils is deprecated (PEP 632) and removed in Python
    # 3.12; these imports are kept local so the module imports cleanly.
    from distutils.core import Distribution
    from distutils.config import PyPIRCCommand
    d = Distribution()
    return PyPIRCCommand(d)
def read_configuration(self):
    """Populate ``username``, ``password``, ``realm`` and ``url`` from
    the PyPI access configuration, letting distutils do the actual
    .pypirc parsing."""
    command = self._get_pypirc_command()
    command.repository = self.url
    cfg = command._read_pypirc()
    self.username = cfg.get('username')
    self.password = cfg.get('password')
    self.realm = cfg.get('realm', 'pypi')
    self.url = cfg.get('repository', self.url)
def save_configuration(self):
    """Persist the PyPI access configuration.

    ``username`` and ``password`` must be set first; distutils performs
    the actual write.
    """
    self.check_credentials()
    command = self._get_pypirc_command()
    command._store_pypirc(self.username, self.password)
def check_credentials(self):
    """Ensure ``username`` and ``password`` are set, and build the HTTP
    basic-auth handler used by subsequent requests.

    :raises DistlibException: if either credential is missing.
    """
    if self.username is None or self.password is None:
        raise DistlibException('username and password must be set')
    pm = HTTPPasswordMgr()
    netloc = urlparse(self.url)[1]
    pm.add_password(self.realm, netloc, self.username, self.password)
    self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
    """Register a distribution on PyPI using the provided metadata.

    :param metadata: A :class:`Metadata` instance defining at least a
        name and version number for the distribution to be registered.
    :return: The HTTP response from the final 'submit' request.
    """
    self.check_credentials()
    metadata.validate()
    fields = metadata.todict()
    # First a server-side 'verify' pass over the metadata...
    fields[':action'] = 'verify'
    self.send_request(self.encode_request(fields.items(), []))
    # ...then 'submit' performs the actual registration.
    fields[':action'] = 'submit'
    return self.send_request(self.encode_request(fields.items(), []))
def _reader(self, name, stream, outbuf):
    """Thread runner for reading lines from a subprocess into a buffer.

    :param name: The logical name of the stream (used for logging only).
    :param stream: The stream to read from. This will typically be a
        pipe connected to the output stream of a subprocess.
    :param outbuf: The list to append the read lines to.
    """
    while True:
        s = stream.readline()
        if (not s):
            break  # EOF: the child closed its end of the pipe
        s = s.decode('utf-8').rstrip()
        outbuf.append(s)
        logger.debug(('%s: %s' % (name, s)))
    stream.close()
def get_sign_command(self, filename, signer, sign_password, keystore=None):
    """Return a suitable command for signing a file.

    :param filename: The pathname of the file to be signed.
    :param signer: The identifier of the signer of the file.
    :param sign_password: The passphrase for the signer's private key.
    :param keystore: Directory containing the keys; defaults to the
        instance's ``gpg_home``.
    :return: (command list for subprocess.Popen, signature file path).
    """
    cmd = [self.gpg, '--status-fd', '2', '--no-tty']
    if (keystore is None):
        keystore = self.gpg_home
    if keystore:
        cmd.extend(['--homedir', keystore])
    if (sign_password is not None):
        # The passphrase is fed to gpg's stdin (fd 0) by run_command.
        cmd.extend(['--batch', '--passphrase-fd', '0'])
    # The detached signature goes into a fresh temp directory; the
    # caller (upload_file) removes it after use.
    td = tempfile.mkdtemp()
    sf = os.path.join(td, (os.path.basename(filename) + '.asc'))
    cmd.extend(['--detach-sign', '--armor', '--local-user', signer, '--output', sf, filename])
    logger.debug('invoking: %s', ' '.join(cmd))
    return (cmd, sf)
def run_command(self, cmd, input_data=None):
    """Run *cmd* in a child process, optionally feeding *input_data*
    (a byte string) to its stdin.

    :return: (exit code, list of stdout lines, list of stderr lines).
    """
    kwargs = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}
    if input_data is not None:
        kwargs['stdin'] = subprocess.PIPE
    stdout = []
    stderr = []
    p = subprocess.Popen(cmd, **kwargs)
    # Drain both pipes on background threads so the child cannot
    # deadlock filling one pipe while we block on the other.
    readers = [Thread(target=self._reader, args=(name, stream, buf))
               for (name, stream, buf) in (('stdout', p.stdout, stdout),
                                           ('stderr', p.stderr, stderr))]
    for t in readers:
        t.start()
    if input_data is not None:
        p.stdin.write(input_data)
        p.stdin.close()
    p.wait()
    for t in readers:
        t.join()
    return (p.returncode, stdout, stderr)
def sign_file(self, filename, signer, sign_password, keystore=None):
    """Sign *filename* with gpg and return the path of the detached
    signature file.

    :param signer: The identifier of the signer of the file.
    :param sign_password: The passphrase for the signer's private key.
    :param keystore: Directory containing the keys; defaults to the
        instance's ``gpg_home``.
    :raises DistlibException: if the signing command fails.
    """
    (cmd, sig_file) = self.get_sign_command(filename, signer,
                                            sign_password, keystore)
    (rc, stdout, stderr) = self.run_command(cmd,
                                            sign_password.encode('utf-8'))
    if rc != 0:
        raise DistlibException('sign command failed with error code %s' % rc)
    return sig_file
def upload_file(self, metadata, filename, signer=None, sign_password=None, filetype='sdist', pyversion='source', keystore=None):
    """Upload a release file to the index.

    :param metadata: A :class:`Metadata` instance with at least a name
        and version for the file being uploaded.
    :param filename: The pathname of the file to upload.
    :param signer: Optional identifier to GPG-sign the file with.
    :param sign_password: Passphrase for the signer's private key.
    :param filetype: distutils command that produced the file, e.g.
        ``sdist`` or ``bdist_wheel``.
    :param pyversion: Python version the release relates to
        (``source`` for version-independent code).
    :param keystore: Directory containing the signing keys; defaults to
        the instance's ``gpg_home``.
    :return: The HTTP response from PyPI.
    """
    self.check_credentials()
    if (not os.path.exists(filename)):
        raise DistlibException(('not found: %s' % filename))
    metadata.validate()
    d = metadata.todict()
    sig_file = None
    if signer:
        if (not self.gpg):
            # Best effort: proceed unsigned rather than fail.
            logger.warning('no signing program available - not signed')
        else:
            sig_file = self.sign_file(filename, signer, sign_password, keystore)
    with open(filename, 'rb') as f:
        file_data = f.read()
    md5_digest = hashlib.md5(file_data).hexdigest()
    sha256_digest = hashlib.sha256(file_data).hexdigest()
    # NOTE(review): 'protcol_version' (sic) looks like the historical
    # misspelling used by the legacy distutils upload API - confirm the
    # server expects this exact key before "fixing" it.
    d.update({':action': 'file_upload', 'protcol_version': '1', 'filetype': filetype, 'pyversion': pyversion, 'md5_digest': md5_digest, 'sha256_digest': sha256_digest})
    files = [('content', os.path.basename(filename), file_data)]
    if sig_file:
        with open(sig_file, 'rb') as f:
            sig_data = f.read()
        files.append(('gpg_signature', os.path.basename(sig_file), sig_data))
        # Remove the temp directory created by get_sign_command.
        shutil.rmtree(os.path.dirname(sig_file))
    request = self.encode_request(d.items(), files)
    return self.send_request(request)
def upload_documentation(self, metadata, doc_dir):
    """Upload documentation to the index.

    :param metadata: A :class:`Metadata` instance with at least a name
        and version for the documentation being uploaded.
    :param doc_dir: The directory containing the documentation; it must
        contain an ``index.html``.
    :return: The HTTP response from PyPI.
    """
    self.check_credentials()
    if (not os.path.isdir(doc_dir)):
        raise DistlibException(('not a directory: %r' % doc_dir))
    fn = os.path.join(doc_dir, 'index.html')
    if (not os.path.exists(fn)):
        raise DistlibException(('not found: %r' % fn))
    metadata.validate()
    (name, version) = (metadata.name, metadata.version)
    # The whole directory is zipped in memory and sent as one upload.
    zip_data = zip_dir(doc_dir).getvalue()
    fields = [(':action', 'doc_upload'), ('name', name), ('version', version)]
    files = [('content', name, zip_data)]
    request = self.encode_request(fields, files)
    return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename, keystore=None):
    """Return a suitable command for verifying a file signature.

    :param signature_filename: Path of the detached signature file.
    :param data_filename: Path of the file containing the signed data.
    :param keystore: Directory containing the keys; defaults to the
        instance's ``gpg_home``.
    :return: The command as a list suitable for subprocess.Popen.
    """
    cmd = [self.gpg, '--status-fd', '2', '--no-tty']
    if (keystore is None):
        keystore = self.gpg_home
    if keystore:
        cmd.extend(['--homedir', keystore])
    cmd.extend(['--verify', signature_filename, data_filename])
    logger.debug('invoking: %s', ' '.join(cmd))
    return cmd
def verify_signature(self, signature_filename, data_filename, keystore=None):
    """Verify a detached signature for a file.

    :param signature_filename: Path of the signature file.
    :param data_filename: Path of the file containing the signed data.
    :param keystore: Directory containing the keys; defaults to the
        instance's ``gpg_home``.
    :return: True if the signature verified, else False.
    :raises DistlibException: if gpg is unavailable or the verify
        command exits with an unexpected status.
    """
    if not self.gpg:
        raise DistlibException('verification unavailable because gpg unavailable')
    cmd = self.get_verify_command(signature_filename, data_filename, keystore)
    (rc, stdout, stderr) = self.run_command(cmd)
    # gpg exits 0 for a good signature, 1 for a bad one; anything else
    # indicates the command itself failed.
    if rc not in (0, 1):
        raise DistlibException('verify command failed with error code %s' % rc)
    return (rc == 0)
def download_file(self, url, destfile, digest=None, reporthook=None):
    """Download *url* to *destfile*, like :func:`urlretrieve`, but with
    optional digest computation/checking during the download.

    :param url: URL of the file (fetched via HTTP GET).
    :param destfile: Pathname to save the downloaded file to.
    :param digest: Either the expected hex digest string (md5 assumed),
        or a (hasher_name, value) tuple.
    :param reporthook: Same as for :func:`urlretrieve`.
    :raises DistlibException: on a short read or digest mismatch.
    """
    if (digest is None):
        digester = None
        logger.debug('No digest specified')
    else:
        if isinstance(digest, (list, tuple)):
            (hasher, digest) = digest
        else:
            hasher = 'md5'  # bare string digest defaults to md5
        digester = getattr(hashlib, hasher)()
        logger.debug(('Digest specified: %s' % digest))
    with open(destfile, 'wb') as dfp:
        # addinfourl is not a context manager on 2.x, hence try/finally.
        sfp = self.send_request(Request(url))
        try:
            headers = sfp.info()
            blocksize = 8192
            size = (-1)
            read = 0
            blocknum = 0
            if ('content-length' in headers):
                size = int(headers['Content-Length'])
            if reporthook:
                reporthook(blocknum, blocksize, size)
            while True:
                block = sfp.read(blocksize)
                if (not block):
                    break
                read += len(block)
                dfp.write(block)
                if digester:
                    digester.update(block)
                blocknum += 1
                if reporthook:
                    reporthook(blocknum, blocksize, size)
        finally:
            sfp.close()
    # Detect a truncated transfer when the server declared a length.
    if ((size >= 0) and (read < size)):
        raise DistlibException(('retrieval incomplete: got only %d out of %d bytes' % (read, size)))
    if digester:
        actual = digester.hexdigest()
        if (digest != actual):
            raise DistlibException(('%s digest mismatch for %s: expected %s, got %s' % (hasher, destfile, digest, actual)))
        logger.debug('Digest verified: %s', digest)
def send_request(self, req):
    """Send a standard-library :class:`Request` and return its response.

    Any configured password handler and SSL verifier are installed into
    the opener before the request is made.

    :param req: The request to send.
    :return: The HTTP response (a standard library HTTPResponse).
    """
    handlers = [h for h in (self.password_handler, self.ssl_verifier) if h]
    opener = build_opener(*handlers)
    return opener.open(req)