"""
The main purpose of this module is to expose LinkCollector.collect_links().
"""

import cgi
import functools
import itertools
import logging
import mimetypes
import os
import re
from collections import OrderedDict

from pip._vendor import html5lib, requests
from pip._vendor.distlib.compat import unescape
from pip._vendor.requests.exceptions import RetryError, SSLError
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request

from pip._internal.exceptions import NetworkConnectionError
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.utils import raise_for_status
from pip._internal.utils.filetypes import ARCHIVE_EXTENSIONS
from pip._internal.utils.misc import pairwise, redact_auth_from_url
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url, url_to_path
from pip._internal.vcs import is_url, vcs

if MYPY_CHECK_RUNNING:
    from optparse import Values
    from typing import (
        Callable, Iterable, List, MutableMapping, Optional,
        Protocol, Sequence, Tuple, TypeVar, Union,
    )
    import xml.etree.ElementTree

    from pip._vendor.requests import Response

    from pip._internal.network.session import PipSession

    HTMLElement = xml.etree.ElementTree.Element
    ResponseHeaders = MutableMapping[str, str]

    F = TypeVar('F')

    class LruCache(Protocol):
        def __call__(self, maxsize=None):
            raise NotImplementedError


logger = logging.getLogger(__name__)


# Fall back to a no-op "cache" decorator on Python 2, where
# functools.lru_cache is unavailable.
def noop_lru_cache(maxsize=None):
    def _wrapper(f):
        return f
    return _wrapper


_lru_cache = getattr(functools, "lru_cache", noop_lru_cache)


def _match_vcs_scheme(url):
    """Look for VCS schemes in the URL.

    Returns the matched VCS scheme, or None if there's no match.
    """
    for scheme in vcs.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            return scheme
    return None


def _is_url_like_archive(url):
    """Return whether the URL looks like an archive.
    """
    filename = Link(url).filename
    for bad_ext in ARCHIVE_EXTENSIONS:
        if filename.endswith(bad_ext):
            return True
    return False


class _NotHTML(Exception):
    def __init__(self, content_type, request_desc):
        super(_NotHTML, self).__init__(content_type, request_desc)
        self.content_type = content_type
        self.request_desc = request_desc


def _ensure_html_header(response):
    """Check the Content-Type header to ensure the response contains HTML.

    Raises `_NotHTML` if the content type is not text/html.
    """
    content_type = response.headers.get("Content-Type", "")
    if not content_type.lower().startswith("text/html"):
        raise _NotHTML(content_type, response.request.method)


class _NotHTTP(Exception):
    pass


def _ensure_html_response(url, session):
    """Send a HEAD request to the URL, and ensure the response contains HTML.

    Raises `_NotHTTP` if the URL is not available for a HEAD request, or
    `_NotHTML` if the content type is not text/html.
    """
    scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
    if scheme not in {'http', 'https'}:
        raise _NotHTTP()

    resp = session.head(url, allow_redirects=True)
    raise_for_status(resp)

    _ensure_html_header(resp)


def _get_html_response(url, session):
    """Access an HTML page with GET, and return the response.

    This consists of three parts:

    1. If the URL looks suspiciously like an archive, send a HEAD first to
       check the Content-Type is HTML, to avoid downloading a large file.
       Raise `_NotHTTP` if the content type cannot be determined, or
       `_NotHTML` if it is not HTML.
    2. Actually perform the request. Raise HTTP exceptions on network
       failures.
    3. Check the Content-Type header to make sure we got HTML, and raise
       `_NotHTML` otherwise.
    """
    if _is_url_like_archive(url):
        _ensure_html_response(url, session=session)

    logger.debug('Getting page %s', redact_auth_from_url(url))

    resp = session.get(
        url,
        headers={
            "Accept": "text/html",
            # Don't blindly accept a cached copy of index pages:
            # max-age=0 forces revalidation with the server while still
            # allowing cheap conditional requests via If-None-Match /
            # If-Modified-Since.
            "Cache-Control": "max-age=0",
        },
    )
    raise_for_status(resp)

    # The archive check above is only a URL-based heuristic; the definitive
    # check is the Content-Type of the response we actually received.
    _ensure_html_header(resp)

    return resp


def _get_encoding_from_headers(headers):
    """Determine if we have any encoding information in our headers.
    """
    if headers and "Content-Type" in headers:
        content_type, params = cgi.parse_header(headers["Content-Type"])
        if "charset" in params:
            return params['charset']
    return None


def _determine_base_url(document, page_url):
    """Determine the HTML document's base URL.

    This looks for a ``<base>`` tag in the HTML document. If present, its href
    attribute denotes the base URL of anchor tags in the document. If there is
    no such tag (or if it does not have a valid href attribute), the HTML
    file's URL is used as the base URL.

    :param document: An HTML document representation. The current
        implementation expects the result of ``html5lib.parse()``.
    :param page_url: The URL of the HTML document.
    """
    for base in document.findall(".//base"):
        href = base.get("href")
        if href is not None:
            return href
    return page_url


def _clean_url_path_part(part):
    """
    Clean a "part" of a URL path (i.e. after splitting on "@" characters).
    """
    return urllib_parse.quote(urllib_parse.unquote(part))


def _clean_file_url_path(part):
    """
    Clean the first part of a URL path that corresponds to a local
    filesystem path (i.e. the first part after splitting on "@" characters).
    """
    # On Windows this part may contain a drive letter, whose colon must not
    # be quoted; the urllib.request pathname2url/url2pathname round-trip
    # handles that for us.
    return urllib_request.pathname2url(urllib_request.url2pathname(part))


# The characters treated as reserved when cleaning URL paths: a literal "@"
# and the percent-encoded "/" ("%2F"), matched case-insensitively.
_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)


def _clean_url_path(path, is_local_path):
    """
    Clean the path portion of a URL.
    """
    if is_local_path:
        clean_func = _clean_file_url_path
    else:
        clean_func = _clean_url_path_part

    # Split on the reserved characters prior to cleaning so that, e.g.,
    # revision strings in VCS URLs are properly preserved.
    parts = _reserved_chars_re.split(path)

    cleaned_parts = []
    for to_clean, reserved in pairwise(itertools.chain(parts, [''])):
        cleaned_parts.append(clean_func(to_clean))
        # Normalize %xx escapes (e.g. %2f -> %2F).
        cleaned_parts.append(reserved.upper())

    return ''.join(cleaned_parts)


def _clean_link(url):
    """
    Make sure a link is fully quoted.
    For example, if ' ' occurs in the URL, it will be replaced with "%20",
    without double-quoting characters that are already percent-encoded.
    """
    # Split the URL into parts according to the general structure
    # `scheme://netloc/path;parameters?query#fragment`.
    result = urllib_parse.urlparse(url)
    # If the netloc is empty, the URL refers to a local filesystem path.
    is_local_path = not result.netloc
    path = _clean_url_path(result.path, is_local_path=is_local_path)
    return urllib_parse.urlunparse(result._replace(path=path))


def _create_link_from_element(
    anchor,
    page_url,
    base_url,
):
    """
    Convert an anchor element in a simple repository page to a Link.
    """
    href = anchor.get("href")
    if not href:
        return None

    url = _clean_link(urllib_parse.urljoin(base_url, href))
    pyrequire = anchor.get('data-requires-python')
    pyrequire = unescape(pyrequire) if pyrequire else None

    yanked_reason = anchor.get('data-yanked')
    if yanked_reason:
        yanked_reason = unescape(yanked_reason)

    link = Link(
        url,
        comes_from=page_url,
        requires_python=pyrequire,
        yanked_reason=yanked_reason,
    )

    return link


class CacheablePageContent(object):
    def __init__(self, page):
        assert page.cache_link_parsing
        self.page = page

    def __eq__(self, other):
        return (isinstance(other, type(self)) and
                self.page.url == other.page.url)

    def __hash__(self):
        return hash(self.page.url)


def with_cached_html_pages(
    fn,
):
    """
    Given a function that parses an Iterable[Link] from an HTMLPage, cache the
    function's result (keyed by CacheablePageContent), unless the HTMLPage
    `page` has `page.cache_link_parsing == False`.
    """
    @_lru_cache(maxsize=None)
    def wrapper(cacheable_page):
        return list(fn(cacheable_page.page))

    @functools.wraps(fn)
    def wrapper_wrapper(page):
        if page.cache_link_parsing:
            return wrapper(CacheablePageContent(page))
        return list(fn(page))

    return wrapper_wrapper


@with_cached_html_pages
def parse_links(page):
    """
    Parse an HTML document, and yield its anchor elements as Link objects.
    """
    document = html5lib.parse(
        page.content,
        transport_encoding=page.encoding,
        namespaceHTMLElements=False,
    )

    url = page.url
    base_url = _determine_base_url(document, url)
    for anchor in document.findall(".//a"):
        link = _create_link_from_element(
            anchor,
            page_url=url,
            base_url=base_url,
        )
        if link is None:
            continue
        yield link


class HTMLPage(object):
    """Represents one page, along with its URL"""

    def __init__(
        self,
        content,
        encoding,
        url,
        cache_link_parsing=True,
    ):
        """
        :param encoding: the encoding to decode the given content.
        :param url: the URL from which the HTML was downloaded.
        :param cache_link_parsing: whether links parsed from this page's url
                                   should be cached. PyPI index urls should
                                   have this set to False, for example.
        """
        self.content = content
        self.encoding = encoding
        self.url = url
        self.cache_link_parsing = cache_link_parsing

    def __str__(self):
        return redact_auth_from_url(self.url)


def _handle_get_page_fail(
    link,
    reason,
    meth=None
):
    if meth is None:
        meth = logger.debug
    meth("Could not fetch URL %s: %s - skipping", link, reason)


def _make_html_page(response, cache_link_parsing=True):
    encoding = _get_encoding_from_headers(response.headers)
    return HTMLPage(
        response.content,
        encoding=encoding,
        url=response.url,
        cache_link_parsing=cache_link_parsing)


def _get_html_page(link, session=None):
    if session is None:
        raise TypeError(
            "_get_html_page() missing 1 required keyword argument: 'session'"
        )

    url = link.url.split('#', 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    vcs_scheme = _match_vcs_scheme(url)
    if vcs_scheme:
        logger.warning('Cannot look at %s URL %s because it does not support '
                       'lookup as web pages.', vcs_scheme, link)
        return None

    # Tack index.html onto file:// URLs that point to directories.
    scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
    if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))):
        # Add a trailing slash if not present so urljoin doesn't trim the
        # final segment.
        if not url.endswith('/'):
            url += '/'
        url = urllib_parse.urljoin(url, 'index.html')
        logger.debug(' file: URL is directory, getting %s', url)

    try:
        resp = _get_html_response(url, session=session)
    except _NotHTTP:
        logger.warning(
            'Skipping page %s because it looks like an archive, and cannot '
            'be checked by a HTTP HEAD request.', link,
        )
    except _NotHTML as exc:
        logger.warning(
            'Skipping page %s because the %s request got Content-Type: %s. '
            'The only supported Content-Type is text/html.',
            link, exc.request_desc, exc.content_type,
        )
    except NetworkConnectionError as exc:
        _handle_get_page_fail(link, exc)
    except RetryError as exc:
        _handle_get_page_fail(link, exc)
    except SSLError as exc:
        reason = "There was a problem confirming the ssl certificate: "
        reason += str(exc)
        _handle_get_page_fail(link, reason, meth=logger.info)
    except requests.ConnectionError as exc:
        _handle_get_page_fail(link, "connection error: {}".format(exc))
    except requests.Timeout:
        _handle_get_page_fail(link, "timed out")
    else:
        return _make_html_page(resp,
                               cache_link_parsing=link.cache_link_parsing)
    return None


def _remove_duplicate_links(links):
    """
    Return a list of links, with duplicates removed and ordering preserved.
    """
    return list(OrderedDict.fromkeys(links))


def group_locations(locations, expand_dir=False):
    """
    Divide a list of locations into two groups: "files" (archives) and "urls."

    :return: A pair of lists (files, urls).
    """
    files = []
    urls = []

    # Put the url for the given file path into the appropriate list.
    def sort_path(path):
        url = path_to_url(path)
        if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
            urls.append(url)
        else:
            files.append(url)

    for url in locations:
        is_local_path = os.path.exists(url)
        is_file_url = url.startswith('file:')

        if is_local_path or is_file_url:
            if is_local_path:
                path = url
            else:
                path = url_to_path(url)
            if os.path.isdir(path):
                if expand_dir:
                    path = os.path.realpath(path)
                    for item in os.listdir(path):
                        sort_path(os.path.join(path, item))
                elif is_file_url:
                    urls.append(url)
                else:
                    logger.warning(
                        "Path '%s' is ignored: it is a directory.", path,
                    )
            elif os.path.isfile(path):
                sort_path(path)
            else:
                logger.warning(
                    "Url '%s' is ignored: it is neither a file "
                    "nor a directory.", url,
                )
        elif is_url(url):
            # Only add urls with a clear scheme.
            urls.append(url)
        else:
            logger.warning(
                "Url '%s' is ignored. It is either a non-existing "
                "path or lacks a specific scheme.", url,
            )

    return files, urls


class CollectedLinks(object):

    """
    Encapsulates the return value of a call to LinkCollector.collect_links().

    The return value includes both URLs to project pages containing package
    links, as well as individual package Link objects collected from other
    sources.

    This info is stored separately as:

    (1) links from the configured file locations,
    (2) links from the configured find_links, and
    (3) urls to HTML project pages, as described by the PEP 503 simple
        repository API.
    """

    def __init__(
        self,
        files,
        find_links,
        project_urls,
    ):
        """
        :param files: Links from file locations.
        :param find_links: Links from find_links.
        :param project_urls: URLs to HTML project pages, as described by
            the PEP 503 simple repository API.
        """
        self.files = files
        self.find_links = find_links
        self.project_urls = project_urls


class LinkCollector(object):

    """
    Responsible for collecting Link objects from all configured locations,
    making network requests as needed.

    The class's main method is its collect_links() method.
    """

    def __init__(
        self,
        session,
        search_scope,
    ):
        self.search_scope = search_scope
        self.session = session

    @classmethod
    def create(cls, session, options, suppress_no_index=False):
        """
        :param session: The Session to use to make requests.
        :param options: The parsed command-line options.
        :param suppress_no_index: Whether to ignore the --no-index option
            when constructing the SearchScope object.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index and not suppress_no_index:
            logger.debug(
                'Ignoring indexes: %s',
                ','.join(redact_auth_from_url(url) for url in index_urls),
            )
            index_urls = []

        # Make sure find_links is a list before passing it on.
        find_links = options.find_links or []

        search_scope = SearchScope.create(
            find_links=find_links, index_urls=index_urls,
        )
        link_collector = LinkCollector(
            session=session, search_scope=search_scope,
        )
        return link_collector

    @property
    def find_links(self):
        return self.search_scope.find_links

    def fetch_page(self, location):
        """
        Fetch an HTML page containing package links.
        """
        return _get_html_page(location, session=self.session)

    def collect_links(self, project_name):
        """Find all available links for the given project name.

        :return: All the Link objects (unfiltered), as a CollectedLinks
            object.
        """
        search_scope = self.search_scope
        index_locations = search_scope.get_index_urls_locations(project_name)
        index_file_loc, index_url_loc = group_locations(index_locations)
        fl_file_loc, fl_url_loc = group_locations(
            self.find_links, expand_dir=True,
        )

        file_links = [
            Link(url) for url in itertools.chain(index_file_loc, fl_file_loc)
        ]

        # We trust every directly linked archive in find_links.
        find_link_links = [Link(url, '-f') for url in self.find_links]

        # We trust every url the user has given us, whether it came from
        # --index-url or --find-links, as long as it is a secure origin.
        url_locations = [
            link for link in itertools.chain(
                # Mark index pages as "cache_link_parsing == False":
                (Link(url, cache_link_parsing=False) for url in index_url_loc),
                (Link(url) for url in fl_url_loc),
            )
            if self.session.is_secure_origin(link)
        ]

        url_locations = _remove_duplicate_links(url_locations)
        lines = [
            '{} location(s) to search for versions of {}:'.format(
                len(url_locations), project_name,
            ),
        ]
        for link in url_locations:
            lines.append('* {}'.format(link))
        logger.debug('\n'.join(lines))

        return CollectedLinks(
            files=file_links,
            find_links=find_link_links,
            project_urls=url_locations,
        )