'Try to find a Link matching req. Expects req, an InstallRequirement, and upgrade, a boolean. Returns a Link if found. Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise.'
def find_requirement(self, req, upgrade):
    all_candidates = self.find_all_candidates(req.name)
    compatible_versions = set(
        req.specifier.filter(
            [str(c.version) for c in all_candidates],
            prereleases=(self.allow_all_prereleases if self.allow_all_prereleases else None),
        )
    )
    applicable_candidates = [
        c for c in all_candidates if str(c.version) in compatible_versions
    ]
    if applicable_candidates:
        best_candidate = max(applicable_candidates, key=self._candidate_sort_key)
    else:
        best_candidate = None
    if req.satisfied_by is not None:
        installed_version = parse_version(req.satisfied_by.version)
    else:
        installed_version = None
    if installed_version is None and best_candidate is None:
        logger.critical(
            'Could not find a version that satisfies the requirement %s (from versions: %s)',
            req,
            ', '.join(sorted(set(str(c.version) for c in all_candidates), key=parse_version)),
        )
        raise DistributionNotFound('No matching distribution found for %s' % req)
    best_installed = False
    if installed_version and (best_candidate is None or
                              best_candidate.version <= installed_version):
        best_installed = True
    if not upgrade and installed_version is not None:
        if best_installed:
            logger.debug(
                'Existing installed version (%s) is most up-to-date and satisfies requirement',
                installed_version,
            )
        else:
            logger.debug(
                'Existing installed version (%s) satisfies requirement '
                '(most up-to-date version is %s)',
                installed_version, best_candidate.version,
            )
        return None
    if best_installed:
        logger.debug(
            'Installed version (%s) is most up-to-date (past versions: %s)',
            installed_version,
            ', '.join(sorted(compatible_versions, key=parse_version)) or 'none',
        )
        raise BestVersionAlreadyInstalled
    logger.debug(
        'Using version %s (newest of versions: %s)',
        best_candidate.version,
        ', '.join(sorted(compatible_versions, key=parse_version)),
    )
    return best_candidate.location
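
# Illustrative sketch (not part of pip): how the specifier filtering above behaves, using
# the standalone `packaging` library, which provides the same SpecifierSet.filter() and
# version parsing that req.specifier and parse_version rely on. Values are made up.
from packaging.specifiers import SpecifierSet
from packaging.version import parse as parse_version_example

all_versions = ['1.0', '1.3', '2.0b1', '2.0']   # hypothetical index candidates
spec = SpecifierSet('>=1.0,<2.0')               # like req.specifier
compatible = sorted(spec.filter(all_versions), key=parse_version_example)
print(compatible)  # ['1.0', '1.3'] -- 2.0 is out of range, the pre-release 2.0b1 is skipped by default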
'Yields (page, page_url) from the given locations, skipping locations that have errors.'
def _get_pages(self, locations, project_name):
    seen = set()
    for location in locations:
        if location in seen:
            continue
        seen.add(location)
        page = self._get_page(location)
        if page is None:
            continue
        yield page
'Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates'
def _sort_links(self, links):
    eggs, no_eggs = [], []
    seen = set()
    for link in links:
        if link not in seen:
            seen.add(link)
            if link.egg_fragment:
                eggs.append(link)
            else:
                no_eggs.append(link)
    return no_eggs + eggs
'Return an InstallationCandidate or None'
def _link_package_versions(self, link, search):
    version = None
    if link.egg_fragment:
        egg_info = link.egg_fragment
        ext = link.ext
    else:
        egg_info, ext = link.splitext()
        if not ext:
            self._log_skipped_link(link, 'not a file')
            return
        if ext not in SUPPORTED_EXTENSIONS:
            self._log_skipped_link(link, 'unsupported archive format: %s' % ext)
            return
        if 'binary' not in search.formats and ext == wheel_ext:
            self._log_skipped_link(link, 'No binaries permitted for %s' % search.supplied)
            return
        if 'macosx10' in link.path and ext == '.zip':
            self._log_skipped_link(link, 'macosx10 one')
            return
        if ext == wheel_ext:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                self._log_skipped_link(link, 'invalid wheel filename')
                return
            if canonicalize_name(wheel.name) != search.canonical:
                self._log_skipped_link(link, 'wrong project name (not %s)' % search.supplied)
                return
            if not wheel.supported():
                self._log_skipped_link(link, 'it is not compatible with this Python')
                return
            version = wheel.version
    if 'source' not in search.formats and ext != wheel_ext:
        self._log_skipped_link(link, 'No sources permitted for %s' % search.supplied)
        return
    if not version:
        version = egg_info_matches(egg_info, search.supplied, link)
    if version is None:
        self._log_skipped_link(link, 'wrong project name (not %s)' % search.supplied)
        return
    match = self._py_version_re.search(version)
    if match:
        version = version[:match.start()]
        py_version = match.group(1)
        if py_version != sys.version[:3]:
            self._log_skipped_link(link, 'Python version is incorrect')
            return
    logger.debug('Found link %s, version: %s', link, version)
    return InstallationCandidate(search.supplied, version, link)
'Get the Content-Type of the given url, using a HEAD request'
@staticmethod
def _get_content_type(url, session):
    scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
    if scheme not in ('http', 'https'):
        return ''
    resp = session.head(url, allow_redirects=True)
    resp.raise_for_status()
    return resp.headers.get('Content-Type', '')
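
# Minimal usage sketch (assumption: the `requests` package is available), mirroring the
# HEAD-request approach above with a throwaway session and an example URL.
import requests

session = requests.Session()
resp = session.head('https://pypi.org/simple/pip/', allow_redirects=True)
resp.raise_for_status()
print(resp.headers.get('Content-Type', ''))  # e.g. 'text/html'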
'Yields all links in the page'
@property
def links(self):
    for anchor in self.parsed.findall('.//a'):
        if anchor.get('href'):
            href = anchor.get('href')
            url = self.clean_link(urllib_parse.urljoin(self.base_url, href))
            yield Link(url, self)
'Makes sure a link is fully encoded. That is, if a \' \' shows up in the link, it will be rewritten to %20 (while not over-quoting % or other characters).'
def clean_link(self, url):
return self._clean_re.sub((lambda match: ('%%%2x' % ord(match.group(0)))), url)
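
# Standalone sketch of the quoting trick above. The exact _clean_re pattern is an
# assumption here; the point is the '%%%2x' format, which turns e.g. a space (ord 32)
# into '%20' without re-quoting characters that are already safe.
import re

clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)  # assumed pattern

def clean_link_example(url):
    return clean_re.sub(lambda match: '%%%2x' % ord(match.group(0)), url)

print(clean_link_example('https://example.com/My Package-1.0.tar.gz'))
# https://example.com/My%20Package-1.0.tar.gz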
'Determines if this points to an actual artifact (e.g. a tarball) or if it points to an "abstract" thing like a path or a VCS location.'
@property
def is_artifact(self):
    from pip.vcs import vcs
    if self.scheme in vcs.all_schemes:
        return False
    return True
'Prints the completion code of the given shell'
def run(self, options, args):
    shells = COMPLETION_SCRIPTS.keys()
    shell_options = ['--' + shell for shell in sorted(shells)]
    if options.shell in shells:
        script = COMPLETION_SCRIPTS.get(options.shell, '')
        print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
    else:
        sys.stderr.write('ERROR: You must pass %s\n' % ' or '.join(shell_options))
'Create a package finder appropriate to this list command.'
def _build_package_finder(self, options, index_urls, session):
return PackageFinder(find_links=options.find_links, index_urls=index_urls, allow_all_prereleases=options.pre, trusted_hosts=options.trusted_hosts, process_dependency_links=options.process_dependency_links, session=session)
'Return a summary of me for display under the heading. This default implementation simply prints a description of the triggering requirement. :param req: The InstallRequirement that provoked this error, with populate_link() having already been called'
def body(self):
return (' %s' % self._requirement_name())
'Return a description of the requirement that triggered me. This default implementation returns a long description of the req, with line numbers'
def _requirement_name(self):
return (str(self.req) if self.req else 'unknown package')
':param gotten_hash: The hash of the (possibly malicious) archive we just downloaded'
def __init__(self, gotten_hash):
self.gotten_hash = gotten_hash
':param allowed: A dict of algorithm names pointing to lists of allowed hex digests :param gots: A dict of algorithm names pointing to hashes we actually got from the files under suspicion'
def __init__(self, allowed, gots):
    self.allowed = allowed
    self.gots = gots
'Return a comparison of actual and expected hash values. Example:: Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde or 123451234512345123451234512345123451234512345 Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef'
def _hash_comparison(self):
    def hash_then_or(hash_name):
        return chain([hash_name], repeat(' or'))

    lines = []
    for hash_name, expecteds in iteritems(self.allowed):
        prefix = hash_then_or(hash_name)
        lines.extend((' Expected %s %s' % (next(prefix), e)) for e in expecteds)
        lines.append(' Got %s\n' % self.gots[hash_name].hexdigest())
        prefix = ' or'
    return '\n'.join(lines)
'Return the name of the version control backend if found at the given location, e.g. vcs.get_backend_name(\'/path/to/vcs/checkout\')'
def get_backend_name(self, location):
    for vc_type in self._registry.values():
        if vc_type.controls_location(location):
            logger.debug('Determine that %s uses VCS: %s', location, vc_type.name)
            return vc_type.name
    return None
'posix absolute paths start with os.path.sep, win32 ones start with a drive (like c:\folder)'
def _is_local_repository(self, repo):
    drive, tail = os.path.splitdrive(repo)
    return repo.startswith(os.path.sep) or drive
'Export the repository at the url to the destination location, i.e. only download the files, without the VCS information'
def export(self, location):
raise NotImplementedError
'Returns the correct repository URL and revision by parsing the given repository URL'
def get_url_rev(self):
    error_message = "Sorry, '%s' is a malformed VCS url. The format is <vcs>+<protocol>://<url>, e.g. svn+http://myrepo/svn/MyApp#egg=MyApp"
    assert '+' in self.url, error_message % self.url
    url = self.url.split('+', 1)[1]
    scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
    rev = None
    if '@' in path:
        path, rev = path.rsplit('@', 1)
    url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
    return url, rev
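
# Self-contained sketch of the URL/revision split performed above, using the stdlib
# urllib.parse instead of pip's urllib_parse shim. The sample URL is made up.
from urllib.parse import urlsplit, urlunsplit

def split_vcs_url(url):
    assert '+' in url, 'expected <vcs>+<protocol>://<url>'
    url = url.split('+', 1)[1]                   # drop the '<vcs>+' prefix
    scheme, netloc, path, query, frag = urlsplit(url)
    rev = None
    if '@' in path:
        path, rev = path.rsplit('@', 1)          # optional trailing '@<rev>'
    return urlunsplit((scheme, netloc, path, query, '')), rev

print(split_vcs_url('svn+https://example.com/svn/MyApp@2019#egg=MyApp'))
# ('https://example.com/svn/MyApp', '2019')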
'Returns (url, revision), where both are strings'
def get_info(self, location):
    assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
    return self.get_url(location), self.get_revision(location)
'Normalize a URL for comparison by unquoting it and removing any trailing slash.'
def normalize_url(self, url):
return urllib_parse.unquote(url).rstrip('/')
'Compare two repo URLs for identity, ignoring incidental differences.'
def compare_urls(self, url1, url2):
return (self.normalize_url(url1) == self.normalize_url(url2))
'Called when installing or updating an editable package, takes the source path of the checkout.'
def obtain(self, dest):
raise NotImplementedError
'Switch the repo at ``dest`` to point to ``URL``.'
def switch(self, dest, url, rev_options):
raise NotImplementedError
'Update an already-existing repo to the given ``rev_options``.'
def update(self, dest, rev_options):
raise NotImplementedError
'Return True if the version is identical to what exists and doesn\'t need to be updated.'
def check_version(self, dest, rev_options):
raise NotImplementedError
'Prepare a location to receive a checkout/clone. Return True if the location is ready for (and requires) a checkout/clone, False otherwise.'
def check_destination(self, dest, url, rev_options, rev_display):
    checkout = True
    prompt = False
    if os.path.exists(dest):
        checkout = False
        if os.path.exists(os.path.join(dest, self.dirname)):
            existing_url = self.get_url(dest)
            if self.compare_urls(existing_url, url):
                logger.debug('%s in %s exists, and has correct URL (%s)',
                             self.repo_name.title(), display_path(dest), url)
                if not self.check_version(dest, rev_options):
                    logger.info('Updating %s %s%s',
                                display_path(dest), self.repo_name, rev_display)
                    self.update(dest, rev_options)
                else:
                    logger.info('Skipping because already up-to-date.')
            else:
                logger.warning('%s %s in %s exists with URL %s',
                               self.name, self.repo_name, display_path(dest), existing_url)
                prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', ('s', 'i', 'w', 'b'))
        else:
            logger.warning('Directory %s already exists, and is not a %s %s.',
                           dest, self.name, self.repo_name)
            prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
    if prompt:
        logger.warning('The plan is to install the %s repository %s', self.name, url)
        response = ask_path_exists('What to do? %s' % prompt[0], prompt[1])
        if response == 's':
            logger.info('Switching %s %s to %s%s',
                        self.repo_name, display_path(dest), url, rev_display)
            self.switch(dest, url, rev_options)
        elif response == 'i':
            pass
        elif response == 'w':
            logger.warning('Deleting %s', display_path(dest))
            rmtree(dest)
            checkout = True
        elif response == 'b':
            dest_dir = backup_dir(dest)
            logger.warning('Backing up %s to %s', display_path(dest), dest_dir)
            shutil.move(dest, dest_dir)
            checkout = True
    return checkout
'Clean up the current location and download the repository at the url (including its VCS information) into location'
def unpack(self, location):
    if os.path.exists(location):
        rmtree(location)
    self.obtain(location)
'Return a string representing the requirement needed to redownload the files currently present in location, something like: {repository_url}@{revision}#egg={project_name}-{version_identifier}'
def get_src_requirement(self, dist, location):
raise NotImplementedError
'Return the url used at location. Used in get_info or check_destination.'
def get_url(self, location):
raise NotImplementedError
'Return the current revision of the files at location. Used in get_info.'
def get_revision(self, location):
raise NotImplementedError
'Run a VCS subcommand. This is simply a wrapper around call_subprocess that adds the VCS command name, and checks that the VCS is available.'
def run_command(self, cmd, show_stdout=True, cwd=None, on_returncode='raise', command_level=logging.DEBUG, command_desc=None, extra_environ=None, spinner=None):
    cmd = [self.name] + cmd
    try:
        return call_subprocess(cmd, show_stdout, cwd, on_returncode,
                               command_level, command_desc, extra_environ, spinner)
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise BadCommand('Cannot find command %r' % self.name)
        else:
            raise
'Check if a location is controlled by the vcs. It is meant to be overridden to implement smarter detection mechanisms for specific vcs.'
@classmethod
def controls_location(cls, location):
    logger.debug('Checking in %s for %s (%s)...', location, cls.dirname, cls.name)
    path = os.path.join(location, cls.dirname)
    return os.path.exists(path)
'Export the Hg repository at the url to the destination location'
def export(self, location):
    temp_dir = tempfile.mkdtemp('-export', 'pip-')
    self.unpack(temp_dir)
    try:
        self.run_command(['archive', location], show_stdout=False, cwd=temp_dir)
    finally:
        rmtree(temp_dir)
'Always assume the versions don\'t match'
def check_version(self, dest, rev_options):
return False
'Returns (url, revision), where both are strings'
def get_info(self, location):
    assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
    output = self.run_command(['info', location], show_stdout=False,
                              extra_environ={'LANG': 'C'})
    match = _svn_url_re.search(output)
    if not match:
        logger.warning('Cannot determine URL of svn checkout %s', display_path(location))
        logger.debug('Output that cannot be parsed: \n%s', output)
        return None, None
    url = match.group(1).strip()
    match = _svn_revision_re.search(output)
    if not match:
        logger.warning('Cannot determine revision of svn checkout %s', display_path(location))
        logger.debug('Output that cannot be parsed: \n%s', output)
        return url, None
    return url, match.group(1)
'Export the svn repository at the url to the destination location'
def export(self, location):
    url, rev = self.get_url_rev()
    rev_options = get_rev_options(url, rev)
    logger.info('Exporting svn repository %s to %s', url, location)
    with indent_log():
        if os.path.exists(location):
            rmtree(location)
        self.run_command(['export'] + rev_options + [url, location], show_stdout=False)
'Return the maximum revision for all files under a given location'
def get_revision(self, location):
    revision = 0
    for base, dirs, files in os.walk(location):
        if self.dirname not in dirs:
            dirs[:] = []
            continue
        dirs.remove(self.dirname)
        entries_fn = os.path.join(base, self.dirname, 'entries')
        if not os.path.exists(entries_fn):
            continue
        dirurl, localrev = self._get_svn_url_rev(base)
        if base == location:
            base_url = dirurl + '/'
        elif not dirurl or not dirurl.startswith(base_url):
            dirs[:] = []
            continue
        revision = max(revision, localrev)
    return revision
'Always assume the versions don\'t match'
def check_version(self, dest, rev_options):
return False
'Export the Bazaar repository at the url to the destination location'
def export(self, location):
    temp_dir = tempfile.mkdtemp('-export', 'pip-')
    self.unpack(temp_dir)
    if os.path.exists(location):
        rmtree(location)
    try:
        self.run_command(['export', location], cwd=temp_dir, show_stdout=False)
    finally:
        rmtree(temp_dir)
'Always assume the versions don\'t match'
def check_version(self, dest, rev_options):
return False
'Export the Git repository at the url to the destination location'
def export(self, location):
    temp_dir = tempfile.mkdtemp('-export', 'pip-')
    self.unpack(temp_dir)
    try:
        if not location.endswith('/'):
            location = location + '/'
        self.run_command(['checkout-index', '-a', '-f', '--prefix', location],
                         show_stdout=False, cwd=temp_dir)
    finally:
        rmtree(temp_dir)
'Check the revision options before checkout to compensate for the fact that tags and branches may need origin/ as a prefix. Returns the SHA1 of the branch or tag if found.'
def check_rev_options(self, rev, dest, rev_options):
    revisions = self.get_short_refs(dest)
    origin_rev = 'origin/%s' % rev
    if origin_rev in revisions:
        return [revisions[origin_rev]]
    elif rev in revisions:
        return [revisions[rev]]
    else:
        logger.warning("Could not find a tag or branch '%s', assuming commit.", rev)
        return rev_options
'Compare the current sha to the ref. ref may be a branch or tag name, but the current rev will always point to a sha. This means that a branch or tag will never compare as True, so this ultimately only matches against exact shas.'
def check_version(self, dest, rev_options):
return self.get_revision(dest).startswith(rev_options[0])
'Return URL of the first remote encountered.'
def get_url(self, location):
    remotes = self.run_command(['config', '--get-regexp', 'remote\\..*\\.url'],
                               show_stdout=False, cwd=location)
    first_remote = remotes.splitlines()[0]
    url = first_remote.split(' ')[1]
    return url.strip()
'Yields tuples of (commit, ref) for branches and tags'
def get_full_refs(self, location):
    output = self.run_command(['show-ref'], show_stdout=False, cwd=location)
    for line in output.strip().splitlines():
        commit, ref = line.split(' ', 1)
        yield commit.strip(), ref.strip()
'A ref is a commit sha if it is not anything else'
def is_ref_commit(self, ref):
return (not any((self.is_ref_remote(ref), self.is_ref_branch(ref), self.is_ref_tag(ref))))
'Return map of named refs (branches or tags) to commit hashes.'
def get_short_refs(self, location):
    rv = {}
    for commit, ref in self.get_full_refs(location):
        ref_name = None
        if self.is_ref_remote(ref):
            ref_name = ref[len('refs/remotes/'):]
        elif self.is_ref_branch(ref):
            ref_name = ref[len('refs/heads/'):]
        elif self.is_ref_tag(ref):
            ref_name = ref[len('refs/tags/'):]
        if ref_name is not None:
            rv[ref_name] = commit
    return rv
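
# Sketch of what get_full_refs/get_short_refs produce, driven by a hard-coded sample of
# `git show-ref` output (the hashes and names are made up).
sample_show_ref = (
    '1111111111111111111111111111111111111111 refs/heads/master\n'
    '2222222222222222222222222222222222222222 refs/remotes/origin/develop\n'
    '3333333333333333333333333333333333333333 refs/tags/v1.0\n'
)

short_refs = {}
for line in sample_show_ref.strip().splitlines():
    commit, ref = line.split(' ', 1)
    for prefix in ('refs/remotes/', 'refs/heads/', 'refs/tags/'):
        if ref.startswith(prefix):
            short_refs[ref[len(prefix):]] = commit
            break

print(short_refs['v1.0'])            # 3333...
print(short_refs['origin/develop'])  # 2222...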
'Return the relative path of setup.py to the git repo root.'
def _get_subdirectory(self, location):
    git_dir = self.run_command(['rev-parse', '--git-dir'],
                               show_stdout=False, cwd=location).strip()
    if not os.path.isabs(git_dir):
        git_dir = os.path.join(location, git_dir)
    root_dir = os.path.join(git_dir, '..')
    orig_location = location
    while not os.path.exists(os.path.join(location, 'setup.py')):
        last_location = location
        location = os.path.dirname(location)
        if location == last_location:
            logger.warning('Could not find setup.py for directory %s (tried all parent directories)',
                           orig_location)
            return None
    if samefile(root_dir, location):
        return None
    return os.path.relpath(location, root_dir)
'Prefixes stub URLs like \'user@hostname:user/repo.git\' with \'ssh://\'. That\'s required because, although they use SSH, they sometimes don\'t work with an ssh:// scheme (e.g. GitHub). But we need a scheme for parsing. Hence we remove it again afterwards and return it as a stub.'
def get_url_rev(self):
    if '://' not in self.url:
        assert 'file:' not in self.url
        self.url = self.url.replace('git+', 'git+ssh://')
        url, rev = super(Git, self).get_url_rev()
        url = url.replace('ssh://', '')
    else:
        url, rev = super(Git, self).get_url_rev()
    return url, rev
'cookielib has no legitimate use for this method; add it back if you find one.'
def add_header(self, key, val):
raise NotImplementedError('Cookie headers should be added with add_unredirected_header()')
'Make a MockResponse for `cookielib` to read. :param headers: a httplib.HTTPMessage or analogous carrying the headers'
def __init__(self, headers):
self._headers = headers
'Dict-like get() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. .. warning:: operation is O(n), not O(1).'
def get(self, name, default=None, domain=None, path=None):
    try:
        return self._find_no_duplicates(name, domain, path)
    except KeyError:
        return default
'Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains.'
def set(self, name, value, **kwargs):
    if value is None:
        remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
        return
    if isinstance(value, Morsel):
        c = morsel_to_cookie(value)
    else:
        c = create_cookie(name, value, **kwargs)
    self.set_cookie(c)
    return c
'Dict-like iterkeys() that returns an iterator of names of cookies from the jar. See itervalues() and iteritems().'
def iterkeys(self):
for cookie in iter(self): (yield cookie.name)
'Dict-like keys() that returns a list of names of cookies from the jar. See values() and items().'
def keys(self):
return list(self.iterkeys())
'Dict-like itervalues() that returns an iterator of values of cookies from the jar. See iterkeys() and iteritems().'
def itervalues(self):
for cookie in iter(self): (yield cookie.value)
'Dict-like values() that returns a list of values of cookies from the jar. See keys() and items().'
def values(self):
return list(self.itervalues())
'Dict-like iteritems() that returns an iterator of name-value tuples from the jar. See iterkeys() and itervalues().'
def iteritems(self):
for cookie in iter(self): (yield (cookie.name, cookie.value))
'Dict-like items() that returns a list of name-value tuples from the jar. See keys() and values(). Allows client-code to call ``dict(RequestsCookieJar)`` and get a vanilla python dict of key value pairs.'
def items(self):
return list(self.iteritems())
'Utility method to list all the domains in the jar.'
def list_domains(self):
    domains = []
    for cookie in iter(self):
        if cookie.domain not in domains:
            domains.append(cookie.domain)
    return domains
'Utility method to list all the paths in the jar.'
def list_paths(self):
    paths = []
    for cookie in iter(self):
        if cookie.path not in paths:
            paths.append(cookie.path)
    return paths
'Returns True if there are multiple domains in the jar. Returns False otherwise.'
def multiple_domains(self):
    domains = []
    for cookie in iter(self):
        if cookie.domain is not None and cookie.domain in domains:
            return True
        domains.append(cookie.domain)
    return False
'Takes as an argument an optional domain and path and returns a plain old Python dict of name-value pairs of cookies that meet the requirements.'
def get_dict(self, domain=None, path=None):
    dictionary = {}
    for cookie in iter(self):
        if (domain is None or cookie.domain == domain) and (path is None or cookie.path == path):
            dictionary[cookie.name] = cookie.value
    return dictionary
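
# Usage sketch for the dict-like cookie-jar API above (assumption: the `requests`
# package is installed; domains and values are made up).
from requests.cookies import RequestsCookieJar

jar = RequestsCookieJar()
jar.set('session', 'abc', domain='example.com', path='/')
jar.set('session', 'xyz', domain='other.org', path='/')

print(jar.get('session', domain='example.com'))  # 'abc' -- domain filter resolves the clash
print(jar.get_dict(domain='other.org'))          # {'session': 'xyz'}
print(jar.keys())                                # ['session', 'session']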
'Dict-like __getitem__() for compatibility with client code. Throws an exception if there is more than one cookie with the name. In that case, use the more explicit get() method instead. .. warning:: operation is O(n), not O(1).'
def __getitem__(self, name):
return self._find_no_duplicates(name)
'Dict-like __setitem__ for compatibility with client code. Throws exception if there is already a cookie of that name in the jar. In that case, use the more explicit set() method instead.'
def __setitem__(self, name, value):
self.set(name, value)
'Deletes a cookie given a name. Wraps ``cookielib.CookieJar``\'s ``remove_cookie_by_name()``.'
def __delitem__(self, name):
remove_cookie_by_name(self, name)
'Updates this jar with cookies from another CookieJar or dict-like'
def update(self, other):
    if isinstance(other, cookielib.CookieJar):
        for cookie in other:
            self.set_cookie(copy.copy(cookie))
    else:
        super(RequestsCookieJar, self).update(other)
'Requests uses this method internally to get cookie values. Takes as args name and optional domain and path. Returns a cookie.value. If there are conflicting cookies, _find arbitrarily chooses one. See _find_no_duplicates if you want an exception thrown if there are conflicting cookies.'
def _find(self, name, domain=None, path=None):
    for cookie in iter(self):
        if cookie.name == name:
            if domain is None or cookie.domain == domain:
                if path is None or cookie.path == path:
                    return cookie.value
    raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
'Both ``__getitem__`` and ``get`` call this function: it\'s never used elsewhere in Requests. Takes as args name and optional domain and path. Returns a cookie.value. Throws KeyError if cookie is not found and CookieConflictError if there are multiple cookies that match name and optionally domain and path.'
def _find_no_duplicates(self, name, domain=None, path=None):
    toReturn = None
    for cookie in iter(self):
        if cookie.name == name:
            if domain is None or cookie.domain == domain:
                if path is None or cookie.path == path:
                    if toReturn is not None:
                        raise CookieConflictError('There are multiple cookies with name, %r' % name)
                    toReturn = cookie.value
    if toReturn:
        return toReturn
    raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
'Unlike a normal CookieJar, this class is pickleable.'
def __getstate__(self):
    state = self.__dict__.copy()
    state.pop('_cookies_lock')
    return state
'Unlike a normal CookieJar, this class is pickleable.'
def __setstate__(self, state):
    self.__dict__.update(state)
    if '_cookies_lock' not in self.__dict__:
        self._cookies_lock = threading.RLock()
'Return a copy of this RequestsCookieJar.'
def copy(self):
    new_cj = RequestsCookieJar()
    new_cj.update(self)
    return new_cj
'reset analyser, clear any state'
def reset(self):
    self._mDone = False
    self._mTotalChars = 0
    self._mFreqChars = 0
'feed a character with known length'
def feed(self, aBuf, aCharLen):
    if aCharLen == 2:
        order = self.get_order(aBuf)
    else:
        order = -1
    if order >= 0:
        self._mTotalChars += 1
        if order < self._mTableSize:
            if 512 > self._mCharToFreqOrder[order]:
                self._mFreqChars += 1
'return confidence based on existing data'
def get_confidence(self):
    if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
        return SURE_NO
    if self._mTotalChars != self._mFreqChars:
        r = self._mFreqChars / ((self._mTotalChars - self._mFreqChars) * self._mTypicalDistributionRatio)
        if r < SURE_YES:
            return r
    return SURE_YES
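
# Worked example of the confidence formula above with made-up counts; the threshold and
# SURE_* constants are assumptions here, not necessarily the values chardet uses.
MINIMUM_DATA_THRESHOLD = 3
SURE_YES, SURE_NO = 0.99, 0.01

total_chars, freq_chars, typical_ratio = 1000, 600, 0.75
if total_chars <= 0 or freq_chars <= MINIMUM_DATA_THRESHOLD:
    confidence = SURE_NO
elif total_chars != freq_chars:
    confidence = min(freq_chars / ((total_chars - freq_chars) * typical_ratio), SURE_YES)
else:
    confidence = SURE_YES
print(confidence)  # 600 / (400 * 0.75) = 2.0, capped at SURE_YES -> 0.99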
'Should we redirect and where to? :returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code.'
def get_redirect_location(self):
    if self.status in self.REDIRECT_STATUSES:
        return self.headers.get('location')
    return False
'Obtain the number of bytes pulled over the wire so far. May differ from the amount of content returned by :meth:`HTTPResponse.read` if bytes are encoded on the wire (e.g., compressed).'
def tell(self):
return self._fp_bytes_read
'Set up the _decoder attribute if necessary.'
def _init_decoder(self):
    content_encoding = self.headers.get('content-encoding', '').lower()
    if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
        self._decoder = _get_decoder(content_encoding)
'Decode the data passed in and potentially flush the decoder.'
def _decode(self, data, decode_content, flush_decoder):
    try:
        if decode_content and self._decoder:
            data = self._decoder.decompress(data)
    except (IOError, zlib.error) as e:
        content_encoding = self.headers.get('content-encoding', '').lower()
        raise DecodeError(
            'Received response with content-encoding: %s, but failed to decode it.' % content_encoding,
            e)
    if flush_decoder and decode_content:
        data += self._flush_decoder()
    return data
'Flushes the decoder. Should only be called if the decoder is actually being used.'
def _flush_decoder(self):
    if self._decoder:
        buf = self._decoder.decompress('')
        return buf + self._decoder.flush()
    return ''
'Catch low-level python exceptions, instead re-raising urllib3 variants, so that low-level exceptions are not leaked in the high-level api. On exit, release the connection back to the pool.'
@contextmanager
def _error_catcher(self):
    try:
        try:
            yield
        except SocketTimeout:
            raise ReadTimeoutError(self._pool, None, 'Read timed out.')
        except BaseSSLError as e:
            if 'read operation timed out' not in str(e):
                raise
            raise ReadTimeoutError(self._pool, None, 'Read timed out.')
        except (HTTPException, SocketError) as e:
            raise ProtocolError('Connection broken: %r' % e, e)
        except Exception:
            if self._original_response and not self._original_response.isclosed():
                self._original_response.close()
            if self._connection is not None:
                self._connection.close()
            raise
    finally:
        if self._original_response and self._original_response.isclosed():
            self.release_conn()
'Similar to :meth:`httplib.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``. :param amt: How much of the content to read. If specified, caching is skipped because it doesn\'t make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the \'content-encoding\' header. :param cache_content: If True, will save the returned data such that the same result is returned regardless of the state of the underlying file object. This is useful if you want the ``.data`` property to continue working after having ``.read()`` the file object. (Overridden if ``amt`` is set.)'
def read(self, amt=None, decode_content=None, cache_content=False):
    self._init_decoder()
    if decode_content is None:
        decode_content = self.decode_content
    if self._fp is None:
        return
    flush_decoder = False
    data = None
    with self._error_catcher():
        if amt is None:
            data = self._fp.read()
            flush_decoder = True
        else:
            cache_content = False
            data = self._fp.read(amt)
            if amt != 0 and not data:
                self._fp.close()
                flush_decoder = True
    if data:
        self._fp_bytes_read += len(data)
        data = self._decode(data, decode_content, flush_decoder)
        if cache_content:
            self._body = data
    return data
'A generator wrapper for the read() method. A call will block until ``amt`` bytes have been read from the connection or until the connection is closed. :param amt: How much of the content to read. The generator will return up to this much data per iteration, but may return less. This is particularly likely when using compressed data. However, the empty string will never be returned. :param decode_content: If True, will attempt to decode the body based on the \'content-encoding\' header.'
def stream(self, amt=(2 ** 16), decode_content=None):
    if self.chunked:
        for line in self.read_chunked(amt, decode_content=decode_content):
            yield line
    else:
        while not is_fp_closed(self._fp):
            data = self.read(amt=amt, decode_content=decode_content)
            if data:
                yield data
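
# Streaming usage sketch with urllib3's public API: preload_content=False leaves the body
# on the socket so stream() can yield it in chunks; release_conn() returns the connection
# to the pool when done. The URL is an example.
import urllib3

http = urllib3.PoolManager()
resp = http.request('GET', 'https://example.com/', preload_content=False)
for chunk in resp.stream(2 ** 16, decode_content=True):
    pass  # process each chunk of bytes here
resp.release_conn()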
'Given an :class:`httplib.HTTPResponse` instance ``r``, return a corresponding :class:`urllib3.response.HTTPResponse` object. Remaining parameters are passed to the HTTPResponse constructor, along with ``original_response=r``.'
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
    headers = r.msg
    if not isinstance(headers, HTTPHeaderDict):
        if PY3:
            headers = HTTPHeaderDict(headers.items())
        else:
            headers = HTTPHeaderDict.from_httplib(headers)
    strict = getattr(r, 'strict', 0)
    resp = ResponseCls(body=r, headers=headers, status=r.status, version=r.version,
                       reason=r.reason, strict=strict, original_response=r, **response_kw)
    return resp
'Similar to :meth:`HTTPResponse.read`, but with an additional parameter: ``decode_content``. :param decode_content: If True, will attempt to decode the body based on the \'content-encoding\' header.'
def read_chunked(self, amt=None, decode_content=None):
    self._init_decoder()
    if not self.chunked:
        raise ResponseNotChunked("Response is not chunked. Header 'transfer-encoding: chunked' is missing.")
    if self._original_response and is_response_to_head(self._original_response):
        self._original_response.close()
        return
    with self._error_catcher():
        while True:
            self._update_chunk_length()
            if self.chunk_left == 0:
                break
            chunk = self._handle_chunk(amt)
            decoded = self._decode(chunk, decode_content=decode_content, flush_decoder=False)
            if decoded:
                yield decoded
        if decode_content:
            decoded = self._flush_decoder()
            if decoded:
                yield decoded
        while True:
            line = self._fp.fp.readline()
            if not line:
                break
            if line == '\r\n':
                break
        if self._original_response:
            self._original_response.close()
'Create a new :class:`ConnectionPool` based on host, port and scheme. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization.'
def _new_pool(self, scheme, host, port):
    pool_cls = pool_classes_by_scheme[scheme]
    kwargs = self.connection_pool_kw
    if scheme == 'http':
        kwargs = self.connection_pool_kw.copy()
        for kw in SSL_KEYWORDS:
            kwargs.pop(kw, None)
    return pool_cls(host, port, **kwargs)
'Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion.'
def clear(self):
self.pools.clear()
'Get a :class:`ConnectionPool` based on the host, port, and scheme. If ``port`` isn\'t given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``.'
def connection_from_host(self, host, port=None, scheme='http'):
    if not host:
        raise LocationValueError('No host specified.')
    scheme = scheme or 'http'
    port = port or port_by_scheme.get(scheme, 80)
    pool_key = (scheme, host, port)
    with self.pools.lock:
        pool = self.pools.get(pool_key)
        if pool:
            return pool
        pool = self._new_pool(scheme, host, port)
        self.pools[pool_key] = pool
    return pool
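
# Sketch showing that pools are keyed on (scheme, host, port): two lookups for the same
# endpoint return the same ConnectionPool object. The host is an example.
import urllib3

pm = urllib3.PoolManager(num_pools=10)
a = pm.connection_from_host('example.com', port=443, scheme='https')
b = pm.connection_from_url('https://example.com/some/path')
print(a is b)  # True -- the second call reuses the cached pool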
'Similar to :func:`urllib3.connectionpool.connection_from_url` but doesn\'t pass any additional parameters to the :class:`urllib3.connectionpool.ConnectionPool` constructor. Additional parameters are taken from the :class:`.PoolManager` constructor.'
def connection_from_url(self, url):
    u = parse_url(url)
    return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
'Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.'
def urlopen(self, method, url, redirect=True, **kw):
    u = parse_url(url)
    conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
    kw['assert_same_host'] = False
    kw['redirect'] = False
    if 'headers' not in kw:
        kw['headers'] = self.headers
    if self.proxy is not None and u.scheme == 'http':
        response = conn.urlopen(method, url, **kw)
    else:
        response = conn.urlopen(method, u.request_uri, **kw)
    redirect_location = redirect and response.get_redirect_location()
    if not redirect_location:
        return response
    redirect_location = urljoin(url, redirect_location)
    if response.status == 303:
        method = 'GET'
    retries = kw.get('retries')
    if not isinstance(retries, Retry):
        retries = Retry.from_int(retries, redirect=redirect)
    try:
        retries = retries.increment(method, url, response=response, _pool=conn)
    except MaxRetryError:
        if retries.raise_on_redirect:
            raise
        return response
    kw['retries'] = retries
    kw['redirect'] = redirect
    log.info('Redirecting %s -> %s' % (url, redirect_location))
    return self.urlopen(method, redirect_location, **kw)
'Sets headers needed by proxies: specifically, the Accept and Host headers. Only sets headers not provided by the user.'
def _set_proxy_headers(self, url, headers=None):
    headers_ = {'Accept': '*/*'}
    netloc = parse_url(url).netloc
    if netloc:
        headers_['Host'] = netloc
    if headers:
        headers_.update(headers)
    return headers_
'Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.'
def urlopen(self, method, url, redirect=True, **kw):
    u = parse_url(url)
    if u.scheme == 'http':
        headers = kw.get('headers', self.headers)
        kw['headers'] = self._set_proxy_headers(url, headers)
    return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
'Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary.'
def __init__(self, *args, **kwds):
    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got %d' % len(args))
    try:
        self.__root
    except AttributeError:
        self.__root = root = []
        root[:] = [root, root, None]
        self.__map = {}
    self.__update(*args, **kwds)
'od.__setitem__(i, y) <==> od[i]=y'
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
    if key not in self:
        root = self.__root
        last = root[0]
        last[1] = root[0] = self.__map[key] = [last, root, key]
    dict_setitem(self, key, value)
'od.__delitem__(y) <==> del od[y]'
def __delitem__(self, key, dict_delitem=dict.__delitem__):
    dict_delitem(self, key)
    link_prev, link_next, key = self.__map.pop(key)
    link_prev[1] = link_next
    link_next[0] = link_prev
'od.__iter__() <==> iter(od)'
def __iter__(self):
    root = self.__root
    curr = root[1]
    while curr is not root:
        yield curr[2]
        curr = curr[1]
'od.__reversed__() <==> reversed(od)'
def __reversed__(self):
    root = self.__root
    curr = root[0]
    while curr is not root:
        yield curr[2]
        curr = curr[0]
'od.clear() -> None. Remove all items from od.'
def clear(self):
    try:
        for node in self.__map.itervalues():
            del node[:]
        root = self.__root
        root[:] = [root, root, None]
        self.__map.clear()
    except AttributeError:
        pass
    dict.clear(self)
'od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false.'
def popitem(self, last=True):
    if not self:
        raise KeyError('dictionary is empty')
    root = self.__root
    if last:
        link = root[0]
        link_prev = link[0]
        link_prev[1] = root
        root[0] = link_prev
    else:
        link = root[1]
        link_next = link[1]
        root[1] = link_next
        link_next[0] = root
    key = link[2]
    del self.__map[key]
    value = dict.pop(self, key)
    return (key, value)
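
# Behaviour of popitem() on the stdlib OrderedDict, which this backport mirrors:
# last=True pops LIFO, last=False pops FIFO.
from collections import OrderedDict

od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
print(od.popitem())            # ('c', 3)  -- last inserted
print(od.popitem(last=False))  # ('a', 1)  -- first inserted
print(list(od.items()))        # [('b', 2)]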