def clear_cache(self):
    """
    Clears the internal cache.
    """
    self._cache.clear()
    self._cache_egg.clear()
def _yield_distributions(self):
    """
    Yield .dist-info and/or .egg(-info) distributions.
    """
    seen = set()
    for path in self.path:
        finder = resources.finder_for_path(path)
        if finder is None:
            continue
        r = finder.find(u'')
        if not r or not r.is_container:
            continue
        rset = sorted(r.resources)
        for entry in rset:
            r = finder.find(entry)
            if not r or r.path in seen:
                continue
            if self._include_dist and entry.endswith(DISTINFO_EXT):
                possible_filenames = [METADATA_FILENAME,
                                      WHEEL_METADATA_FILENAME]
                for metadata_filename in possible_filenames:
                    metadata_path = posixpath.join(entry, metadata_filename)
                    pydist = finder.find(metadata_path)
                    if pydist:
                        break
                else:
                    continue
                with contextlib.closing(pydist.as_stream()) as stream:
                    metadata = Metadata(fileobj=stream, scheme=u'legacy')
                logger.debug(u'Found %s', r.path)
                seen.add(r.path)
                yield new_dist_class(r.path, metadata=metadata, env=self)
            elif self._include_egg and entry.endswith((u'.egg-info',
                                                       u'.egg')):
                logger.debug(u'Found %s', r.path)
                seen.add(r.path)
                yield old_dist_class(r.path, self)
def _generate_cache(self):
    """
    Scan the path for distributions and populate the cache with
    those that are found.
    """
    gen_dist = not self._cache.generated
    gen_egg = self._include_egg and not self._cache_egg.generated
    if gen_dist or gen_egg:
        for dist in self._yield_distributions():
            if isinstance(dist, InstalledDistribution):
                self._cache.add(dist)
            else:
                self._cache_egg.add(dist)
        if gen_dist:
            self._cache.generated = True
        if gen_egg:
            self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
    """
    The *name* and *version* parameters are converted into their
    filename-escaped form, i.e. any ``'-'`` characters are replaced with
    ``'_'`` other than the one in ``'dist-info'`` and the one separating
    the name from the version number.

    :parameter name: is converted to a standard distribution name by
                     replacing any runs of non-alphanumeric characters
                     with a single ``'-'``.
    :type name: string
    :parameter version: is converted to a standard version string. Spaces
                        become dots, and all other non-alphanumeric
                        characters (except dots) become dashes, with runs
                        of multiple dashes condensed to a single dash.
    :type version: string
    :returns: directory name
    :rtype: string
    """
    name = name.replace(u'-', u'_')
    return u'-'.join([name, version]) + DISTINFO_EXT
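# Usage sketch (illustrative, assuming this classmethod lives on
# DistributionPath as the docstrings in this section indicate):
from distlib.database import DistributionPath
print(DistributionPath.distinfo_dirname('python-ldap', '2.5'))
# -> 'python_ldap-2.5.dist-info'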
def get_distributions(self):
    """
    Provides an iterator that looks for distributions and returns
    :class:`InstalledDistribution` or :class:`EggInfoDistribution`
    instances for each one of them.

    :rtype: iterator of :class:`InstalledDistribution` and
            :class:`EggInfoDistribution` instances
    """
    if not self._cache_enabled:
        for dist in self._yield_distributions():
            yield dist
    else:
        self._generate_cache()
        for dist in self._cache.path.values():
            yield dist
        if self._include_egg:
            for dist in self._cache_egg.path.values():
                yield dist
def get_distribution(self, name):
    """
    Looks for a named distribution on the path.

    This function only returns the first result found, as no more than
    one value is expected. If nothing is found, ``None`` is returned.

    :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
            or ``None``
    """
    result = None
    name = name.lower()
    if not self._cache_enabled:
        for dist in self._yield_distributions():
            if dist.key == name:
                result = dist
                break
    else:
        self._generate_cache()
        if name in self._cache.name:
            result = self._cache.name[name][0]
        elif self._include_egg and name in self._cache_egg.name:
            result = self._cache_egg.name[name][0]
    return result
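# Usage sketch (assumes distlib is installed; 'setuptools' is just an
# example of a package that may be present on the path):
from distlib.database import DistributionPath
dist_path = DistributionPath(include_egg=True)
dist = dist_path.get_distribution('setuptools')
if dist is not None:
    print(dist.name_and_version)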
def provides_distribution(self, name, version=None):
    """
    Iterates over all distributions to find which distributions provide
    *name*. If a *version* is provided, it will be used to filter the
    results.

    :parameter version: a version specifier that indicates the version
                        required, conforming to the format in ``PEP-345``
    :type name: string
    :type version: string
    :returns: iterator of matching distributions
    """
    matcher = None
    if version is not None:
        try:
            matcher = self._scheme.matcher(u'%s (%s)' % (name, version))
        except ValueError:
            raise DistlibException(u'invalid name or version: %r, %r' %
                                   (name, version))
    for dist in self.get_distributions():
        provided = dist.provides
        for p in provided:
            p_name, p_ver = parse_name_and_version(p)
            if matcher is None:
                if p_name == name:
                    yield dist
                    break
            elif p_name == name and matcher.match(p_ver):
                yield dist
                break
def get_file_path(self, name, relative_path):
    """
    Return the path to a resource file.
    """
    dist = self.get_distribution(name)
    if dist is None:
        raise LookupError(u'no distribution named %r found' % name)
    return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
    """
    Return all of the exported entries in a particular category.

    :param category: The category to search for entries.
    :param name: If specified, only entries with that name are returned.
    """
    for dist in self.get_distributions():
        r = dist.exports
        if category in r:
            d = r[category]
            if name is not None:
                if name in d:
                    yield d[name]
            else:
                for v in d.values():
                    yield v
def __init__(self, metadata):
    """
    Initialise an instance.

    :param metadata: The instance of :class:`Metadata` describing this
                     distribution.
    """
    self.metadata = metadata
    self.name = metadata.name
    self.key = self.name.lower()
    self.version = metadata.version
    self.locator = None
    self.digest = None
    self.extras = None
    self.context = None
    self.download_urls = set()
    self.digests = {}
@property
def source_url(self):
    """
    The source archive download URL for this distribution.
    """
    return self.metadata.source_url
@property
def name_and_version(self):
    """
    A utility property which displays the name and version in parentheses.
    """
    return u'%s (%s)' % (self.name, self.version)
@property
def provides(self):
    """
    A set of distribution names and versions provided by this distribution.

    :return: A set of "name (version)" strings.
    """
    plist = self.metadata.provides
    s = u'%s (%s)' % (self.name, self.version)
    if s not in plist:
        plist.append(s)
    return plist
def matches_requirement(self, req):
    """
    Say if this instance matches (fulfills) a requirement.

    :param req: The requirement to match.
    :rtype req: str
    :return: True if it matches, else False.
    """
    r = parse_requirement(req)
    scheme = get_scheme(self.metadata.scheme)
    try:
        matcher = scheme.matcher(r.requirement)
    except UnsupportedVersionError:
        # If we cannot parse the version, fall back to matching by name.
        logger.warning(u'could not read version %r - using name only', req)
        name = req.split()[0]
        matcher = scheme.matcher(name)
    name = matcher.key   # case-insensitive
    result = False
    for p in self.provides:
        p_name, p_ver = parse_name_and_version(p)
        if p_name != name:
            continue
        try:
            result = matcher.match(p_ver)
            break
        except UnsupportedVersionError:
            pass
    return result
def __repr__(self):
    """
    Return a textual representation of this instance.
    """
    if self.source_url:
        suffix = u' [%s]' % self.source_url
    else:
        suffix = u''
    return u'<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
    """
    See if this distribution is the same as another.

    :param other: The distribution to compare with. To be equal to one
                  another, distributions must have the same type, name,
                  version and source_url.
    :return: True if it is the same, else False.
    """
    if type(other) is not type(self):
        result = False
    else:
        result = (self.name == other.name and
                  self.version == other.version and
                  self.source_url == other.source_url)
    return result
def __hash__(self):
    """
    Compute hash in a way which matches the equality test.
    """
    return hash(self.name) + hash(self.version) + hash(self.source_url)
def __init__(self, metadata, path, env=None):
    """
    Initialise an instance.

    :param metadata: An instance of :class:`Metadata` which describes the
                     distribution. This will normally have been
                     initialised from a metadata file in the ``path``.
    :param path: The path of the ``.dist-info`` or ``.egg-info`` directory
                 for the distribution.
    :param env: This is normally the :class:`DistributionPath` instance
                where this distribution was found.
    """
    super(BaseInstalledDistribution, self).__init__(metadata)
    self.path = path
    self.dist_path = env
def get_hash(self, data, hasher=None):
    """
    Get the hash of some data, using a particular hash algorithm, if
    specified.

    :param data: The data to be hashed.
    :type data: bytes
    :param hasher: The name of a hash implementation, supported by
                   hashlib, or ``None``. Examples of valid values are
                   ``'sha1'``, ``'sha224'``, ``'sha384'``, ``'sha256'``,
                   ``'md5'`` and ``'sha512'``. If no hasher is specified,
                   the ``hasher`` attribute of the
                   :class:`InstalledDistribution` instance is used. If the
                   hasher is determined to be ``None``, MD5 is used as the
                   hashing algorithm.
    :returns: The hash of the data. If a hasher was explicitly specified,
              the returned hash will be prefixed with the specified hasher
              followed by '='.
    :rtype: str
    """
    if hasher is None:
        hasher = self.hasher
    if hasher is None:
        hasher = hashlib.md5
        prefix = u''
    else:
        # Capture the algorithm name for the prefix before rebinding
        # 'hasher' to the hashlib constructor (the original bound the
        # prefix to self.hasher, which disagrees with the docstring).
        prefix = u'%s=' % hasher
        hasher = getattr(hashlib, hasher)
    digest = hasher(data).digest()
    # Unpadded URL-safe base64, as used in wheel RECORD files. Note that
    # b64encode returns bytes, so the padding is stripped with b'='.
    digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode(u'ascii')
    return u'%s%s' % (prefix, digest)
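# The digest format produced above (algorithm name, '=', unpadded
# URL-safe base64 of the raw digest) is the one used in wheel RECORD
# files. A standalone sketch of the same encoding:
import base64
import hashlib

def record_hash(data, algo='sha256'):
    # Hash the bytes, then base64-encode URL-safely and drop the padding.
    digest = getattr(hashlib, algo)(data).digest()
    b64 = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
    return '%s=%s' % (algo, b64)

print(record_hash(b''))
# -> sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU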
def _get_records(self):
    """
    Get the list of installed files for the distribution.

    :return: A list of tuples of path, hash and size. Note that hash and
             size might be ``None`` for some entries. The path is exactly
             as stored in the file (which is as in PEP 376).
    """
    results = []
    r = self.get_distinfo_resource(u'RECORD')
    with contextlib.closing(r.as_stream()) as stream:
        with CSVReader(stream=stream) as record_reader:
            for row in record_reader:
                # Pad rows that have fewer than three columns with None.
                missing = [None for i in range(len(row), 3)]
                path, checksum, size = row + missing
                results.append((path, checksum, size))
    return results
@cached_property
def exports(self):
    """
    Return the information exported by this distribution.

    :return: A dictionary of exports, mapping an export category to a dict
             of :class:`ExportEntry` instances describing the individual
             export entries, and keyed by name.
    """
    result = {}
    r = self.get_distinfo_resource(EXPORTS_FILENAME)
    if r:
        result = self.read_exports()
    return result
def read_exports(self):
    """
    Read exports data from a file in .ini format.

    :return: A dictionary of exports, mapping an export category to a list
             of :class:`ExportEntry` instances describing the individual
             export entries.
    """
    result = {}
    r = self.get_distinfo_resource(EXPORTS_FILENAME)
    if r:
        with contextlib.closing(r.as_stream()) as stream:
            result = read_exports(stream)
    return result
def write_exports(self, exports):
    """
    Write a dictionary of exports to a file in .ini format.

    :param exports: A dictionary of exports, mapping an export category to
                    a list of :class:`ExportEntry` instances describing
                    the individual export entries.
    """
    rf = self.get_distinfo_file(EXPORTS_FILENAME)
    with open(rf, u'w') as f:
        write_exports(exports, f)
def get_resource_path(self, relative_path):
    """
    NOTE: This API may change in the future.

    Return the absolute path to a resource file with the given relative
    path.

    :param relative_path: The path, relative to .dist-info, of the
                          resource of interest.
    :return: The absolute path where the resource is to be found.
    """
    r = self.get_distinfo_resource(u'RESOURCES')
    with contextlib.closing(r.as_stream()) as stream:
        with CSVReader(stream=stream) as resources_reader:
            for relative, destination in resources_reader:
                if relative == relative_path:
                    return destination
    raise KeyError(u'no resource file with relative path %r '
                   u'is installed' % relative_path)
def list_installed_files(self):
    """
    Iterates over the ``RECORD`` entries and returns a tuple
    ``(path, hash, size)`` for each line.

    :returns: iterator of (path, hash, size)
    """
    for result in self._get_records():
        yield result
def write_installed_files(self, paths, prefix, dry_run=False):
    """
    Writes the ``RECORD`` file, using the ``paths`` iterable passed in.
    Any existing ``RECORD`` file is silently overwritten.

    prefix is used to determine when to write absolute paths.
    """
    prefix = os.path.join(prefix, u'')
    base = os.path.dirname(self.path)
    base_under_prefix = base.startswith(prefix)
    base = os.path.join(base, u'')
    record_path = self.get_distinfo_file(u'RECORD')
    logger.info(u'creating %s', record_path)
    if dry_run:
        return None
    with CSVWriter(record_path) as writer:
        for path in paths:
            if os.path.isdir(path) or path.endswith((u'.pyc', u'.pyo')):
                # Directories and byte-compiled files get no hash or size.
                hash_value = size = u''
            else:
                size = u'%d' % os.path.getsize(path)
                with open(path, u'rb') as fp:
                    hash_value = self.get_hash(fp.read())
            if path.startswith(base) or (base_under_prefix and
                                         path.startswith(prefix)):
                path = os.path.relpath(path, base)
            writer.writerow((path, hash_value, size))
        # Add the RECORD file itself, with no hash or size.
        if record_path.startswith(base):
            record_path = os.path.relpath(record_path, base)
        writer.writerow((record_path, u'', u''))
    return record_path
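# For reference, rows written this way look like the following (paths,
# digests and sizes are illustrative only; note the empty hash/size
# columns for byte-compiled files and for RECORD itself):
#
#     mypkg/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,123
#     mypkg/__pycache__/__init__.cpython-311.pyc,,
#     mypkg-1.0.dist-info/RECORD,,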
def check_installed_files(self):
    """
    Checks that the hashes and sizes of the files in ``RECORD`` are
    matched by the files themselves. Returns a (possibly empty) list of
    mismatches. Each entry in the mismatch list will be a tuple consisting
    of the path, 'exists', 'size' or 'hash' according to what didn't match
    (existence is checked first, then size, then hash), the expected value
    and the actual value.
    """
    mismatches = []
    base = os.path.dirname(self.path)
    record_path = self.get_distinfo_file(u'RECORD')
    for path, hash_value, size in self.list_installed_files():
        if not os.path.isabs(path):
            path = os.path.join(base, path)
        if path == record_path:
            continue
        if not os.path.exists(path):
            mismatches.append((path, u'exists', True, False))
        elif os.path.isfile(path):
            actual_size = str(os.path.getsize(path))
            if size and actual_size != size:
                mismatches.append((path, u'size', size, actual_size))
            elif hash_value:
                if u'=' in hash_value:
                    hasher = hash_value.split(u'=', 1)[0]
                else:
                    hasher = None
                with open(path, u'rb') as f:
                    actual_hash = self.get_hash(f.read(), hasher)
                if actual_hash != hash_value:
                    mismatches.append((path, u'hash', hash_value,
                                       actual_hash))
    return mismatches
@cached_property
def shared_locations(self):
    """
    A dictionary of shared locations whose keys are in the set 'prefix',
    'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
    The corresponding value is the absolute path of that category for
    this distribution, and takes into account any paths selected by the
    user at installation time (e.g. via command-line arguments). In the
    case of the 'namespace' key, this would be a list of absolute paths
    for the roots of namespace packages in this distribution.

    The first time this property is accessed, the relevant information is
    read from the SHARED file in the .dist-info directory.
    """
    result = {}
    shared_path = os.path.join(self.path, u'SHARED')
    if os.path.isfile(shared_path):
        with codecs.open(shared_path, u'r', encoding=u'utf-8') as f:
            lines = f.read().splitlines()
        for line in lines:
            key, value = line.split(u'=', 1)
            if key == u'namespace':
                result.setdefault(key, []).append(value)
            else:
                result[key] = value
    return result
def write_shared_locations(self, paths, dry_run=False):
    """
    Write shared location information to the SHARED file in .dist-info.

    :param paths: A dictionary as described in the documentation for
                  :meth:`shared_locations`.
    :param dry_run: If True, the action is logged but no file is actually
                    written.
    :return: The path of the file written to.
    """
    shared_path = os.path.join(self.path, u'SHARED')
    logger.info(u'creating %s', shared_path)
    if dry_run:
        return None
    lines = []
    for key in (u'prefix', u'lib', u'headers', u'scripts', u'data'):
        path = paths[key]
        if os.path.isdir(paths[key]):
            lines.append(u'%s=%s' % (key, path))
    for ns in paths.get(u'namespace', ()):
        lines.append(u'namespace=%s' % ns)
    with codecs.open(shared_path, u'w', encoding=u'utf-8') as f:
        f.write(u'\n'.join(lines))
    return shared_path
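# The resulting SHARED file is a simple key=value text file, with the
# 'namespace' key allowed to repeat. An illustrative example (paths are
# made up):
#
#     prefix=/opt/myapp
#     scripts=/opt/myapp/bin
#     data=/opt/myapp/data
#     namespace=/opt/myapp/lib/ns1
#     namespace=/opt/myapp/lib/ns2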
def get_distinfo_file(self, path):
    """
    Returns a path located under the ``.dist-info`` directory. Returns a
    string representing the path.

    :parameter path: a ``'/'``-separated path relative to the
                     ``.dist-info`` directory or an absolute path;
                     If *path* is an absolute path and doesn't start
                     with the ``.dist-info`` directory path,
                     a :class:`DistlibException` is raised
    :type path: str
    :rtype: str
    """
    # Check whether it looks like an absolute path into a .dist-info dir.
    if path.find(os.sep) >= 0:
        distinfo_dirname, path = path.split(os.sep)[-2:]
        if distinfo_dirname != self.path.split(os.sep)[-1]:
            raise DistlibException(
                u'dist-info file %r does not belong to the %r %s '
                u'distribution' % (path, self.name, self.version))
    # The file must be relative at this point.
    if path not in DIST_FILES:
        raise DistlibException(u'invalid path for a dist-info file: '
                               u'%r at %r' % (path, self.path))
    return os.path.join(self.path, path)
def list_distinfo_files(self):
    """
    Iterates over the ``RECORD`` entries and returns paths for each line
    if the path is pointing to a file located in the ``.dist-info``
    directory or one of its subdirectories.

    :returns: iterator of paths
    """
    base = os.path.dirname(self.path)
    for path, checksum, size in self._get_records():
        if not os.path.isabs(path):
            path = os.path.join(base, path)
        if path.startswith(self.path):
            yield path
def check_installed_files(self):
    """
    Checks that the files listed in ``installed-files.txt`` are present.
    Returns a (possibly empty) list of mismatches. Each entry in the
    mismatch list will be a tuple consisting of the path, 'exists', the
    expected value and the actual value (only existence is checked for
    egg-style installs).
    """
    mismatches = []
    record_path = os.path.join(self.path, u'installed-files.txt')
    if os.path.exists(record_path):
        for path, _, _ in self.list_installed_files():
            if path == record_path:
                continue
            if not os.path.exists(path):
                mismatches.append((path, u'exists', True, False))
    return mismatches
def list_installed_files(self):
    """
    Iterates over the ``installed-files.txt`` entries and returns a tuple
    ``(path, hash, size)`` for each line.

    :returns: a list of (path, hash, size)
    """
    def _md5(path):
        with open(path, u'rb') as f:
            content = f.read()
        return hashlib.md5(content).hexdigest()

    def _size(path):
        return os.stat(path).st_size

    record_path = os.path.join(self.path, u'installed-files.txt')
    result = []
    if os.path.exists(record_path):
        with codecs.open(record_path, u'r', encoding=u'utf-8') as f:
            for line in f:
                line = line.strip()
                p = os.path.normpath(os.path.join(self.path, line))
                if not os.path.exists(p):
                    logger.warning(u'Non-existent file: %s', p)
                    if p.endswith((u'.pyc', u'.pyo')):
                        continue
                    # otherwise fall through and fail below
                if not os.path.isdir(p):
                    result.append((p, _md5(p), _size(p)))
        result.append((record_path, None, None))
    return result
def list_distinfo_files(self, absolute=False):
    """
    Iterates over the ``installed-files.txt`` entries and returns paths
    for each line if the path is pointing to a file located in the
    ``.egg-info`` directory or one of its subdirectories.

    :parameter absolute: If *absolute* is ``True``, each returned path is
                         transformed into a local absolute path. Otherwise
                         the raw value from ``installed-files.txt`` is
                         returned.
    :type absolute: boolean
    :returns: iterator of paths
    """
    record_path = os.path.join(self.path, u'installed-files.txt')
    skip = True
    with codecs.open(record_path, u'r', encoding=u'utf-8') as f:
        for line in f:
            line = line.strip()
            if line == u'./':
                skip = False
                continue
            if not skip:
                p = os.path.normpath(os.path.join(self.path, line))
                if p.startswith(self.path):
                    if absolute:
                        yield p
                    else:
                        yield line
def add_distribution(self, distribution):
    """
    Add the *distribution* to the graph.

    :type distribution: :class:`distutils2.database.InstalledDistribution`
                        or :class:`distutils2.database.EggInfoDistribution`
    """
    self.adjacency_list[distribution] = []
    self.reverse_list[distribution] = []
def add_edge(self, x, y, label=None):
    """
    Add an edge from distribution *x* to distribution *y* with the given
    *label*.

    :type x: :class:`distutils2.database.InstalledDistribution` or
             :class:`distutils2.database.EggInfoDistribution`
    :type y: :class:`distutils2.database.InstalledDistribution` or
             :class:`distutils2.database.EggInfoDistribution`
    :type label: ``str`` or ``None``
    """
    self.adjacency_list[x].append((y, label))
    # Multiple edges are allowed, so keep the reverse list free of
    # duplicates.
    if x not in self.reverse_list[y]:
        self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
    """
    Add a missing *requirement* for the given *distribution*.

    :type distribution: :class:`distutils2.database.InstalledDistribution`
                        or :class:`distutils2.database.EggInfoDistribution`
    :type requirement: ``str``
    """
    logger.debug(u'%s missing %r', distribution, requirement)
    self.missing.setdefault(distribution, []).append(requirement)
def repr_node(self, dist, level=1):
    """
    Return a string representation of a subgraph rooted at *dist*.
    """
    output = [self._repr_dist(dist)]
    for other, label in self.adjacency_list[dist]:
        dist = self._repr_dist(other)
        if label is not None:
            dist = u'%s [%s]' % (dist, label)
        output.append(u'    ' * level + str(dist))
        suboutput = self.repr_node(other, level + 1)
        subs = suboutput.split(u'\n')
        output.extend(subs[1:])
    return u'\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
    """
    Writes a DOT output for the graph to the provided file *f*.

    If *skip_disconnected* is set to ``True``, then all distributions
    that are not dependent on any other distribution are skipped.

    :type f: has to support ``file``-like operations
    :type skip_disconnected: ``bool``
    """
    disconnected = []
    f.write(u'digraph dependencies {\n')
    for dist, adjs in self.adjacency_list.items():
        if len(adjs) == 0 and not skip_disconnected:
            disconnected.append(dist)
        for other, label in adjs:
            if label is not None:
                f.write(u'"%s" -> "%s" [label="%s"]\n' %
                        (dist.name, other.name, label))
            else:
                f.write(u'"%s" -> "%s"\n' % (dist.name, other.name))
    if not skip_disconnected and len(disconnected) > 0:
        f.write(u'subgraph disconnected {\n')
        f.write(u'label = "Disconnected"\n')
        f.write(u'bgcolor = red\n')
        for dist in disconnected:
            f.write(u'"%s"' % dist.name)
            f.write(u'\n')
        f.write(u'}\n')
    f.write(u'}\n')
def topological_sort(self):
    """
    Perform a topological sort of the graph.

    :return: A tuple, the first element of which is a topologically sorted
             list of distributions, and the second element of which is a
             list of distributions that cannot be sorted because they have
             circular dependencies and so form a cycle.
    """
    result = []
    # Make a shallow copy of the adjacency list.
    alist = {}
    for k, v in self.adjacency_list.items():
        alist[k] = v[:]
    while True:
        # Find all nodes with no remaining dependencies.
        to_remove = []
        for k, v in list(alist.items())[:]:
            if not v:
                to_remove.append(k)
                del alist[k]
        if not to_remove:
            # Anything still in alist at this point is part of a cycle.
            break
        # Remove the resolved nodes from everyone else's dependency lists.
        for k, v in alist.items():
            alist[k] = [(d, r) for d, r in v if d not in to_remove]
        logger.debug(u'Moving to result: %s',
                     [u'%s (%s)' % (d.name, d.version) for d in to_remove])
        result.extend(to_remove)
    return result, list(alist.keys())
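# The loop above is essentially Kahn's algorithm run on dependency
# edges. A self-contained sketch of the same idea on a plain dict
# (node names are made up):
def toposort(adjacency):
    # adjacency maps each node to the list of nodes it depends on.
    alist = {k: list(v) for k, v in adjacency.items()}
    result = []
    while True:
        ready = [k for k, v in alist.items() if not v]
        if not ready:
            break  # whatever is left participates in a cycle
        for k in ready:
            del alist[k]
        for k, v in alist.items():
            alist[k] = [d for d in v if d not in ready]
        result.extend(ready)
    return result, list(alist)

print(toposort({'app': ['lib'], 'lib': ['base'], 'base': []}))
# -> (['base', 'lib', 'app'], [])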
def __repr__(self):
    """
    Representation of the graph.
    """
    output = []
    for dist, adjs in self.adjacency_list.items():
        output.append(self.repr_node(dist))
    return u'\n'.join(output)
def __init__(self, context=None):
    """
    Initialise an instance.

    :param context: If specified, names are looked up in this mapping.
    """
    self.context = context or {}
    self.source = None
def get_fragment(self, offset):
    """
    Get the part of the source which is causing a problem.
    """
    fragment_len = 10
    s = '%r' % self.source[offset:offset + fragment_len]
    if offset + fragment_len < len(self.source):
        s += '...'
    return s
def get_handler(self, node_type):
    """
    Get a handler for the specified AST node type.
    """
    return getattr(self, 'do_%s' % node_type, None)
def evaluate(self, node, filename=None):
    """
    Evaluate a source string or node, using ``filename`` when
    displaying errors.
    """
    if isinstance(node, string_types):
        self.source = node
        kwargs = {'mode': 'eval'}
        if filename:
            kwargs['filename'] = filename
        try:
            node = ast.parse(node, **kwargs)
        except SyntaxError as e:
            s = self.get_fragment(e.offset)
            raise SyntaxError('syntax error %s' % s)
    node_type = node.__class__.__name__.lower()
    handler = self.get_handler(node_type)
    if handler is None:
        if self.source is None:
            s = '(source not available)'
        else:
            s = self.get_fragment(node.col_offset)
        raise SyntaxError("don't know how to evaluate %r %s" %
                          (node_type, s))
    return handler(node)
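# This evaluator backs environment-marker handling. Assuming distlib is
# installed, the public entry point built on it can be exercised like
# this (the context dict supplies names looked up during evaluation):
from distlib.markers import interpret

print(interpret("python_version >= '2.7'"))
# -> True on any modern interpreter
print(interpret("extra == 'test'", {'extra': 'docs'}))
# -> False; the supplied context binds 'extra' to 'docs'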
def match(self, version):
    """
    Check if the provided version matches the constraints.

    :param version: The version to match against this instance.
    :type version: String or :class:`Version` instance.
    """
    if isinstance(version, string_types):
        version = self.version_class(version)
    for operator, constraint, prefix in self._parts:
        f = self._operators.get(operator)
        if isinstance(f, string_types):
            f = getattr(self, f)
        if not f:
            msg = '%r not implemented for %s' % (operator,
                                                 self.__class__.__name__)
            raise NotImplementedError(msg)
        if not f(version, constraint, prefix):
            return False
    return True
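# Assuming distlib is installed, a matcher like the ones used here can
# be obtained from a version scheme and queried directly:
from distlib.version import get_scheme

scheme = get_scheme('default')
matcher = scheme.matcher('requests (>=2.0, <3.0)')
print(matcher.match('2.18.4'))  # -> True
print(matcher.match('3.0.1'))   # -> False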
def is_valid_constraint_list(self, s):
    """
    Used for processing some metadata fields.
    """
    return self.is_valid_matcher('dummy_name (%s)' % s)
def is_stale(self, resource, path):
    """
    Is the cache stale for the given resource?

    :param resource: The :class:`Resource` being cached.
    :param path: The path of the resource in the cache.
    :return: True if the cache is stale.
    """
    # Assume the cache is always stale; subclasses can override this
    # with a real freshness check.
    return True
def get(self, resource):
    """
    Get a resource into the cache.

    :param resource: A :class:`Resource` instance.
    :return: The pathname of the resource in the cache.
    """
    prefix, path = resource.finder.get_cache_info(resource)
    if prefix is None:
        result = path
    else:
        result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
        dirname = os.path.dirname(result)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        if not os.path.exists(result):
            stale = True
        else:
            stale = self.is_stale(resource, path)
        if stale:
            # Write the bytes of the resource to the cache location.
            with open(result, u'wb') as f:
                f.write(resource.bytes)
    return result
def as_stream(self):
    """
    Get the resource as a stream.

    This is not a property to make it obvious that it returns a new stream
    each time.
    """
    return self.finder.get_stream(self)
def __init__(self, filename=None, sign=False, verify=False):
    """
    Initialise an instance using a (valid) filename.
    """
    self.sign = sign
    self.should_verify = verify
    self.buildver = u''
    self.pyver = [PYVER]
    self.abi = [u'none']
    self.arch = [u'any']
    self.dirname = os.getcwd()
    if filename is None:
        self.name = u'dummy'
        self.version = u'0.1'
        self._filename = self.filename
    else:
        m = NAME_VERSION_RE.match(filename)
        if m:
            info = m.groupdict(u'')
            self.name = info[u'nm']
            # Reinstate the local version separator.
            self.version = info[u'vn'].replace(u'_', u'-')
            self.buildver = info[u'bn']
            self._filename = self.filename
        else:
            dirname, filename = os.path.split(filename)
            m = FILENAME_RE.match(filename)
            if not m:
                raise DistlibException(u'Invalid name or '
                                       u'filename: %r' % filename)
            if dirname:
                self.dirname = os.path.abspath(dirname)
            self._filename = filename
            info = m.groupdict(u'')
            self.name = info[u'nm']
            self.version = info[u'vn']
            self.buildver = info[u'bn']
            self.pyver = info[u'py'].split(u'.')
            self.abi = info[u'bi'].split(u'.')
            self.arch = info[u'ar'].split(u'.')
@property
def filename(self):
    """
    Build and return a filename from the various components.
    """
    if self.buildver:
        buildver = u'-' + self.buildver
    else:
        buildver = u''
    pyver = u'.'.join(self.pyver)
    abi = u'.'.join(self.abi)
    arch = u'.'.join(self.arch)
    # Replace '-' with '_' as a local version separator.
    version = self.version.replace(u'-', u'_')
    return u'%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
                                      pyver, abi, arch)
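# Assuming distlib is installed, parsing and rebuilding a wheel filename
# round-trips through these components ('pkg' is a made-up name):
from distlib.wheel import Wheel

w = Wheel('pkg-1.0-py27.py33-none-any.whl')
print(w.name, w.version)  # -> pkg 1.0
print(w.pyver)            # -> ['py27', 'py33']
print(w.filename)         # -> pkg-1.0-py27.py33-none-any.whl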
def build(self, paths, tags=None, wheel_version=None):
    """
    Build a wheel from files in specified paths, and use any specified
    tags when determining the name of the wheel.
    """
    if tags is None:
        tags = {}
    libkey = list(filter(lambda o: o in paths, (u'purelib', u'platlib')))[0]
    if libkey == u'platlib':
        is_pure = u'false'
        default_pyver = [IMPVER]
        default_abi = [ABI]
        default_arch = [ARCH]
    else:
        is_pure = u'true'
        default_pyver = [PYVER]
        default_abi = [u'none']
        default_arch = [u'any']
    self.pyver = tags.get(u'pyver', default_pyver)
    self.abi = tags.get(u'abi', default_abi)
    self.arch = tags.get(u'arch', default_arch)
    libdir = paths[libkey]
    name_ver = u'%s-%s' % (self.name, self.version)
    data_dir = u'%s.data' % name_ver
    info_dir = u'%s.dist-info' % name_ver
    archive_paths = []
    # First, stuff which is not in site-packages.
    for key in (u'data', u'headers', u'scripts'):
        if key not in paths:
            continue
        path = paths[key]
        if os.path.isdir(path):
            for root, dirs, files in os.walk(path):
                for fn in files:
                    p = fsdecode(os.path.join(root, fn))
                    rp = os.path.relpath(p, path)
                    ap = to_posix(os.path.join(data_dir, key, rp))
                    archive_paths.append((ap, p))
                    if key == u'scripts' and not p.endswith(u'.exe'):
                        with open(p, u'rb') as f:
                            data = f.read()
                        data = self.process_shebang(data)
                        with open(p, u'wb') as f:
                            f.write(data)
    # Now, stuff which is in site-packages, other than the dist-info
    # metadata.
    path = libdir
    distinfo = None
    for root, dirs, files in os.walk(path):
        if root == path:
            # At the top level only, save dist-info for later and skip
            # it for now.
            for i, dn in enumerate(dirs):
                dn = fsdecode(dn)
                if dn.endswith(u'.dist-info'):
                    distinfo = os.path.join(root, dn)
                    del dirs[i]
                    break
            assert distinfo, u'.dist-info directory expected, not found'
        for fn in files:
            if fsdecode(fn).endswith((u'.pyc', u'.pyo')):
                continue
            p = os.path.join(root, fn)
            rp = to_posix(os.path.relpath(p, path))
            archive_paths.append((rp, p))
    # Now the dist-info directory, which is assumed to be flat.
    files = os.listdir(distinfo)
    for fn in files:
        if fn not in (u'RECORD', u'INSTALLER', u'SHARED', u'WHEEL'):
            p = fsdecode(os.path.join(distinfo, fn))
            ap = to_posix(os.path.join(info_dir, fn))
            archive_paths.append((ap, p))
    wheel_metadata = [
        u'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
        u'Generator: distlib %s' % __version__,
        u'Root-Is-Purelib: %s' % is_pure,
    ]
    for pyver, abi, arch in self.tags:
        wheel_metadata.append(u'Tag: %s-%s-%s' % (pyver, abi, arch))
    p = os.path.join(distinfo, u'WHEEL')
    with open(p, u'w') as f:
        f.write(u'\n'.join(wheel_metadata))
    ap = to_posix(os.path.join(info_dir, u'WHEEL'))
    archive_paths.append((ap, p))
    # Finally, write RECORD and build the zip file.
    self.write_records((distinfo, info_dir), libdir, archive_paths)
    pathname = os.path.join(self.dirname, self.filename)
    self.build_zip(pathname, archive_paths)
    return pathname
def install(self, paths, maker, **kwargs):
    """
    Install a wheel to the specified paths. If kwarg ``warner`` is
    specified, it should be a callable, which will be called with two
    tuples indicating the wheel version of this software and the wheel
    version in the file, if there is a discrepancy in the versions. This
    can be used to issue any warnings or to raise any exceptions. If kwarg
    ``lib_only`` is True, only the purelib/platlib files are installed,
    and the headers, scripts, data and dist-info metadata are not written.

    The return value is a :class:`InstalledDistribution` instance unless
    ``options.lib_only`` is True, in which case the return value is
    ``None``.
    """
    dry_run = maker.dry_run
    warner = kwargs.get(u'warner')
    lib_only = kwargs.get(u'lib_only', False)
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = u'%s-%s' % (self.name, self.version)
    data_dir = u'%s.data' % name_ver
    info_dir = u'%s.dist-info' % name_ver
    metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
    wheel_metadata_name = posixpath.join(info_dir, u'WHEEL')
    record_name = posixpath.join(info_dir, u'RECORD')
    wrapper = codecs.getreader(u'utf-8')
    with ZipFile(pathname, u'r') as zf:
        with zf.open(wheel_metadata_name) as bwf:
            wf = wrapper(bwf)
            message = message_from_file(wf)
        wv = message[u'Wheel-Version'].split(u'.', 1)
        file_version = tuple([int(i) for i in wv])
        if file_version != self.wheel_version and warner:
            warner(self.wheel_version, file_version)
        if message[u'Root-Is-Purelib'] == u'true':
            libdir = paths[u'purelib']
        else:
            libdir = paths[u'platlib']
        records = {}
        with zf.open(record_name) as bf:
            with CSVReader(stream=bf) as reader:
                for row in reader:
                    p = row[0]
                    records[p] = row
        data_pfx = posixpath.join(data_dir, u'')
        info_pfx = posixpath.join(info_dir, u'')
        script_pfx = posixpath.join(data_dir, u'scripts', u'')
        # Use our own FileOperator instance, with recording on, so we
        # can roll back on failure.
        fileop = FileOperator(dry_run=dry_run)
        fileop.record = True
        bc = not sys.dont_write_bytecode
        outfiles = []   # for RECORD writing
        # Scripts are copied to a work area and processed from there.
        workdir = tempfile.mkdtemp()
        maker.source_dir = workdir
        maker.target_dir = None
        try:
            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode(u'utf-8')
                # The signature file won't be in RECORD.
                if u_arcname.endswith(u'/RECORD.jws'):
                    continue
                row = records[u_arcname]
                if row[2] and str(zinfo.file_size) != row[2]:
                    raise DistlibException(u'size mismatch for '
                                           u'%s' % u_arcname)
                if row[1]:
                    kind, value = row[1].split(u'=', 1)
                    with zf.open(arcname) as bf:
                        data = bf.read()
                    _, digest = self.get_hash(data, kind)
                    if digest != value:
                        raise DistlibException(u'digest mismatch for '
                                               u'%s' % arcname)
                if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                    logger.debug(u'lib_only: skipping %s', u_arcname)
                    continue
                is_script = (u_arcname.startswith(script_pfx) and
                             not u_arcname.endswith(u'.exe'))
                if u_arcname.startswith(data_pfx):
                    _, where, rp = u_arcname.split(u'/', 2)
                    outfile = os.path.join(paths[where], convert_path(rp))
                else:
                    # Meant for site-packages.
                    if u_arcname in (wheel_metadata_name, record_name):
                        continue
                    outfile = os.path.join(libdir, convert_path(u_arcname))
                if not is_script:
                    with zf.open(arcname) as bf:
                        fileop.copy_stream(bf, outfile)
                    outfiles.append(outfile)
                    # Double-check the digest of the written file.
                    if not dry_run and row[1]:
                        with open(outfile, u'rb') as bf:
                            data = bf.read()
                            _, newdigest = self.get_hash(data, kind)
                            if newdigest != digest:
                                raise DistlibException(u'digest mismatch '
                                                       u'on write for '
                                                       u'%s' % outfile)
                    if bc and outfile.endswith(u'.py'):
                        try:
                            pyc = fileop.byte_compile(outfile)
                            outfiles.append(pyc)
                        except Exception:
                            # Don't give up if byte-compilation fails.
                            logger.warning(u'Byte-compilation failed',
                                           exc_info=True)
                else:
                    fn = os.path.basename(convert_path(arcname))
                    workname = os.path.join(workdir, fn)
                    with zf.open(arcname) as bf:
                        fileop.copy_stream(bf, workname)
                    dn, fn = os.path.split(outfile)
                    maker.target_dir = dn
                    filenames = maker.make(fn)
                    fileop.set_executable_mode(filenames)
                    outfiles.extend(filenames)
            if lib_only:
                logger.debug(u'lib_only: returning None')
                dist = None
            else:
                # Generate scripts. Try to get the script metadata so we
                # can see if there are any commands to generate; if this
                # fails (e.g. a legacy wheel), warn but don't give up.
                commands = None
                file_version = self.info[u'Wheel-Version']
                if file_version == u'1.0':
                    # Use legacy metadata.
                    ep = posixpath.join(info_dir, u'entry_points.txt')
                    try:
                        with zf.open(ep) as bwf:
                            epdata = read_exports(bwf)
                        commands = {}
                        for key in (u'console', u'gui'):
                            k = u'%s_scripts' % key
                            if k in epdata:
                                commands[u'wrap_%s' % key] = d = {}
                                for v in epdata[k].values():
                                    s = u'%s:%s' % (v.prefix, v.suffix)
                                    if v.flags:
                                        s += u' %s' % v.flags
                                    d[v.name] = s
                    except Exception:
                        logger.warning(u'Unable to read legacy script '
                                       u'metadata, so cannot generate '
                                       u'scripts')
                else:
                    try:
                        with zf.open(metadata_name) as bwf:
                            wf = wrapper(bwf)
                            commands = json.load(wf).get(u'extensions')
                            if commands:
                                commands = commands.get(u'python.commands')
                    except Exception:
                        logger.warning(u'Unable to read JSON metadata, so '
                                       u'cannot generate scripts')
                if commands:
                    console_scripts = commands.get(u'wrap_console', {})
                    gui_scripts = commands.get(u'wrap_gui', {})
                    if console_scripts or gui_scripts:
                        script_dir = paths.get(u'scripts', u'')
                        if not os.path.isdir(script_dir):
                            raise ValueError(u'Valid script path not '
                                             u'specified')
                        maker.target_dir = script_dir
                        for k, v in console_scripts.items():
                            script = u'%s = %s' % (k, v)
                            filenames = maker.make(script)
                            fileop.set_executable_mode(filenames)
                        if gui_scripts:
                            options = {u'gui': True}
                            for k, v in gui_scripts.items():
                                script = u'%s = %s' % (k, v)
                                filenames = maker.make(script, options)
                                fileop.set_executable_mode(filenames)
                p = os.path.join(libdir, info_dir)
                dist = InstalledDistribution(p)
                # Write SHARED; don't mutate the caller's dict.
                paths = dict(paths)
                del paths[u'purelib']
                del paths[u'platlib']
                paths[u'lib'] = libdir
                p = dist.write_shared_locations(paths, dry_run)
                if p:
                    outfiles.append(p)
                # Write RECORD.
                dist.write_installed_files(outfiles, paths[u'prefix'],
                                           dry_run)
            return dist
        except Exception:
            logger.exception(u'installation failed.')
            fileop.rollback()
            raise
        finally:
            shutil.rmtree(workdir)
def is_compatible(self):
    """
    Determine if a wheel is compatible with the running system.
    """
    return is_compatible(self)
def is_mountable(self):
    """
    Determine if a wheel is asserted as mountable by its metadata.
    """
    # Always mountable for now; metadata-based checks could be added
    # later.
    return True
def update(self, modifier, dest_dir=None, **kwargs):
    """
    Update the contents of a wheel in a generic way. The modifier should
    be a callable which expects a dictionary argument: its keys are
    archive-entry paths, and its values are absolute filesystem paths
    where the contents of the corresponding archive entries can be found.
    The modifier is free to change the contents of the files pointed to,
    add new entries and remove entries, before returning. This method will
    extract the entire contents of the wheel to a temporary location, call
    the modifier, and then use the passed (and possibly updated)
    dictionary to write a new wheel. If ``dest_dir`` is specified, the new
    wheel is written there -- otherwise, the original wheel is
    overwritten.

    The modifier should return True if it updated the wheel, else False.
    This method returns the same value the modifier returns.
    """
    def get_version(path_map, info_dir):
        version = path = None
        key = u'%s/%s' % (info_dir, METADATA_FILENAME)
        if key not in path_map:
            key = u'%s/PKG-INFO' % info_dir
        if key in path_map:
            path = path_map[key]
            version = Metadata(path=path).version
        return version, path

    def update_version(version, path):
        updated = None
        try:
            # Parse to validate; raises UnsupportedVersionError for
            # non-PEP-440 versions.
            v = NormalizedVersion(version)
            i = version.find(u'-')
            if i < 0:
                updated = u'%s+1' % version
            else:
                parts = [int(s) for s in version[i + 1:].split(u'.')]
                parts[-1] += 1
                updated = u'%s+%s' % (version[:i],
                                      u'.'.join(str(i) for i in parts))
        except UnsupportedVersionError:
            logger.debug(u'Cannot update non-compliant (PEP-440) '
                         u'version %r', version)
        if updated:
            md = Metadata(path=path)
            md.version = updated
            legacy = not path.endswith(METADATA_FILENAME)
            md.write(path=path, legacy=legacy)
            logger.debug(u'Version updated from %r to %r', version,
                         updated)

    pathname = os.path.join(self.dirname, self.filename)
    name_ver = u'%s-%s' % (self.name, self.version)
    info_dir = u'%s.dist-info' % name_ver
    record_name = posixpath.join(info_dir, u'RECORD')
    with tempdir() as workdir:
        with ZipFile(pathname, u'r') as zf:
            path_map = {}
            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode(u'utf-8')
                if u_arcname == record_name:
                    continue
                if u'..' in u_arcname:
                    raise DistlibException(u'invalid entry in '
                                           u'wheel: %r' % u_arcname)
                zf.extract(zinfo, workdir)
                path = os.path.join(workdir, convert_path(u_arcname))
                path_map[u_arcname] = path
        # Remember the version, then let the modifier do its work.
        original_version, _ = get_version(path_map, info_dir)
        modified = modifier(path_map, **kwargs)
        if modified:
            # Something changed - we need to build a new wheel. If the
            # modifier didn't change the version, bump it ourselves so
            # the change is visible.
            current_version, path = get_version(path_map, info_dir)
            if current_version and (current_version == original_version):
                update_version(current_version, path)
            if dest_dir is None:
                fd, newpath = tempfile.mkstemp(suffix=u'.whl',
                                               prefix=u'wheel-update-',
                                               dir=workdir)
                os.close(fd)
            else:
                if not os.path.isdir(dest_dir):
                    raise DistlibException(u'Not a directory: '
                                           u'%r' % dest_dir)
                newpath = os.path.join(dest_dir, self.filename)
            archive_paths = list(path_map.items())
            distinfo = os.path.join(workdir, info_dir)
            info = distinfo, info_dir
            self.write_records(info, workdir, archive_paths)
            self.build_zip(newpath, archive_paths)
            if dest_dir is None:
                shutil.copyfile(newpath, pathname)
    return modified
def __init__(self, base=None):
    """
    Initialise an instance.

    :param base: The base directory to explore under.
    """
    self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
    self.prefix = self.base + os.sep
    self.allfiles = None
    self.files = set()
def findall(self):
    """
    Find all files under the base and set ``allfiles`` to the absolute
    pathnames of files found.
    """
    from stat import S_ISREG, S_ISDIR, S_ISLNK

    self.allfiles = allfiles = []
    root = self.base
    stack = [root]
    pop = stack.pop
    push = stack.append
    while stack:
        root = pop()
        names = os.listdir(root)
        for name in names:
            fullname = os.path.join(root, name)
            # Avoid excess stat calls -- just one will do.
            stat = os.stat(fullname)
            mode = stat.st_mode
            if S_ISREG(mode):
                allfiles.append(fsdecode(fullname))
            elif S_ISDIR(mode) and not S_ISLNK(mode):
                push(fullname)
def add(self, item):
    """
    Add a file to the manifest.

    :param item: The pathname to add. This can be relative to the base.
    """
    if not item.startswith(self.prefix):
        item = os.path.join(self.base, item)
    self.files.add(os.path.normpath(item))
def add_many(self, items):
    """
    Add a list of files to the manifest.

    :param items: The pathnames to add. These can be relative to the base.
    """
    for item in items:
        self.add(item)
def sorted(self, wantdirs=False):
    """
    Return sorted files in directory order.
    """
    def add_dir(dirs, d):
        dirs.add(d)
        logger.debug('add_dir added %s', d)
        if d != self.base:
            parent, _ = os.path.split(d)
            assert parent not in ('', '/')
            add_dir(dirs, parent)

    result = set(self.files)    # make a copy!
    if wantdirs:
        dirs = set()
        for f in result:
            add_dir(dirs, os.path.dirname(f))
        result |= dirs
    return [os.path.join(*path_tuple) for path_tuple in
            sorted(os.path.split(path) for path in result)]
def clear(self):
    """
    Clear all collected files.
    """
    self.files = set()
    self.allfiles = []
def process_directive(self, directive):
    """
    Process a directive which either adds some files from ``allfiles`` to
    ``files``, or removes some files from ``files``.

    :param directive: The directive to process. This should be in a format
                      compatible with distutils ``MANIFEST.in`` files:

                      http://docs.python.org/distutils/sourcedist.html#commands
    """
    # Parse the line: split it up, make sure the right number of words
    # is there, and return the relevant words. 'action' is always
    # defined; which of the other three are defined depends on the
    # action.
    action, patterns, thedir, dirpattern = self._parse_directive(directive)

    if action == 'include':
        for pattern in patterns:
            if not self._include_pattern(pattern, anchor=True):
                logger.warning('no files found matching %r', pattern)
    elif action == 'exclude':
        for pattern in patterns:
            found = self._exclude_pattern(pattern, anchor=True)
    elif action == 'global-include':
        for pattern in patterns:
            if not self._include_pattern(pattern, anchor=False):
                logger.warning('no files found matching %r '
                               'anywhere in distribution', pattern)
    elif action == 'global-exclude':
        for pattern in patterns:
            found = self._exclude_pattern(pattern, anchor=False)
    elif action == 'recursive-include':
        for pattern in patterns:
            if not self._include_pattern(pattern, prefix=thedir):
                logger.warning('no files found matching %r '
                               'under directory %r', pattern, thedir)
    elif action == 'recursive-exclude':
        for pattern in patterns:
            found = self._exclude_pattern(pattern, prefix=thedir)
    elif action == 'graft':
        if not self._include_pattern(None, prefix=dirpattern):
            logger.warning('no directories found matching %r', dirpattern)
    elif action == 'prune':
        if not self._exclude_pattern(None, prefix=dirpattern):
            logger.warning('no previously-included directories found '
                           'matching %r', dirpattern)
    else:
        raise DistlibException('invalid action %r' % action)
def _parse_directive(self, directive):
    """
    Validate a directive.

    :param directive: The directive to validate.
    :return: A tuple of action, patterns, thedir, dir_patterns
    """
    words = directive.split()
    if len(words) == 1 and words[0] not in (
            'include', 'exclude', 'global-include', 'global-exclude',
            'recursive-include', 'recursive-exclude', 'graft', 'prune'):
        # No action given; default to 'include'.
        words.insert(0, 'include')

    action = words[0]
    patterns = thedir = dir_pattern = None

    if action in ('include', 'exclude',
                  'global-include', 'global-exclude'):
        if len(words) < 2:
            raise DistlibException(
                '%r expects <pattern1> <pattern2> ...' % action)
        patterns = [convert_path(word) for word in words[1:]]
    elif action in ('recursive-include', 'recursive-exclude'):
        if len(words) < 3:
            raise DistlibException(
                '%r expects <dir> <pattern1> <pattern2> ...' % action)
        thedir = convert_path(words[1])
        patterns = [convert_path(word) for word in words[2:]]
    elif action in ('graft', 'prune'):
        if len(words) != 2:
            raise DistlibException(
                '%r expects a single <dir_pattern>' % action)
        dir_pattern = convert_path(words[1])
    else:
        raise DistlibException('unknown action %r' % action)

    return action, patterns, thedir, dir_pattern
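# The directives accepted here are the familiar MANIFEST.in commands;
# for example (file and directory names illustrative only):
#
#     include README.rst
#     recursive-include docs *.txt *.rst
#     global-exclude *.py[co]
#     graft examples
#     prune build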
def _include_pattern(self, pattern, anchor=True, prefix=None,
                     is_regex=False):
    """
    Select strings (presumably filenames) from 'self.files' that match
    'pattern', a Unix-style wildcard (glob) pattern.

    Patterns are not quite the same as implemented by the 'fnmatch'
    module: '*' and '?' match non-special characters, where "special" is
    platform-dependent: slash on Unix; colon, slash, and backslash on
    DOS/Windows; and colon on Mac OS.

    If 'anchor' is true (the default), then the pattern match is more
    stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
    'anchor' is false, both of these will match.

    If 'prefix' is supplied, then only filenames starting with 'prefix'
    (itself a pattern) and ending with 'pattern', with anything in between
    them, will match. 'anchor' is ignored in this case.

    If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
    'pattern' is assumed to be either a string containing a regex or a
    regex object -- no translation is done, the regex is just compiled
    and used as-is.

    Selected strings will be added to self.files.

    Return True if files are found.
    """
    found = False
    pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
    # Delayed loading of the allfiles list.
    if self.allfiles is None:
        self.findall()
    for name in self.allfiles:
        if pattern_re.search(name):
            self.files.add(name)
            found = True
    return found
def _exclude_pattern(self, pattern, anchor=True, prefix=None,
                     is_regex=False):
    """
    Remove strings (presumably filenames) from 'files' that match
    'pattern'.

    Other parameters are the same as for 'include_pattern()', above.
    The list 'self.files' is modified in place. Return True if files are
    found.

    This API is public to allow e.g. exclusion of SCM subdirs, e.g. when
    packaging source distributions.
    """
    found = False
    pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
    for f in list(self.files):
        if pattern_re.search(f):
            self.files.remove(f)
            found = True
    return found
def _translate_pattern(self, pattern, anchor=True, prefix=None,
                       is_regex=False):
    """
    Translate a shell-like wildcard pattern to a compiled regular
    expression.

    Return the compiled regex. If 'is_regex' true, then 'pattern' is
    directly compiled to a regex (if it's a string) or just returned
    as-is (assumes it's a regex object).
    """
    if is_regex:
        if isinstance(pattern, str):
            return re.compile(pattern)
        else:
            return pattern

    if pattern:
        pattern_re = self._glob_to_re(pattern)
    else:
        pattern_re = ''

    base = re.escape(os.path.join(self.base, ''))
    if prefix is not None:
        # Ditch the end-of-pattern characters from the prefix regex.
        empty_pattern = self._glob_to_re('')
        prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
        sep = os.sep
        if os.sep == '\\':
            sep = '\\\\'
        pattern_re = '^' + base + sep.join((prefix_re, '.*' + pattern_re))
    elif anchor:
        pattern_re = '^' + base + pattern_re

    return re.compile(pattern_re)
def _glob_to_re(self, pattern):
    """
    Translate a shell-like glob pattern to a regular expression.

    Return a string containing the regex. Differs from
    'fnmatch.translate()' in that '*' does not match "special characters"
    (which are platform-specific).
    """
    pattern_re = fnmatch.translate(pattern)

    # '?' and '*' in the glob pattern become '.' and '.*' in the RE, but
    # we don't want '.' to match directory separators, so replace any
    # unescaped '.' with a character class excluding the separator.
    sep = os.sep
    if os.sep == '\\':
        # We're using a regex to manipulate a regex, so we need to
        # escape the backslash twice.
        sep = '\\\\\\\\'
    escaped = '\\1[^%s]' % sep
    pattern_re = re.sub('((?<!\\\\)(\\\\\\\\)*)\\.', escaped, pattern_re)
    return pattern_re
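# The practical effect of the substitution above is that the '.'
# produced for '*' and '?' no longer crosses path separators. A quick
# illustration on a POSIX system (same regex written in raw-string
# form):
import fnmatch
import re

plain = fnmatch.translate('*.py')
print(re.match(plain, 'pkg/mod.py') is not None)   # -> True ('.*' spans '/')

no_sep = re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^/]', plain)
print(re.match(no_sep, 'pkg/mod.py') is not None)  # -> False
print(re.match(no_sep, 'mod.py') is not None)      # -> True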
def get_fullname(self, filesafe=False):
    """
    Return the distribution name with version.

    If filesafe is true, return a filename-escaped form.
    """
    return _get_name_and_version(self[u'Name'], self[u'Version'], filesafe)
def is_field(self, name):
    """
    Return True if name is a valid metadata key.
    """
    name = self._convert_name(name)
    return name in _ALL_FIELDS
def read(self, filepath):
    """
    Read the metadata values from a file path.
    """
    fp = codecs.open(filepath, u'r', encoding=u'utf-8')
    try:
        self.read_file(fp)
    finally:
        fp.close()
def read_file(self, fileob):
    """
    Read the metadata values from a file object.
    """
    msg = message_from_file(fileob)
    self._fields[u'Metadata-Version'] = msg[u'metadata-version']
    for field in _ALL_FIELDS:
        if field not in msg:
            continue
        if field in _LISTFIELDS:
            # We can have multiple lines.
            values = msg.get_all(field)
            if field in _LISTTUPLEFIELDS and values is not None:
                values = [tuple(value.split(u',')) for value in values]
            self.set(field, values)
        else:
            # Single line.
            value = msg[field]
            if value is not None and value != u'UNKNOWN':
                self.set(field, value)
    self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
    """
    Write the metadata fields to filepath.
    """
    fp = codecs.open(filepath, u'w', encoding=u'utf-8')
    try:
        self.write_file(fp, skip_unknown)
    finally:
        fp.close()
def write_file(self, fileobject, skip_unknown=False):
    """
    Write the PKG-INFO format data to a file object.
    """
    self.set_metadata_version()

    for field in _version2fieldlist(self[u'Metadata-Version']):
        values = self.get(field)
        if skip_unknown and values in (u'UNKNOWN', [], [u'UNKNOWN']):
            continue
        if field in _ELEMENTSFIELD:
            self._write_field(fileobject, field, u','.join(values))
            continue
        if field not in _LISTFIELDS:
            if field == u'Description':
                # Fold continuation lines; the dump collapsed the runs
                # of whitespace here (eight spaces for 1.0/1.1, seven
                # spaces plus '|' otherwise).
                if self.metadata_version in (u'1.0', u'1.1'):
                    values = values.replace(u'\n', u'\n        ')
                else:
                    values = values.replace(u'\n', u'\n       |')
            values = [values]
        if field in _LISTTUPLEFIELDS:
            values = [u','.join(value) for value in values]
        for value in values:
            self._write_field(fileobject, field, value)
def update(self, other=None, **kwargs):
    """
    Set metadata values from the given iterable `other` and kwargs.

    Behavior is like `dict.update`: If `other` has a ``keys`` method,
    they are looped over and ``self[key]`` is assigned ``other[key]``.
    Else, ``other`` is an iterable of ``(key, value)`` iterables.

    Keys that don't match a metadata field or that have an empty value
    are dropped.
    """
    def _set(key, value):
        if key in _ATTR2FIELD and value:
            self.set(self._convert_name(key), value)

    if not other:
        # other is None or an empty container.
        pass
    elif hasattr(other, u'keys'):
        for k in other.keys():
            _set(k, other[k])
    else:
        for k, v in other:
            _set(k, v)

    if kwargs:
        for k, v in kwargs.items():
            _set(k, v)
def set(self, name, value):
    """
    Check, then set a metadata field.
    """
    name = self._convert_name(name)

    if ((name in _ELEMENTSFIELD or name == u'Platform') and
            not isinstance(value, (list, tuple))):
        if isinstance(value, string_types):
            value = [v.strip() for v in value.split(u',')]
        else:
            value = []
    elif name in _LISTFIELDS and not isinstance(value, (list, tuple)):
        if isinstance(value, string_types):
            value = [value]
        else:
            value = []

    if logger.isEnabledFor(logging.WARNING):
        project_name = self[u'Name']
        scheme = get_scheme(self.scheme)
        if name in _PREDICATE_FIELDS and value is not None:
            for v in value:
                # Check that the values are valid.
                if not scheme.is_valid_matcher(v.split(u';')[0]):
                    logger.warning(u'%r: %r is not valid (field %r)',
                                   project_name, v, name)
        elif name in _VERSIONS_FIELDS and value is not None:
            if not scheme.is_valid_constraint_list(value):
                logger.warning(u'%r: %r is not a valid version (field %r)',
                               project_name, value, name)
        elif name in _VERSION_FIELDS and value is not None:
            if not scheme.is_valid_version(value):
                logger.warning(u'%r: %r is not a valid version (field %r)',
                               project_name, value, name)

    if name in _UNICODEFIELDS:
        if name == u'Description':
            value = self._remove_line_prefix(value)

    self._fields[name] = value
def get(self, name, default=_MISSING):
    """
    Get a metadata field.
    """
    name = self._convert_name(name)
    if name not in self._fields:
        if default is _MISSING:
            default = self._default_value(name)
        return default
    if name in _UNICODEFIELDS:
        value = self._fields[name]
        return value
    elif name in _LISTFIELDS:
        value = self._fields[name]
        if value is None:
            return []
        res = []
        for val in value:
            if name not in _LISTTUPLEFIELDS:
                res.append(val)
            else:
                # That's for Project-URL.
                res.append((val[0], val[1]))
        return res
    elif name in _ELEMENTSFIELD:
        value = self._fields[name]
        if isinstance(value, string_types):
            return value.split(u',')
    return self._fields[name]
def check(self, strict=False):
    """
    Check if the metadata is compliant. If strict is True then raise if
    no Name or Version are provided.
    """
    self.set_metadata_version()

    missing, warnings = [], []

    for attr in (u'Name', u'Version'):
        if attr not in self:
            missing.append(attr)

    if strict and missing != []:
        msg = u'missing required metadata: %s' % u', '.join(missing)
        raise MetadataMissingError(msg)

    for attr in (u'Home-page', u'Author'):
        if attr not in self:
            missing.append(attr)

    # The constraint checks below apply to metadata 1.2 only.
    if self[u'Metadata-Version'] != u'1.2':
        return missing, warnings

    scheme = get_scheme(self.scheme)

    def are_valid_constraints(value):
        for v in value:
            if not scheme.is_valid_matcher(v.split(u';')[0]):
                return False
        return True

    for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
                               (_VERSIONS_FIELDS,
                                scheme.is_valid_constraint_list),
                               (_VERSION_FIELDS,
                                scheme.is_valid_version)):
        for field in fields:
            value = self.get(field, None)
            if value is not None and not controller(value):
                warnings.append(u'Wrong value for %r: %s' % (field, value))

    return missing, warnings
def todict(self, skip_missing=False):
    """
    Return fields as a dict.

    Field names will be converted to use the underscore-lowercase style
    instead of hyphen-mixed case (i.e. home_page instead of Home-page).
    """
    self.set_metadata_version()

    mapping_1_0 = (
        (u'metadata_version', u'Metadata-Version'),
        (u'name', u'Name'),
        (u'version', u'Version'),
        (u'summary', u'Summary'),
        (u'home_page', u'Home-page'),
        (u'author', u'Author'),
        (u'author_email', u'Author-email'),
        (u'license', u'License'),
        (u'description', u'Description'),
        (u'keywords', u'Keywords'),
        (u'platform', u'Platform'),
        (u'classifiers', u'Classifier'),
        (u'download_url', u'Download-URL'),
    )

    data = {}
    for key, field_name in mapping_1_0:
        if not skip_missing or field_name in self._fields:
            data[key] = self[field_name]

    if self[u'Metadata-Version'] == u'1.2':
        mapping_1_2 = (
            (u'requires_dist', u'Requires-Dist'),
            (u'requires_python', u'Requires-Python'),
            (u'requires_external', u'Requires-External'),
            (u'provides_dist', u'Provides-Dist'),
            (u'obsoletes_dist', u'Obsoletes-Dist'),
            (u'project_url', u'Project-URL'),
            (u'maintainer', u'Maintainer'),
            (u'maintainer_email', u'Maintainer-email'),
        )
        for key, field_name in mapping_1_2:
            if not skip_missing or field_name in self._fields:
                if key != u'project_url':
                    data[key] = self[field_name]
                else:
                    data[key] = [u','.join(u) for u in self[field_name]]
    elif self[u'Metadata-Version'] == u'1.1':
        mapping_1_1 = (
            (u'provides', u'Provides'),
            (u'requires', u'Requires'),
            (u'obsoletes', u'Obsoletes'),
        )
        for key, field_name in mapping_1_1:
            if not skip_missing or field_name in self._fields:
                data[key] = self[field_name]

    return data
def get_requirements(self, reqts, extras=None, env=None):
    """
    Base method to get dependencies, given a set of extras to satisfy and
    an optional environment context.

    :param reqts: A list of sometimes-wanted dependencies, perhaps
                  dependent on extras and environment.
    :param extras: A list of optional components being requested.
    :param env: An optional environment for marker evaluation.
    """
    if self._legacy:
        result = reqts
    else:
        result = []
        extras = get_extras(extras or [], self.extras)
        for d in reqts:
            if u'extra' not in d and u'environment' not in d:
                # Unconditional dependency.
                include = True
            else:
                if u'extra' not in d:
                    # Only environment-dependent.
                    include = True
                else:
                    include = d.get(u'extra') in extras
                if include:
                    # Not excluded because of extras; check the
                    # environment marker, if any.
                    marker = d.get(u'environment')
                    if marker:
                        include = interpret(marker, env)
            if include:
                result.extend(d[u'requires'])
        for key in (u'build', u'dev', u'test'):
            e = u':%s:' % key
            if e in extras:
                extras.remove(e)
                # A recursive call, but it should terminate since the
                # special extra has been removed from the extras.
                reqts = self._data.get(u'%s_requires' % key, [])
                result.extend(self.get_requirements(reqts, extras=extras,
                                                    env=env))
    return result
def make(self, specification, options=None):
    """
    Make a script.

    :param specification: The specification, which is either a valid
                          export entry specification (to make a script
                          from a callable) or a filename (to make a script
                          by copying from a source location).
    :param options: A dictionary of options controlling script generation.
    :return: A list of all absolute pathnames written to.
    """
    filenames = []
    entry = get_export_entry(specification)
    if entry is None:
        self._copy_script(specification, filenames)
    else:
        self._make_script(entry, filenames, options=options)
    return filenames
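# Usage sketch (assumes distlib is installed; 'mypkg.cli:main' and the
# target directory are made up for illustration):
from distlib.scripts import ScriptMaker

maker = ScriptMaker(source_dir=None, target_dir='/tmp/bin')
written = maker.make('hello = mypkg.cli:main')
# 'written' lists the absolute paths of the generated script(s).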
def make_multiple(self, specifications, options=None):
    """
    Take a list of specifications and make scripts from them.

    :param specifications: A list of specifications.
    :return: A list of all absolute pathnames written to.
    """
    filenames = []
    for specification in specifications:
        filenames.extend(self.make(specification, options))
    return filenames
def __init__(self, scheme='default'):
    """
    Initialise an instance.

    :param scheme: Because locators look for most recent versions, they
                   need to know the version scheme to use. This specifies
                   the current PEP-recommended scheme - use ``'legacy'``
                   if you need to support existing distributions on PyPI.
    """
    self._cache = {}
    self.scheme = scheme
    # Use our own opener so redirects are handled consistently.
    self.opener = build_opener(RedirectHandler())
    # If get_project() is called from locate(), the matcher instance
    # is set from the requirement passed to locate().
    self.matcher = None
def _get_project(self, name):
    """
    For a given project, get a dictionary mapping available versions to
    Distribution instances.

    This should be implemented in subclasses.

    If called from a locate() request, self.matcher will be set to a
    matcher for the requirement to satisfy, otherwise it will be None.
    """
    raise NotImplementedError('Please implement in the subclass')
'Return all the distribution names known to this locator.'
def get_distribution_names(self):
raise NotImplementedError('Please implement in the subclass')
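These two methods form the abstract interface a concrete locator must supply. A minimal sketch of a subclass, entirely hypothetical, serving from an in-memory dict:

from distlib.locators import Locator

class DictLocator(Locator):
    """Hypothetical locator serving distributions from a dict of the form
    {name: {version: Distribution, 'urls': {...}, 'digests': {...}}}."""

    def __init__(self, projects, **kwargs):
        super(DictLocator, self).__init__(**kwargs)
        self.projects = projects

    def _get_project(self, name):
        # locate() expects 'urls' and 'digests' keys next to the versions.
        return self.projects.get(name.lower(), {'urls': {}, 'digests': {}})

    def get_distribution_names(self):
        return set(self.projects)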
'For a given project, get a dictionary mapping available versions to Distribution instances. This calls _get_project to do all the work, and just implements a caching layer on top.'
def get_project(self, name):
if self._cache is None:
    result = self._get_project(name)
elif name in self._cache:
    result = self._cache[name]
else:
    result = self._get_project(name)
    self._cache[name] = result
return result
'Give a URL a score which can be used to choose preferred URLs for a given project release.'
def score_url(self, url):
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
if is_wheel:
    compatible = is_compatible(Wheel(basename), self.wheel_tags)
# Higher tuples score better. The scheme test must be "== 'https'" so
# that https URLs outrank http ones, as prefer_url's docstring promises
# ("!= 'https'" would invert the preference).
return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
        is_wheel, compatible, basename)
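For instance, with made-up URLs, an https archive hosted on PyPI should outscore a plain-http mirror copy, since the tuples compare element by element:

from distlib.locators import SimpleScrapingLocator

loc = SimpleScrapingLocator('https://pypi.org/simple/')   # any concrete locator
s1 = loc.score_url('https://pypi.python.org/packages/source/f/foo/foo-1.0.tar.gz')
s2 = loc.score_url('http://mirror.example.com/foo-1.0.tar.gz')
assert s1 > s2   # https and the PyPI netloc both rank higher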
'Choose one of two URLs where both are candidates for distribution archives for the same version of a distribution (for example, .tar.gz vs. zip). The current implementation favours https:// URLs over http://, archives from PyPI over those from other locations, compatible wheels over incompatible ones, and then falls back to the archive name.'
def prefer_url(self, url1, url2):
result = url2
if url1:
    s1 = self.score_url(url1)
    s2 = self.score_url(url2)
    if s1 > s2:
        result = url1
    if result != url2:
        logger.debug('Not replacing %r with %r', url1, url2)
    else:
        logger.debug('Replacing %r with %r', url1, url2)
return result
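Continuing the same made-up URLs, prefer_url keeps the higher-scoring candidate and logs the decision:

best = loc.prefer_url('http://mirror.example.com/foo-1.0.zip',
                      'https://pypi.python.org/packages/source/f/foo/foo-1.0.tar.gz')
# best is the https, PyPI-hosted archive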
'Attempt to split a filename into project name, version and Python version.'
def split_filename(self, filename, project_name):
return split_filename(filename, project_name)
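A small sketch of what the underlying helper likely returns; the filename is passed without its archive extension:

from distlib.util import split_filename

print(split_filename('python-dateutil-2.4.0', 'python-dateutil'))
# expected: ('python-dateutil', '2.4.0', None) - the third slot holds a
# Python version only if the name carries a '-pyX.Y' suffix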
'See if a URL is a candidate for a download URL for a project (the URL has typically been scraped from an HTML page). If it is, a dictionary is returned with keys "name", "version", "filename" and "url"; otherwise, None is returned.'
def convert_url_to_download_info(self, url, project_name):
def same_project(name1, name2):
    name1, name2 = name1.lower(), name2.lower()
    if name1 == name2:
        result = True
    else:
        # setuptools/distribute replace '-' with '_' in project names,
        # so the two names can legitimately differ in just that way
        result = (name1.replace('_', '-') == name2.replace('_', '-'))
    return result

result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
    logger.debug('%s: version hint in fragment: %r', project_name, frag)
m = HASHER_HASH.match(frag)
if m:
    algo, digest = m.groups()
else:
    algo, digest = None, None
origpath = path
if path and path[-1] == '/':
    path = path[:-1]
if path.endswith('.whl'):
    try:
        wheel = Wheel(path)
        if is_compatible(wheel, self.wheel_tags):
            if project_name is None:
                include = True
            else:
                include = same_project(wheel.name, project_name)
            if include:
                result = {
                    'name': wheel.name,
                    'version': wheel.version,
                    'filename': wheel.filename,
                    'url': urlunparse((scheme, netloc, origpath,
                                       params, query, '')),
                    'python-version': ', '.join(
                        ['.'.join(list(v[2:])) for v in wheel.pyver]),
                }
    except Exception:
        logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
    path = filename = posixpath.basename(path)
    for ext in self.downloadable_extensions:
        if path.endswith(ext):
            path = path[:-len(ext)]
            t = self.split_filename(path, project_name)
            if not t:
                logger.debug('No match for project/version: %s', path)
            else:
                name, version, pyver = t
                if not project_name or same_project(project_name, name):
                    result = {
                        'name': name,
                        'version': version,
                        'filename': filename,
                        'url': urlunparse((scheme, netloc, origpath,
                                           params, query, '')),
                    }
                    if pyver:
                        result['python-version'] = pyver
            break
if result and algo:
    result['%s_digest' % algo] = digest
return result
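A hedged example with a made-up archive URL carrying a sha256 fragment; the comment sketches the expected shape of the result:

from distlib.locators import SimpleScrapingLocator

loc = SimpleScrapingLocator('https://pypi.org/simple/')
info = loc.convert_url_to_download_info(
    'https://files.example.com/foo-1.0.tar.gz#sha256=deadbeef', 'foo')
# Roughly:
# {'name': 'foo', 'version': '1.0', 'filename': 'foo-1.0.tar.gz',
#  'url': 'https://files.example.com/foo-1.0.tar.gz',
#  'sha256_digest': 'deadbeef'}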
'Get a digest from a dictionary by looking at keys of the form \'algo_digest\'. Returns a 2-tuple (algo, digest) if found, else None. Currently looks only for SHA256, then MD5.'
def _get_digest(self, info):
result = None
for algo in ('sha256', 'md5'):
    key = '%s_digest' % algo
    if key in info:
        result = (algo, info[key])
        break
return result
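For example, with hypothetical digests, SHA256 wins when both are present:

info = {'url': 'https://files.example.com/foo-1.0.tar.gz',
        'md5_digest': 'd41d8cd9...', 'sha256_digest': 'e3b0c442...'}
# loc._get_digest(info) -> ('sha256', 'e3b0c442...')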
'Update a result dictionary (the final result from _get_project) with a dictionary for a specific version, which typically holds information gleaned from a filename or URL for an archive for the distribution.'
def _update_version_data(self, result, info):
name = info.pop('name')
version = info.pop('version')
if version in result:
    dist = result[version]
    md = dist.metadata
else:
    dist = make_dist(name, version, scheme=self.scheme)
    md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
    md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
'Find the most recent distribution which matches the given requirement. :param requirement: A requirement of the form \'foo (1.0)\' or perhaps \'foo (>= 1.0, < 2.0, != 1.3)\' :param prereleases: If ``True``, allow pre-release versions to be located. Otherwise, pre-release versions are not returned. :return: A :class:`Distribution` instance, or ``None`` if no such distribution could be located.'
def locate(self, requirement, prereleases=False):
result = None
r = parse_requirement(requirement)
if r is None:
    raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2:   # 'urls' and 'digests' keys are always present
    slist = []
    vcls = matcher.version_class
    for k in versions:
        if k in ('urls', 'digests'):
            continue
        try:
            if not matcher.match(k):
                logger.debug('%s did not match %r', matcher, k)
            elif prereleases or not vcls(k).is_prerelease:
                slist.append(k)
            else:
                logger.debug('skipping pre-release version %s of %s',
                             k, matcher.name)
        except Exception:
            logger.warning('error matching %s with %r', matcher, k)
            pass   # unlikely, but skip versions that fail to match
    if len(slist) > 1:
        slist = sorted(slist, key=scheme.key)
    if slist:
        logger.debug('sorted list: %s', slist)
        version = slist[-1]
        result = versions[version]
if result:
    if r.extras:
        result.extras = r.extras
    result.download_urls = versions.get('urls', {}).get(version, set())
    d = {}
    sd = versions.get('digests', {})
    for url in result.download_urls:
        if url in sd:
            d[url] = sd[url]
    result.digests = d
self.matcher = None
return result
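In practice this is usually reached through a concrete locator or the module-level locate() convenience wrapper; a hedged sketch that needs network access:

from distlib.locators import locate

dist = locate('requests (>= 2.0, < 3.0)')   # uses the default locator stack
if dist is not None:
    print(dist.name, dist.version)
    print(sorted(dist.download_urls))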
'Initialise an instance. :param url: The URL to use for XML-RPC. :param kwargs: Passed to the superclass constructor.'
def __init__(self, url, **kwargs):
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
'Return all the distribution names known to this locator.'
def get_distribution_names(self):
return set(self.client.list_packages())
'Return all the distribution names known to this locator.'
def get_distribution_names(self):
raise NotImplementedError('Not available from this locator')
'Initialise an instance with the Unicode page contents and the URL they came from.'
def __init__(self, data, url):
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
    self.base_url = m.group(1)
'Return the URLs of all the links on a page together with information about their "rel" attribute, for determining which ones to treat as downloads and which ones to queue for further scraping.'
@cached_property def links(self):
def clean(url):
    "Tidy up a URL."
    scheme, netloc, path, params, query, frag = urlparse(url)
    return urlunparse((scheme, netloc, quote(path),
                       params, query, frag))

result = set()
for match in self._href.finditer(self.data):
    d = match.groupdict('')
    rel = (d['rel1'] or d['rel2'] or d['rel3'] or
           d['rel4'] or d['rel5'] or d['rel6'])
    url = d['url1'] or d['url2'] or d['url3']
    url = urljoin(self.base_url, url)
    url = unescape(url)
    url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
    result.add((url, rel))
# Sort the links, highest-sorting URL first.
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
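A hedged sketch of scraping links from an inline HTML snippet; the markup is made up:

from distlib.locators import Page

html = ('<html><body>'
        '<a href="foo-1.0.tar.gz" rel="download">foo 1.0</a>'
        '</body></html>')
page = Page(html, 'https://example.com/simple/foo/')
for url, rel in page.links:
    print(url, rel)
# expected: https://example.com/simple/foo/foo-1.0.tar.gz download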
'Initialise an instance. :param url: The root URL to use for scraping. :param timeout: The timeout, in seconds, to be applied to requests. This defaults to ``None`` (no timeout specified). :param num_workers: The number of worker threads you want to do I/O. This defaults to 10. :param kwargs: Passed to the superclass.'
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# _gplock guards get_project() as a whole; _lock is reserved for
# coordinating the internal worker threads.
self._gplock = threading.RLock()
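Typical usage, hedged since it scrapes the live index and needs network access:

from distlib.locators import SimpleScrapingLocator

loc = SimpleScrapingLocator('https://pypi.org/simple/', timeout=5.0)
dist = loc.locate('sampleproject (>= 1.0)')
if dist is not None:
    print(dist.name, dist.version, sorted(dist.download_urls))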