text_prompt | code_prompt |
---|---|
<SYSTEM_TASK:>
Given a path or URI, check for a ref and split it from the path if it is present,
<END_TASK>
<USER_TASK:>
Description:
def split_ref_from_uri(uri):
# type: (AnyStr) -> Tuple[AnyStr, Optional[AnyStr]]
"""
Given a path or URI, check for a ref and split it from the path if it is present,
returning a 2-tuple of the URI with the ref stripped and the ref itself (or None).
:param AnyStr uri: The path or URI to split
:returns: A 2-tuple of the path or URI and the ref
:rtype: Tuple[AnyStr, Optional[AnyStr]]
""" |
if not isinstance(uri, six.string_types):
raise TypeError("Expected a string, received {0!r}".format(uri))
parsed = urllib_parse.urlparse(uri)
path = parsed.path
ref = None
if "@" in path:
path, _, ref = path.rpartition("@")
parsed = parsed._replace(path=path)
return (urllib_parse.urlunparse(parsed), ref) |
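A quick usage sketch (illustrative only; it assumes the six/urllib_parse imports this snippet relies on are in scope):

# Expected round-trips for split_ref_from_uri
assert split_ref_from_uri("git+https://github.com/pypa/pipenv.git@master") == (
    "git+https://github.com/pypa/pipenv.git", "master"
)
assert split_ref_from_uri("/tmp/some/local/path") == ("/tmp/some/local/path", None)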
<SYSTEM_TASK:>
Get a standardized key for an InstallRequirement.
<END_TASK>
<USER_TASK:>
Description:
def key_from_ireq(ireq):
"""Get a standardized key for an InstallRequirement.""" |
if ireq.req is None and ireq.link is not None:
return str(ireq.link)
else:
return key_from_req(ireq.req) |
<SYSTEM_TASK:>
Formats a packaging.requirements.Requirement with a lowercase name.
<END_TASK>
<USER_TASK:>
Description:
def _requirement_to_str_lowercase_name(requirement):
"""
Formats a packaging.requirements.Requirement with a lowercase name.
This is simply a copy of
https://github.com/pypa/packaging/blob/16.8/packaging/requirements.py#L109-L124
modified to lowercase the dependency name.
Previously, we were invoking the original Requirement.__str__ method and
lower-casing the entire result, which would lowercase the name, *and* other,
important stuff that should not be lower-cased (such as the marker). See
this issue for more information: https://github.com/pypa/pipenv/issues/2113.
""" |
parts = [requirement.name.lower()]
if requirement.extras:
parts.append("[{0}]".format(",".join(sorted(requirement.extras))))
if requirement.specifier:
parts.append(str(requirement.specifier))
if requirement.url:
parts.append("@ {0}".format(requirement.url))
if requirement.marker:
parts.append("; {0}".format(requirement.marker))
return "".join(parts) |
<SYSTEM_TASK:>
Generic formatter for pretty printing the specifier part of
<END_TASK>
<USER_TASK:>
Description:
def format_specifier(ireq):
"""
Generic formatter for pretty printing the specifier part of
InstallRequirements to the terminal.
""" |
# TODO: Ideally, this is carried over to the pip library itself
specs = ireq.specifier._specs if ireq.req is not None else []
specs = sorted(specs, key=lambda x: x._spec[1])
return ",".join(str(s) for s in specs) or "<any>" |
<SYSTEM_TASK:>
Get a cleaned list of all the candidates with valid specifiers in the `requires_python` attributes.
<END_TASK>
<USER_TASK:>
Description:
def clean_requires_python(candidates):
"""Get a cleaned list of all the candidates with valid specifiers in the `requires_python` attributes.""" |
all_candidates = []
sys_version = ".".join(map(str, sys.version_info[:3]))
from packaging.version import parse as parse_version
py_version = parse_version(os.environ.get("PIP_PYTHON_VERSION", sys_version))
for c in candidates:
# getattr evaluates its default eagerly, so resolve the fallback
# attribute lazily instead of always calling attrgetter("location.requires_python")
requires_python = getattr(c, "requires_python", None)
if requires_python is None:
    requires_python = getattr(getattr(c, "location", None), "requires_python", None)
if requires_python:
# Old specifications had people setting this to single digits
# which is effectively the same as '>=digit,<digit+1'
if requires_python.isdigit():
requires_python = ">={0},<{1}".format(
requires_python, int(requires_python) + 1
)
try:
specifierset = SpecifierSet(requires_python)
except InvalidSpecifier:
continue
else:
if not specifierset.contains(py_version):
continue
all_candidates.append(c)
return all_candidates |
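The single-digit rewrite is the subtle step above; here is a standalone sketch of just that rule (a hypothetical helper, not part of the original module):

from packaging.specifiers import SpecifierSet

def _expand_requires_python(requires_python):
    # A bare '3' is read as '>=3,<4', mirroring the branch above
    if requires_python.isdigit():
        requires_python = ">={0},<{1}".format(requires_python, int(requires_python) + 1)
    return SpecifierSet(requires_python)

assert "3.7.3" in _expand_requires_python("3")
assert "4.0.0" not in _expand_requires_python("3")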
<SYSTEM_TASK:>
Given a package name, get the variants of its name for both the canonicalized
<END_TASK>
<USER_TASK:>
Description:
def get_name_variants(pkg):
# type: (STRING_TYPE) -> Set[STRING_TYPE]
"""
Given a package name, get the variants of its name for both the canonicalized
and "safe" forms.
:param AnyStr pkg: The package to look up
:returns: A set of names.
:rtype: Set
""" |
if not isinstance(pkg, six.string_types):
raise TypeError("must provide a string to derive package names")
from pkg_resources import safe_name
from packaging.utils import canonicalize_name
pkg = pkg.lower()
names = {safe_name(pkg), canonicalize_name(pkg), pkg.replace("-", "_")}
return names |
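Illustration (assumes setuptools' pkg_resources and packaging are available):

print(sorted(get_name_variants("Flask-SQLAlchemy")))
# -> ['flask-sqlalchemy', 'flask_sqlalchemy']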
<SYSTEM_TASK:>
Return the distribution name with version.
<END_TASK>
<USER_TASK:>
Description:
def _get_name_and_version(name, version, for_filename=False):
"""Return the distribution name with version.
If for_filename is true, return a filename-escaped form.""" |
if for_filename:
# For both name and version any runs of non-alphanumeric or '.'
# characters are replaced with a single '-'. Additionally any
# spaces in the version string become '.'
name = _FILESAFE.sub('-', name)
version = _FILESAFE.sub('-', version.replace(' ', '.'))
return '%s-%s' % (name, version) |
<SYSTEM_TASK:>
Read the metadata values from a file path.
<END_TASK>
<USER_TASK:>
Description:
def read(self, filepath):
"""Read the metadata values from a file path.""" |
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close() |
<SYSTEM_TASK:>
Write the metadata fields to filepath.
<END_TASK>
<USER_TASK:>
Description:
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath.""" |
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close() |
<SYSTEM_TASK:>
Set metadata values from the given iterable `other` and kwargs.
<END_TASK>
<USER_TASK:>
Description:
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
Behavior is like `dict.update`: If `other` has a ``keys`` method,
they are looped over and ``self[key]`` is assigned ``other[key]``.
Else, ``other`` is an iterable of ``(key, value)`` iterables.
Keys that don't match a metadata field or that have an empty value are
dropped.
""" |
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v) |
<SYSTEM_TASK:>
Control then set a metadata field.
<END_TASK>
<USER_TASK:>
Description:
def set(self, name, value):
"""Control then set a metadata field.""" |
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning(
"'%s': '%s' is not valid (field '%s')",
project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning("'%s': '%s' is not a valid version (field '%s')",
project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning("'%s': '%s' is not a valid version (field '%s')",
project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value |
<SYSTEM_TASK:>
Return fields as a dict.
<END_TASK>
<USER_TASK:>
Description:
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
""" |
self.set_metadata_version()
mapping_1_0 = (
('metadata_version', 'Metadata-Version'),
('name', 'Name'),
('version', 'Version'),
('summary', 'Summary'),
('home_page', 'Home-page'),
('author', 'Author'),
('author_email', 'Author-email'),
('license', 'License'),
('description', 'Description'),
('keywords', 'Keywords'),
('platform', 'Platform'),
('classifiers', 'Classifier'),
('download_url', 'Download-URL'),
)
data = {}
for key, field_name in mapping_1_0:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
if self['Metadata-Version'] == '1.2':
mapping_1_2 = (
('requires_dist', 'Requires-Dist'),
('requires_python', 'Requires-Python'),
('requires_external', 'Requires-External'),
('provides_dist', 'Provides-Dist'),
('obsoletes_dist', 'Obsoletes-Dist'),
('project_url', 'Project-URL'),
('maintainer', 'Maintainer'),
('maintainer_email', 'Maintainer-email'),
)
for key, field_name in mapping_1_2:
if not skip_missing or field_name in self._fields:
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
elif self['Metadata-Version'] == '1.1':
mapping_1_1 = (
('provides', 'Provides'),
('requires', 'Requires'),
('obsoletes', 'Obsoletes'),
)
for key, field_name in mapping_1_1:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
return data |
<SYSTEM_TASK:>
Parse a requirements file and yield InstallRequirement instances.
<END_TASK>
<USER_TASK:>
Description:
def parse_requirements(
filename, # type: str
finder=None, # type: Optional[PackageFinder]
comes_from=None, # type: Optional[str]
options=None, # type: Optional[optparse.Values]
session=None, # type: Optional[PipSession]
constraint=False, # type: bool
wheel_cache=None, # type: Optional[WheelCache]
use_pep517=None # type: Optional[bool]
):
# type: (...) -> Iterator[InstallRequirement]
"""Parse a requirements file and yield InstallRequirement instances.
:param filename: Path or url of requirements file.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: cli options.
:param session: Instance of pip.download.PipSession.
:param constraint: If true, parsing a constraint file rather than
requirements file.
:param wheel_cache: Instance of pip.wheel.WheelCache
:param use_pep517: Value of the --use-pep517 option.
""" |
if session is None:
raise TypeError(
"parse_requirements() missing 1 required keyword argument: "
"'session'"
)
_, content = get_file_content(
filename, comes_from=comes_from, session=session
)
lines_enum = preprocess(content, options)
for line_number, line in lines_enum:
req_iter = process_line(line, filename, line_number, finder,
comes_from, options, session, wheel_cache,
use_pep517=use_pep517, constraint=constraint)
for req in req_iter:
yield req |
<SYSTEM_TASK:>
Split, filter, and join lines, and return a line iterator
<END_TASK>
<USER_TASK:>
Description:
def preprocess(content, options):
# type: (Text, Optional[optparse.Values]) -> ReqFileLines
"""Split, filter, and join lines, and return a line iterator
:param content: the content of the requirements file
:param options: cli options
""" |
lines_enum = enumerate(content.splitlines(), start=1) # type: ReqFileLines
lines_enum = join_lines(lines_enum)
lines_enum = ignore_comments(lines_enum)
lines_enum = skip_regex(lines_enum, options)
lines_enum = expand_env_variables(lines_enum)
return lines_enum |
<SYSTEM_TASK:>
Replace all environment variables that can be retrieved via `os.getenv`.
<END_TASK>
<USER_TASK:>
Description:
def expand_env_variables(lines_enum):
# type: (ReqFileLines) -> ReqFileLines
"""Replace all environment variables that can be retrieved via `os.getenv`.
The only allowed format for environment variables defined in the
requirement file is `${MY_VARIABLE_1}` to ensure two things:
1. Strings that contain a `$` aren't accidentally (partially) expanded.
2. Ensure consistency across platforms for requirement files.
These points are the result of a discussion on the `github pull
request #3514 <https://github.com/pypa/pip/pull/3514>`_.
Valid characters in variable names follow the `POSIX standard
<http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
to uppercase letter, digits and the `_` (underscore).
""" |
for line_number, line in lines_enum:
for env_var, var_name in ENV_VAR_RE.findall(line):
value = os.getenv(var_name)
if not value:
continue
line = line.replace(env_var, value)
yield line_number, line |
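A self-contained sketch: the real ENV_VAR_RE lives elsewhere in this module, so the pattern below is an assumption consistent with the documented `${MY_VARIABLE_1}` format:

import os
import re

# Assumed pattern: ${UPPERCASE_DIGITS_UNDERSCORE} only, per the docstring
ENV_VAR_RE = re.compile(r'(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})')

os.environ['INDEX_HOST'] = 'pypi.example.org'
lines = enumerate(['--index-url https://${INDEX_HOST}/simple'], start=1)
print(list(expand_env_variables(lines)))
# -> [(1, '--index-url https://pypi.example.org/simple')]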
<SYSTEM_TASK:>
Restore the original SIGINT handler after finishing.
<END_TASK>
<USER_TASK:>
Description:
def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
""" |
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler) |
<SYSTEM_TASK:>
Iterates over all direct child nodes of the node. This iterates
<END_TASK>
<USER_TASK:>
Description:
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
""" |
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item |
<SYSTEM_TASK:>
Find all the nodes of a given type. If the type is a tuple,
<END_TASK>
<USER_TASK:>
Description:
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
""" |
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result |
<SYSTEM_TASK:>
Reset the context of a node and all child nodes. Per default the
<END_TASK>
<USER_TASK:>
Description:
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
parser will generate all nodes with a 'load' context, as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
""" |
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self |
<SYSTEM_TASK:>
Set the line numbers of the node and children.
<END_TASK>
<USER_TASK:>
Description:
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children.""" |
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self |
<SYSTEM_TASK:>
Set the environment for all nodes.
<END_TASK>
<USER_TASK:>
Description:
def set_environment(self, environment):
"""Set the environment for all nodes.""" |
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self |
<SYSTEM_TASK:>
Return a const object if the value is representable as
<END_TASK>
<USER_TASK:>
Description:
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
""" |
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment) |
<SYSTEM_TASK:>
Build a wheel from a source directory using PEP 517 hooks.
<END_TASK>
<USER_TASK:>
Description:
def build_wheel(source_dir, wheel_dir, config_settings=None):
"""Build a wheel from a source directory using PEP 517 hooks.
:param str source_dir: Source directory containing pyproject.toml
:param str wheel_dir: Target directory to create wheel in
:param dict config_settings: Options to pass to build backend
This is a blocking function which will run pip in a subprocess to install
build requirements.
""" |
if config_settings is None:
config_settings = {}
requires, backend = _load_pyproject(source_dir)
hooks = Pep517HookCaller(source_dir, backend)
with BuildEnvironment() as env:
env.pip_install(requires)
reqs = hooks.get_requires_for_build_wheel(config_settings)
env.pip_install(reqs)
return hooks.build_wheel(wheel_dir, config_settings) |
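A hedged usage sketch for the PEP 517 front end above (the paths are examples; it assumes a pyproject.toml-based project in the current directory):

import os

source_dir = "."                      # directory containing pyproject.toml
wheel_dir = os.path.join(source_dir, "dist")
os.makedirs(wheel_dir, exist_ok=True)
# build_wheel returns the basename of the built wheel, per the PEP 517 hook
print("built", build_wheel(source_dir, wheel_dir))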
<SYSTEM_TASK:>
Build an sdist from a source directory using PEP 517 hooks.
<END_TASK>
<USER_TASK:>
Description:
def build_sdist(source_dir, sdist_dir, config_settings=None):
"""Build an sdist from a source directory using PEP 517 hooks.
:param str source_dir: Source directory containing pyproject.toml
:param str sdist_dir: Target directory to place sdist in
:param dict config_settings: Options to pass to build backend
This is a blocking function which will run pip in a subprocess to install
build requirements.
""" |
if config_settings is None:
config_settings = {}
requires, backend = _load_pyproject(source_dir)
hooks = Pep517HookCaller(source_dir, backend)
with BuildEnvironment() as env:
env.pip_install(requires)
reqs = hooks.get_requires_for_build_sdist(config_settings)
env.pip_install(reqs)
return hooks.build_sdist(sdist_dir, config_settings) |
<SYSTEM_TASK:>
Install dependencies into this env by calling pip in a subprocess
<END_TASK>
<USER_TASK:>
Description:
def pip_install(self, reqs):
"""Install dependencies into this env by calling pip in a subprocess""" |
if not reqs:
return
log.info('Calling pip to install %s', reqs)
check_call([
sys.executable, '-m', 'pip', 'install', '--ignore-installed',
'--prefix', self.path] + list(reqs)) |
<SYSTEM_TASK:>
Move all the children of the current node to newParent.
<END_TASK>
<USER_TASK:>
Description:
def reparentChildren(self, newParent):
"""Move all the children of the current node to newParent.
This is needed so that trees that don't store text as nodes move the
text in the correct way
:arg newParent: the node to move all this node's children to
""" |
# XXX - should this method be made more general?
for child in self.childNodes:
newParent.appendChild(child)
self.childNodes = [] |
<SYSTEM_TASK:>
Check if an element exists between the end of the active
<END_TASK>
<USER_TASK:>
Description:
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false""" |
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False |
<SYSTEM_TASK:>
Switch the function used to insert an element from the
<END_TASK>
<USER_TASK:>
Description:
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again""" |
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal |
<SYSTEM_TASK:>
Create an element and insert it into the tree
<END_TASK>
<USER_TASK:>
Description:
def insertElementTable(self, token):
"""Create an element and insert it into the tree""" |
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element |
<SYSTEM_TASK:>
Evaluate a marker.
<END_TASK>
<USER_TASK:>
Description:
def evaluate(self, environment=None):
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
""" |
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment) |
<SYSTEM_TASK:>
Monkey patch pip.Wheel to allow all wheels
<END_TASK>
<USER_TASK:>
Description:
def _allow_all_wheels():
"""Monkey patch pip.Wheel to allow all wheels
The usual checks against platforms and Python versions are ignored to allow
fetching all available entries in PyPI. This also saves the candidate cache
and set a new one, or else the results from the previous non-patched calls
will interfere.
""" |
original_wheel_supported = Wheel.supported
original_support_index_min = Wheel.support_index_min
Wheel.supported = _wheel_supported
Wheel.support_index_min = _wheel_support_index_min
try:
    yield
finally:
    # Restore the patched attributes even if the caller's block raises
    Wheel.supported = original_wheel_supported
    Wheel.support_index_min = original_support_index_min |
<SYSTEM_TASK:>
Create a temporary directory and store its path in self.path
<END_TASK>
<USER_TASK:>
Description:
def create(self):
"""Create a temporary directory and store its path in self.path
""" |
if self.path is not None:
logger.debug(
"Skipped creation of temporary directory: {}".format(self.path)
)
return
# We realpath here because some systems have their default tmpdir
# symlinked to another directory. This tends to confuse build
# scripts, so we canonicalize the path by traversing potential
# symlinks here.
self.path = os.path.realpath(
tempfile.mkdtemp(prefix="pip-{}-".format(self.kind))
)
self._register_finalizer()
logger.debug("Created temporary directory: {}".format(self.path)) |
<SYSTEM_TASK:>
Remove the temporary directory created and reset state
<END_TASK>
<USER_TASK:>
Description:
def cleanup(self):
"""Remove the temporary directory created and reset state
""" |
if getattr(self._finalizer, "detach", None) and self._finalizer.detach():
if os.path.exists(self.path):
try:
rmtree(self.path)
except OSError:
pass
else:
self.path = None |
<SYSTEM_TASK:>
Generates a series of temporary names.
<END_TASK>
<USER_TASK:>
Description:
def _generate_names(cls, name):
"""Generates a series of temporary names.
The algorithm replaces the leading characters in the name
with ones that are valid filesystem characters, but are not
valid package names (for both Python and pip definitions of
package).
""" |
for i in range(1, len(name)):
for candidate in itertools.combinations_with_replacement(
cls.LEADING_CHARS, i - 1):
new_name = '~' + ''.join(candidate) + name[i:]
if new_name != name:
yield new_name
# If we make it this far, we will have to make a longer name
for i in range(len(cls.LEADING_CHARS)):
for candidate in itertools.combinations_with_replacement(
cls.LEADING_CHARS, i):
new_name = '~' + ''.join(candidate) + name
if new_name != name:
yield new_name |
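A runnable sketch of the naming sequence (the Demo class and its LEADING_CHARS are hypothetical; in pip this method lives on a temp-directory class with a longer character set):

import itertools

class Demo(object):
    LEADING_CHARS = "-~.="  # hypothetical; the real set is longer
    _generate_names = classmethod(_generate_names)

print(list(itertools.islice(Demo._generate_names("pkg"), 6)))
# -> ['~kg', '~-g', '~~g', '~.g', '~=g', '~pkg']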
<SYSTEM_TASK:>
Detect the encoding of the given byte string.
<END_TASK>
<USER_TASK:>
Description:
def detect(byte_str):
"""
Detect the encoding of the given byte string.
:param byte_str: The byte sequence to examine.
:type byte_str: ``bytes`` or ``bytearray``
""" |
if not isinstance(byte_str, bytearray):
if not isinstance(byte_str, bytes):
raise TypeError('Expected object of type bytes or bytearray, got: '
'{0}'.format(type(byte_str)))
else:
byte_str = bytearray(byte_str)
detector = UniversalDetector()
detector.feed(byte_str)
return detector.close() |
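Hedged usage sketch (assumes the chardet package this function ships in):

import chardet

print(chardet.detect('Привет, мир'.encode('utf-8')))
# e.g. {'encoding': 'utf-8', 'confidence': 0.99, 'language': ''}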
<SYSTEM_TASK:>
Convert escaped markup back into a text string. This replaces
<END_TASK>
<USER_TASK:>
Description:
def unescape(self):
"""Convert escaped markup back into a text string. This replaces
HTML entities with the characters they represent.
>>> Markup('Main » <em>About</em>').unescape()
'Main » <em>About</em>'
""" |
from ._constants import HTML_ENTITIES
def handle_match(m):
name = m.group(1)
if name in HTML_ENTITIES:
return unichr(HTML_ENTITIES[name])
try:
if name[:2] in ("#x", "#X"):
return unichr(int(name[2:], 16))
elif name.startswith("#"):
return unichr(int(name[1:]))
except ValueError:
pass
# Don't modify unexpected input.
return m.group()
return _entity_re.sub(handle_match, text_type(self)) |
<SYSTEM_TASK:>
Ensure that if a link can be found for this, that it is found.
<END_TASK>
<USER_TASK:>
Description:
def populate_link(self, finder, upgrade, require_hashes):
# type: (PackageFinder, bool, bool) -> None
"""Ensure that if a link can be found for this, that it is found.
Note that self.link may still be None - if upgrade is False and the
requirement is already installed.
If require_hashes is True, don't use the wheel cache, because cached
wheels, always built locally, have different hashes than the files
downloaded from the index server and thus throw false hash mismatches.
Furthermore, cached wheels at present have nondeterministic contents due
to file modification times.
""" |
if self.link is None:
self.link = finder.find_requirement(self, upgrade)
if self._wheel_cache is not None and not require_hashes:
old_link = self.link
self.link = self._wheel_cache.get(self.link, self.name)
if old_link != self.link:
logger.debug('Using cached wheel link: %s', self.link) |
<SYSTEM_TASK:>
Return whether I am pinned to an exact version.
<END_TASK>
<USER_TASK:>
Description:
def is_pinned(self):
# type: () -> bool
"""Return whether I am pinned to an exact version.
For example, some-package==1.2 is pinned; some-package>1.2 is not.
""" |
specifiers = self.specifier
return (len(specifiers) == 1 and
next(iter(specifiers)).operator in {'==', '==='}) |
<SYSTEM_TASK:>
Return a hash-comparer that considers my option- and URL-based
<END_TASK>
<USER_TASK:>
Description:
def hashes(self, trust_internet=True):
# type: (bool) -> Hashes
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
""" |
good_hashes = self.options.get('hashes', {}).copy()
link = self.link if trust_internet else self.original_link
if link and link.hash:
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes) |
<SYSTEM_TASK:>
Ensure that project metadata is available.
<END_TASK>
<USER_TASK:>
Description:
def prepare_metadata(self):
# type: () -> None
"""Ensure that project metadata is available.
Under PEP 517, call the backend hook to prepare the metadata.
Under legacy processing, call setup.py egg-info.
""" |
assert self.source_dir
with indent_log():
if self.use_pep517:
self.prepare_pep517_metadata()
else:
self.run_egg_info()
if not self.req:
if isinstance(parse_version(self.metadata["Version"]), Version):
op = "=="
else:
op = "==="
self.req = Requirement(
"".join([
self.metadata["Name"],
op,
self.metadata["Version"],
])
)
self._correct_build_location()
else:
metadata_name = canonicalize_name(self.metadata["Name"])
if canonicalize_name(self.req.name) != metadata_name:
logger.warning(
'Generating metadata for package %s '
'produced metadata for project name %s. Fix your '
'#egg=%s fragments.',
self.name, metadata_name, self.name
)
self.req = Requirement(metadata_name) |
<SYSTEM_TASK:>
Return a pkg_resources.Distribution for this requirement
<END_TASK>
<USER_TASK:>
Description:
def get_dist(self):
# type: () -> Distribution
"""Return a pkg_resources.Distribution for this requirement""" |
if self.metadata_directory:
base_dir, distinfo = os.path.split(self.metadata_directory)
metadata = pkg_resources.PathMetadata(
base_dir, self.metadata_directory
)
dist_name = os.path.splitext(distinfo)[0]
typ = pkg_resources.DistInfoDistribution
else:
egg_info = self.egg_info_path.rstrip(os.path.sep)
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
# https://github.com/python/mypy/issues/1174
typ = pkg_resources.Distribution # type: ignore
return typ(
base_dir,
project_name=dist_name,
metadata=metadata,
) |
<SYSTEM_TASK:>
Uninstall the distribution currently satisfying this requirement.
<END_TASK>
<USER_TASK:>
Description:
def uninstall(self, auto_confirm=False, verbose=False,
use_user_site=False):
# type: (bool, bool, bool) -> Optional[UninstallPathSet]
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
""" |
if not self.check_if_exists(use_user_site):
logger.warning("Skipping %s as it is not installed.", self.name)
return None
dist = self.satisfied_by or self.conflicts_with
uninstalled_pathset = UninstallPathSet.from_dist(dist)
uninstalled_pathset.remove(auto_confirm, verbose)
return uninstalled_pathset |
<SYSTEM_TASK:>
Marshal cmd line args into a requirement set.
<END_TASK>
<USER_TASK:>
Description:
def populate_requirement_set(requirement_set, # type: RequirementSet
args, # type: List[str]
options, # type: Values
finder, # type: PackageFinder
session, # type: PipSession
name, # type: str
wheel_cache # type: Optional[WheelCache]
):
# type: (...) -> None
"""
Marshal cmd line args into a requirement set.
""" |
# NOTE: As a side-effect, options.require_hashes and
# requirement_set.require_hashes may be updated
for filename in options.constraints:
for req_to_add in parse_requirements(
filename,
constraint=True, finder=finder, options=options,
session=session, wheel_cache=wheel_cache):
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for req in args:
req_to_add = install_req_from_line(
req, None, isolated=options.isolated_mode,
use_pep517=options.use_pep517,
wheel_cache=wheel_cache
)
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for req in options.editables:
req_to_add = install_req_from_editable(
req,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
wheel_cache=wheel_cache
)
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for filename in options.requirements:
for req_to_add in parse_requirements(
filename,
finder=finder, options=options, session=session,
wheel_cache=wheel_cache,
use_pep517=options.use_pep517):
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
# If --require-hashes was a line in a requirements file, tell
# RequirementSet about it:
requirement_set.require_hashes = options.require_hashes
if not (args or options.editables or options.requirements):
opts = {'name': name}
if options.find_links:
raise CommandError(
'You must give at least one requirement to %(name)s '
'(maybe you meant "pip %(name)s %(links)s"?)' %
dict(opts, links=' '.join(options.find_links)))
else:
raise CommandError(
'You must give at least one requirement to %(name)s '
'(see "pip help %(name)s")' % opts) |
<SYSTEM_TASK:>
Create a package finder appropriate to this requirement command.
<END_TASK>
<USER_TASK:>
Description:
def _build_package_finder(
self,
options, # type: Values
session, # type: PipSession
platform=None, # type: Optional[str]
python_versions=None, # type: Optional[List[str]]
abi=None, # type: Optional[str]
implementation=None # type: Optional[str]
):
# type: (...) -> PackageFinder
"""
Create a package finder appropriate to this requirement command.
""" |
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug(
'Ignoring indexes: %s',
','.join(redact_password_from_url(url) for url in index_urls),
)
index_urls = []
return PackageFinder(
find_links=options.find_links,
format_control=options.format_control,
index_urls=index_urls,
trusted_hosts=options.trusted_hosts,
allow_all_prereleases=options.pre,
session=session,
platform=platform,
versions=python_versions,
abi=abi,
implementation=implementation,
prefer_binary=options.prefer_binary,
) |
<SYSTEM_TASK:>
Determine if `int_` falls into one of the ranges in `ranges`.
<END_TASK>
<USER_TASK:>
Description:
def intranges_contain(int_, ranges):
"""Determine if `int_` falls into one of the ranges in `ranges`.""" |
tuple_ = _encode_range(int_, 0)
pos = bisect.bisect_left(ranges, tuple_)
# we could be immediately ahead of a tuple (start, end)
# with start <= int_ < end
if pos > 0:
left, right = _decode_range(ranges[pos-1])
if left <= int_ < right:
return True
# or we could be immediately behind a tuple (int_, end)
if pos < len(ranges):
left, _ = _decode_range(ranges[pos])
if left == int_:
return True
return False |
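A worked sketch of the packed-range encoding this search relies on (the helpers below mirror idna's intranges module, where a half-open range [start, end) packs into (start << 32) | end):

import bisect

def _encode_range(start, end):
    return (start << 32) | end

def _decode_range(r):
    return (r >> 32), (r & ((1 << 32) - 1))

ranges = sorted([_encode_range(0x41, 0x5B), _encode_range(0x61, 0x7B)])  # A-Z, a-z
assert intranges_contain(ord('Q'), ranges)
assert not intranges_contain(ord('@'), ranges)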
<SYSTEM_TASK:>
Return the distro ID of the OS distribution, as a string.
<END_TASK>
<USER_TASK:>
Description:
def id(self):
"""Return the distro ID of the OS distribution, as a string.
For details, see :func:`distro.id`.
""" |
def normalize(distro_id, table):
distro_id = distro_id.lower().replace(' ', '_')
return table.get(distro_id, distro_id)
distro_id = self.os_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_OS_ID)
distro_id = self.lsb_release_attr('distributor_id')
if distro_id:
return normalize(distro_id, NORMALIZED_LSB_ID)
distro_id = self.distro_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
distro_id = self.uname_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
return '' |
<SYSTEM_TASK:>
Return the name of the OS distribution, as a string.
<END_TASK>
<USER_TASK:>
Description:
def name(self, pretty=False):
"""
Return the name of the OS distribution, as a string.
For details, see :func:`distro.name`.
""" |
name = self.os_release_attr('name') \
or self.lsb_release_attr('distributor_id') \
or self.distro_release_attr('name') \
or self.uname_attr('name')
if pretty:
name = self.os_release_attr('pretty_name') \
or self.lsb_release_attr('description')
if not name:
name = self.distro_release_attr('name') \
or self.uname_attr('name')
version = self.version(pretty=True)
if version:
name = name + ' ' + version
return name or '' |
<SYSTEM_TASK:>
Return the version of the OS distribution, as a string.
<END_TASK>
<USER_TASK:>
Description:
def version(self, pretty=False, best=False):
"""
Return the version of the OS distribution, as a string.
For details, see :func:`distro.version`.
""" |
versions = [
self.os_release_attr('version_id'),
self.lsb_release_attr('release'),
self.distro_release_attr('version_id'),
self._parse_distro_release_content(
self.os_release_attr('pretty_name')).get('version_id', ''),
self._parse_distro_release_content(
self.lsb_release_attr('description')).get('version_id', ''),
self.uname_attr('release')
]
version = ''
if best:
# This algorithm uses the last version in priority order that has
# the best precision. If the versions are not in conflict, that
# does not matter; otherwise, using the last one instead of the
# first one might be considered a surprise.
for v in versions:
if v.count(".") > version.count(".") or version == '':
version = v
else:
for v in versions:
if v != '':
version = v
break
if pretty and version and self.codename():
version = u'{0} ({1})'.format(version, self.codename())
return version |
<SYSTEM_TASK:>
Return the version of the OS distribution, as a tuple of version
<END_TASK>
<USER_TASK:>
Description:
def version_parts(self, best=False):
"""
Return the version of the OS distribution, as a tuple of version
numbers.
For details, see :func:`distro.version_parts`.
""" |
version_str = self.version(best=best)
if version_str:
version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
matches = version_regex.match(version_str)
if matches:
major, minor, build_number = matches.groups()
return major, minor or '', build_number or ''
return '', '', '' |
<SYSTEM_TASK:>
Return certain machine-readable information about the OS
<END_TASK>
<USER_TASK:>
Description:
def info(self, pretty=False, best=False):
"""
Return certain machine-readable information about the OS
distribution.
For details, see :func:`distro.info`.
""" |
return dict(
id=self.id(),
version=self.version(pretty, best),
version_parts=dict(
major=self.major_version(best),
minor=self.minor_version(best),
build_number=self.build_number(best)
),
like=self.like(),
codename=self.codename(),
) |
<SYSTEM_TASK:>
Get the information items from the specified os-release file.
<END_TASK>
<USER_TASK:>
Description:
def _os_release_info(self):
"""
Get the information items from the specified os-release file.
Returns:
A dictionary containing all information items.
""" |
if os.path.isfile(self.os_release_file):
with open(self.os_release_file) as release_file:
return self._parse_os_release_content(release_file)
return {} |
<SYSTEM_TASK:>
Get the information items from the lsb_release command output.
<END_TASK>
<USER_TASK:>
Description:
def _lsb_release_info(self):
"""
Get the information items from the lsb_release command output.
Returns:
A dictionary containing all information items.
""" |
if not self.include_lsb:
return {}
with open(os.devnull, 'w') as devnull:
try:
cmd = ('lsb_release', '-a')
stdout = subprocess.check_output(cmd, stderr=devnull)
except OSError: # Command not found
return {}
content = stdout.decode(sys.getfilesystemencoding()).splitlines()
return self._parse_lsb_release_content(content) |
<SYSTEM_TASK:>
Parse the output of the lsb_release command.
<END_TASK>
<USER_TASK:>
Description:
def _parse_lsb_release_content(lines):
"""
Parse the output of the lsb_release command.
Parameters:
* lines: Iterable through the lines of the lsb_release output.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
""" |
props = {}
for line in lines:
kv = line.strip('\n').split(':', 1)
if len(kv) != 2:
# Ignore lines without colon.
continue
k, v = kv
props.update({k.replace(' ', '_').lower(): v.strip()})
return props |
<SYSTEM_TASK:>
Get the information items from the specified distro release file.
<END_TASK>
<USER_TASK:>
Description:
def _distro_release_info(self):
"""
Get the information items from the specified distro release file.
Returns:
A dictionary containing all information items.
""" |
if self.distro_release_file:
# If it was specified, we use it and parse what we can, even if
# its file name or content does not match the expected pattern.
distro_info = self._parse_distro_release_file(
self.distro_release_file)
basename = os.path.basename(self.distro_release_file)
# The file name pattern for user-specified distro release files
# is somewhat more tolerant (compared to when searching for the
# file), because we want to use what was specified as best as
# possible.
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if match:
distro_info['id'] = match.group(1)
return distro_info
else:
try:
basenames = os.listdir(_UNIXCONFDIR)
# We sort for repeatability in cases where there are multiple
# distro specific files; e.g. CentOS, Oracle, Enterprise all
# containing `redhat-release` on top of their own.
basenames.sort()
except OSError:
# This may occur when /etc is not readable but we can't be
# sure about the *-release files. Check common entries of
# /etc for information. If they turn out to not be there the
# error is handled in `_parse_distro_release_file()`.
basenames = ['SuSE-release',
'arch-release',
'base-release',
'centos-release',
'fedora-release',
'gentoo-release',
'mageia-release',
'mandrake-release',
'mandriva-release',
'mandrivalinux-release',
'manjaro-release',
'oracle-release',
'redhat-release',
'sl-release',
'slackware-version']
for basename in basenames:
if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
continue
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if match:
filepath = os.path.join(_UNIXCONFDIR, basename)
distro_info = self._parse_distro_release_file(filepath)
if 'name' in distro_info:
# The name is always present if the pattern matches
self.distro_release_file = filepath
distro_info['id'] = match.group(1)
return distro_info
return {} |
<SYSTEM_TASK:>
Parse a distro release file.
<END_TASK>
<USER_TASK:>
Description:
def _parse_distro_release_file(self, filepath):
"""
Parse a distro release file.
Parameters:
* filepath: Path name of the distro release file.
Returns:
A dictionary containing all information items.
""" |
try:
with open(filepath) as fp:
# Only parse the first line. For instance, on SLES there
# are multiple lines. We don't want them...
return self._parse_distro_release_content(fp.readline())
except (OSError, IOError):
# Ignore not being able to read a specific, seemingly version
# related file.
# See https://github.com/nir0s/distro/issues/162
return {} |
<SYSTEM_TASK:>
Parse a line from a distro release file.
<END_TASK>
<USER_TASK:>
Description:
def _parse_distro_release_content(line):
"""
Parse a line from a distro release file.
Parameters:
* line: Line from the distro release file. Must be a unicode string
or a UTF-8 encoded byte string.
Returns:
A dictionary containing all information items.
""" |
if isinstance(line, bytes):
line = line.decode('utf-8')
matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
line.strip()[::-1])
distro_info = {}
if matches:
# regexp ensures non-None
distro_info['name'] = matches.group(3)[::-1]
if matches.group(2):
distro_info['version_id'] = matches.group(2)[::-1]
if matches.group(1):
distro_info['codename'] = matches.group(1)[::-1]
elif line:
distro_info['name'] = line.strip()
return distro_info |
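A self-contained demonstration of the reversal trick: matching the *reversed* line lets a left-anchored regex peel off the trailing "(codename)" and version first. The pattern below is a simplified stand-in for distro's real _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN, kept only group-compatible with the function above:

import re

# 'esaeler' is 'release' reversed; groups come out as codename, version, name
_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
    r'(?:\)(?P<codename>[^(]*)\( )?(?P<version>[\d.]+)(?: esaeler)? (?P<name>.*)'
)

print(_parse_distro_release_content('CentOS Linux release 7.1.1503 (Core)'))
# -> {'name': 'CentOS Linux', 'version_id': '7.1.1503', 'codename': 'Core'}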
<SYSTEM_TASK:>
Calculate the dependency tree for the package `root_key` and return
<END_TASK>
<USER_TASK:>
Description:
def dependency_tree(installed_keys, root_key):
"""
Calculate the dependency tree for the package `root_key` and return
a collection of all its dependencies. Uses a BFS traversal (the deque is
consumed from the left).
`installed_keys` should be a {key: requirement} mapping, e.g.
{'django': from_line('django==1.8')}
`root_key` should be the key to return the dependency tree for.
""" |
dependencies = set()
queue = collections.deque()
if root_key in installed_keys:
dep = installed_keys[root_key]
queue.append(dep)
while queue:
v = queue.popleft()
key = key_from_req(v)
if key in dependencies:
continue
dependencies.add(key)
for dep_specifier in v.requires():
dep_name = key_from_req(dep_specifier)
if dep_name in installed_keys:
dep = installed_keys[dep_name]
if dep_specifier.specifier.contains(dep.version):
queue.append(dep)
return dependencies |
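Hedged usage sketch: installed_keys can be built from the live environment, assuming (as in pip-tools) that key_from_req accepts pkg_resources distributions as well as requirements, since distributions expose the .requires() and .version attributes used above:

import pkg_resources

installed_keys = {key_from_req(dist): dist for dist in pkg_resources.working_set}
print(dependency_tree(installed_keys, 'requests'))
# e.g. {'requests', 'urllib3', 'idna', 'chardet', 'certifi'}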
<SYSTEM_TASK:>
Calculate which packages should be installed or uninstalled, given a set
<END_TASK>
<USER_TASK:>
Description:
def diff(compiled_requirements, installed_dists):
"""
Calculate which packages should be installed or uninstalled, given a set
of compiled requirements and a list of currently installed modules.
""" |
requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements}
satisfied = set() # holds keys
to_install = set() # holds InstallRequirement objects
to_uninstall = set() # holds keys
pkgs_to_ignore = get_dists_to_ignore(installed_dists)
for dist in installed_dists:
key = key_from_req(dist)
if key not in requirements_lut or not requirements_lut[key].match_markers():
to_uninstall.add(key)
elif requirements_lut[key].specifier.contains(dist.version):
satisfied.add(key)
for key, requirement in requirements_lut.items():
if key not in satisfied and requirement.match_markers():
to_install.add(requirement)
# Make sure to not uninstall any packages that should be ignored
to_uninstall -= set(pkgs_to_ignore)
return (to_install, to_uninstall) |
<SYSTEM_TASK:>
Install and uninstall the given sets of modules.
<END_TASK>
<USER_TASK:>
Description:
def sync(to_install, to_uninstall, verbose=False, dry_run=False, install_flags=None):
"""
Install and uninstall the given sets of modules.
""" |
if not to_uninstall and not to_install:
    click.echo("Everything up-to-date")
    return 0
pip_flags = []
if not verbose:
pip_flags += ['-q']
if to_uninstall:
if dry_run:
click.echo("Would uninstall:")
for pkg in to_uninstall:
click.echo(" {}".format(pkg))
else:
check_call([sys.executable, '-m', 'pip', 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
if to_install:
if install_flags is None:
install_flags = []
if dry_run:
click.echo("Would install:")
for ireq in to_install:
click.echo(" {}".format(format_requirement(ireq)))
else:
# prepare requirement lines
req_lines = []
for ireq in sorted(to_install, key=key_from_ireq):
ireq_hashes = get_hashes_from_ireq(ireq)
req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
# save requirement lines to a temporary file
tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
tmp_req_file.write('\n'.join(req_lines))
tmp_req_file.close()
try:
check_call(
[sys.executable, '-m', 'pip', 'install', '-r', tmp_req_file.name] + pip_flags + install_flags
)
finally:
os.unlink(tmp_req_file.name)
return 0 |
<SYSTEM_TASK:>
Perform a sanity check on the environment.
<END_TASK>
<USER_TASK:>
Description:
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment.""" |
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of undefined because filters depend on it.'
# A chained `a != b != c` check would miss a == c, so require all three
# delimiters to be pairwise distinct.
assert len({environment.block_start_string,
            environment.variable_start_string,
            environment.comment_start_string}) == 3, \
    'block, variable and comment start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment |
<SYSTEM_TASK:>
Create a new overlay environment that shares all the data with the
<END_TASK>
<USER_TASK:>
Description:
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, lstrip_blocks=missing,
extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except for cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
""" |
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in iteritems(args):
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in iteritems(self.extensions):
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv) |
<SYSTEM_TASK:>
Iterates over the extensions by priority.
<END_TASK>
<USER_TASK:>
Description:
def iter_extensions(self):
"""Iterates over the extensions by priority.""" |
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority)) |
<SYSTEM_TASK:>
Invokes a filter on a value the same way the compiler does it.
<END_TASK>
<USER_TASK:>
Description:
def call_filter(self, name, value, args=None, kwargs=None,
context=None, eval_ctx=None):
"""Invokes a filter on a value the same way the compiler does it.
Note that on Python 3 this might return a coroutine in case the
filter is running from an environment in async mode and the filter
supports async execution. It's your responsibility to await this
if needed.
.. versionadded:: 2.7
""" |
func = self.filters.get(name)
if func is None:
fail_for_missing_callable('no filter named %r', name)
args = [value] + list(args or ())
if getattr(func, 'contextfilter', False):
if context is None:
raise TemplateRuntimeError('Attempted to invoke context '
'filter without context')
args.insert(0, context)
elif getattr(func, 'evalcontextfilter', False):
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
elif getattr(func, 'environmentfilter', False):
args.insert(0, self)
return func(*args, **(kwargs or {})) |
<SYSTEM_TASK:>
Parse the sourcecode and return the abstract syntax tree. This
<END_TASK>
<USER_TASK:>
Description:
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
""" |
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source) |
<SYSTEM_TASK:>
A handy helper method that returns a callable that accepts keyword
<END_TASK>
<USER_TASK:>
Description:
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
""" |
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none) |
<SYSTEM_TASK:>
Creates a template object from a module. This is used by the
<END_TASK>
<USER_TASK:>
Description:
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
""" |
return cls._from_namespace(environment, module_dict, globals) |
<SYSTEM_TASK:>
Return the source line number of a line number in the
<END_TASK>
<USER_TASK:>
Description:
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
""" |
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1 |
<SYSTEM_TASK:>
This is a stand-in function for `urllib3.util.parse_url`
<END_TASK>
<USER_TASK:>
Description:
def _get_parsed_url(url):
# type: (S) -> Url
"""
This is a stand-in function for `urllib3.util.parse_url`
The original function doesn't handle special characters very well; this simply splits
out the authentication section, creates the parsed url, then puts the authentication
section back in, bypassing validation.
:return: The new, parsed URL object
:rtype: :class:`~urllib3.util.url.Url`
""" |
try:
parsed = urllib3_parse(url)
except ValueError:
scheme, _, url = url.partition("://")
auth, _, url = url.rpartition("@")
url = "{scheme}://{url}".format(scheme=scheme, url=url)
parsed = urllib3_parse(url)._replace(auth=auth)
return parsed |
<SYSTEM_TASK:>
Given a url, remove the password and insert 4 dashes
<END_TASK>
<USER_TASK:>
Description:
def remove_password_from_url(url):
# type: (S) -> S
"""
Given a url, remove the password and insert 4 dashes
:param url: The url to replace the authentication in
:type url: S
:return: The new URL without authentication
:rtype: S
""" |
parsed = _get_parsed_url(url)
if parsed.auth:
auth, _, _ = parsed.auth.partition(":")
return parsed._replace(auth="{auth}:----".format(auth=auth)).url
return parsed.url |
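Illustration: only the password half of the auth section is masked:

print(remove_password_from_url("https://user:s3cret@pypi.example.org/simple"))
# -> https://user:----@pypi.example.org/simple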
<SYSTEM_TASK:>
Ensures that the environment is good for unicode on Python 3.
<END_TASK>
<USER_TASK:>
Description:
def _verify_python3_env():
"""Ensures that the environment is good for unicode on Python 3.""" |
if PY2:
return
try:
import locale
fs_enc = codecs.lookup(locale.getpreferredencoding()).name
except Exception:
fs_enc = 'ascii'
if fs_enc != 'ascii':
return
extra = ''
if os.name == 'posix':
import subprocess
try:
rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()[0]
except OSError:
rv = b''
good_locales = set()
has_c_utf8 = False
# Make sure we're operating on text here.
if isinstance(rv, bytes):
rv = rv.decode('ascii', 'replace')
for line in rv.splitlines():
locale = line.strip()
if locale.lower().endswith(('.utf-8', '.utf8')):
good_locales.add(locale)
if locale.lower() in ('c.utf8', 'c.utf-8'):
has_c_utf8 = True
extra += '\n\n'
if not good_locales:
extra += (
'Additional information: on this system no suitable UTF-8\n'
'locales were discovered. This most likely requires resolving\n'
'by reconfiguring the locale system.'
)
elif has_c_utf8:
extra += (
'This system supports the C.UTF-8 locale which is recommended.\n'
'You might be able to resolve your issue by exporting the\n'
'following environment variables:\n\n'
' export LC_ALL=C.UTF-8\n'
' export LANG=C.UTF-8'
)
else:
extra += (
'This system lists a couple of UTF-8 supporting locales that\n'
'you can pick from. The following suitable locales were\n'
'discovered: %s'
) % ', '.join(sorted(good_locales))
bad_locale = None
for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
if locale and locale.lower().endswith(('.utf-8', '.utf8')):
bad_locale = locale
if locale is not None:
break
if bad_locale is not None:
extra += (
'\n\nClick discovered that you exported a UTF-8 locale\n'
'but the locale system could not pick up from it because\n'
'it does not exist. The exported locale is "%s" but it\n'
'is not supported'
) % bad_locale
raise RuntimeError(
'Click will abort further execution because Python 3 was'
' configured to use ASCII as encoding for the environment.'
' Consult https://click.palletsprojects.com/en/7.x/python3/ for'
' mitigation steps.' + extra
) |
<SYSTEM_TASK:>
Is path a directory containing setup.py or pyproject.toml?
<END_TASK>
<USER_TASK:>
Description:
def is_installable_dir(path):
# type: (str) -> bool
"""Is path is a directory containing setup.py or pyproject.toml?
""" |
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
pyproject_toml = os.path.join(path, 'pyproject.toml')
if os.path.isfile(pyproject_toml):
return True
return False |
<SYSTEM_TASK:>
Returns true if the page appears to be the index page of an svn repository
<END_TASK>
<USER_TASK:>
Description:
def is_svn_page(html):
# type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]]
"""
Returns true if the page appears to be the index page of an svn repository
""" |
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)) |
<SYSTEM_TASK:>
Yield pieces of data from a file-like object until EOF.
<END_TASK>
<USER_TASK:>
Description:
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF.""" |
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk |
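Hedged usage sketch: hash a large file incrementally instead of reading it whole (the filename is an example):

import hashlib

digest = hashlib.sha256()
with open('some-archive.tar.gz', 'rb') as f:
    for chunk in read_chunks(f):
        digest.update(chunk)
print(digest.hexdigest())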
<SYSTEM_TASK:>
Convert a path to its canonical, case-normalized, absolute version.
<END_TASK>
<USER_TASK:>
Description:
def normalize_path(path, resolve_symlinks=True):
# type: (str, bool) -> str
"""
Convert a path to its canonical, case-normalized, absolute version.
""" |
path = expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path) |
<SYSTEM_TASK:>
Return True if path is within sys.prefix, if we're running in a virtualenv.
<END_TASK>
<USER_TASK:>
Description:
def is_local(path):
# type: (str) -> bool
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
""" |
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix)) |
<SYSTEM_TASK:>
Return True if given Distribution is an editable install.
<END_TASK>
<USER_TASK:>
Description:
def dist_is_editable(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is an editable install.
""" |
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return True
return False |
<SYSTEM_TASK:>
Return the path for the .egg-link file if it exists, otherwise, None.
<END_TASK>
<USER_TASK:>
Description:
def egg_link_path(dist):
# type: (Distribution) -> Optional[str]
"""
Return the path for the .egg-link file if it exists, otherwise None.
There are three scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases where there's an egg-link in two locations.
This method will just return the first one found.
""" |
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
return None |
<SYSTEM_TASK:>
Get the installed version of dist_name avoiding pkg_resources cache
<END_TASK>
<USER_TASK:>
Description:
def get_installed_version(dist_name, working_set=None):
"""Get the installed version of dist_name avoiding pkg_resources cache""" |
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(dist_name)
if working_set is None:
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
# Check to see if we got an installed distribution or not, if we did
# we want to return its version.
return dist.version if dist else None |
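Usage sketch: because a fresh WorkingSet is built per call, the result reflects the on-disk state even if packages changed after interpreter start.

    get_installed_version('pip')          # e.g. '19.0.3'
    get_installed_version('no-such-pkg')  # None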
<SYSTEM_TASK:>
Return the URL for a VCS requirement.
<END_TASK>
<USER_TASK:>
Description:
def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None):
"""
Return the URL for a VCS requirement.
Args:
repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+").
rev: the revision to pin to (e.g. a commit hash, branch, or tag name).
project_name: the (unescaped) project name.
subdir: an optional subdirectory of the project to install from.
""" |
egg_project_name = pkg_resources.to_filename(project_name)
req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name)
if subdir:
req += '&subdirectory={}'.format(subdir)
return req |
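A worked example (hypothetical repository and revision); note that pkg_resources.to_filename turns '-' into '_' in the egg fragment:

    make_vcs_requirement_url(
        'git+https://github.com/org/my-project.git', 'abc1234',
        'my-project', subdir='pkg',
    )
    # 'git+https://github.com/org/my-project.git@abc1234#egg=my_project&subdirectory=pkg'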
<SYSTEM_TASK:>
Parse out and remove the auth information from a netloc.
<END_TASK>
<USER_TASK:>
Description:
def split_auth_from_netloc(netloc):
"""
Parse out and remove the auth information from a netloc.
Returns: (netloc, (username, password)).
""" |
if '@' not in netloc:
return netloc, (None, None)
# Split from the right because that's how urllib.parse.urlsplit()
# behaves if more than one @ is present (which can be checked using
# the password attribute of urlsplit()'s return value).
auth, netloc = netloc.rsplit('@', 1)
if ':' in auth:
# Split from the left because that's how urllib.parse.urlsplit()
# behaves if more than one : is present (which again can be checked
# using the password attribute of the return value)
user_pass = auth.split(':', 1)
else:
user_pass = auth, None
user_pass = tuple(
None if x is None else urllib_unquote(x) for x in user_pass
)
return netloc, user_pass |
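Worked examples (hypothetical credentials), showing the right-split on '@', the left-split on ':', and percent-decoding:

    split_auth_from_netloc('example.com')               # ('example.com', (None, None))
    split_auth_from_netloc('user:pa%40ss@example.com')  # ('example.com', ('user', 'pa@ss'))
    split_auth_from_netloc('token@example.com')         # ('example.com', ('token', None))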
<SYSTEM_TASK:>
Protection of pip.exe from modification on Windows
<END_TASK>
<USER_TASK:>
Description:
def protect_pip_from_modification_on_windows(modifying_pip):
"""Protection of pip.exe from modification on Windows
On Windows, any operation modifying pip should be run as:
python -m pip ...
""" |
pip_names = [
"pip.exe",
"pip{}.exe".format(sys.version_info[0]),
"pip{}.{}.exe".format(*sys.version_info[:2])
]
# See https://github.com/pypa/pip/issues/1299 for more discussion
should_show_use_python_msg = (
modifying_pip and
WINDOWS and
os.path.basename(sys.argv[0]) in pip_names
)
if should_show_use_python_msg:
new_command = [
sys.executable, "-m", "pip"
] + sys.argv[1:]
raise CommandError(
'To modify pip, please run the following command:\n{}'
.format(" ".join(new_command))
) |
<SYSTEM_TASK:>
Check if the python version in use matches the `requires_python` specifier.
<END_TASK>
<USER_TASK:>
Description:
def check_requires_python(requires_python):
# type: (Optional[str]) -> bool
"""
Check if the python version in use matches the `requires_python` specifier.
Returns `True` if the version of python in use matches the requirement.
Returns `False` if the version of python in use does not match the
requirement.
Raises an InvalidSpecifier if `requires_python` has an invalid format.
""" |
if requires_python is None:
# The package provides no information
return True
requires_python_specifier = specifiers.SpecifierSet(requires_python)
# We only use major.minor.micro
python_version = version.parse('{0}.{1}.{2}'.format(*sys.version_info[:3]))
return python_version in requires_python_specifier |
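Behavior sketch, assuming a hypothetical Python 3.7.2 interpreter:

    check_requires_python(None)          # True  -- the package declares no constraint
    check_requires_python('>=3.6')       # True  on 3.7.2
    check_requires_python('<3.0')        # False on 3.7.2
    check_requires_python('not a spec')  # raises specifiers.InvalidSpecifier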
<SYSTEM_TASK:>
Initialize the enhanced click completion
<END_TASK>
<USER_TASK:>
Description:
def init(complete_options=False, match_incomplete=None):
"""Initialize the enhanced click completion
Parameters
----------
complete_options : bool
always complete the options, even when the user hasn't typed a first dash (Default value = False)
match_incomplete : func
a function with two parameters choice and incomplete. Must return True
if incomplete is a correct match for choice, False otherwise.
""" |
global _initialized
if not _initialized:
_patch()
completion_configuration.complete_options = complete_options
if match_incomplete is not None:
completion_configuration.match_incomplete = match_incomplete
_initialized = True |
<SYSTEM_TASK:>
The list from pypi is really a list of versions. We want a list of
<END_TASK>
<USER_TASK:>
Description:
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
""" |
packages = OrderedDict()
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
if name not in packages:
packages[name] = {
'name': name,
'summary': summary,
'versions': [version],
}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
return list(packages.values()) |
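A worked example with hypothetical hits: versions are folded into one entry per package, and the summary tracks the highest version seen.

    hits = [
        {'name': 'foo', 'summary': 'old summary', 'version': '1.0'},
        {'name': 'foo', 'summary': 'new summary', 'version': '2.0'},
    ]
    transform_hits(hits)
    # [{'name': 'foo', 'summary': 'new summary', 'versions': ['1.0', '2.0']}]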
<SYSTEM_TASK:>
Add install_req as a requirement to install.
<END_TASK>
<USER_TASK:>
Description:
def add_requirement(
self,
install_req, # type: InstallRequirement
parent_req_name=None, # type: Optional[str]
extras_requested=None # type: Optional[Iterable[str]]
):
# type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]] # noqa: E501
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:param extras_requested: an iterable of extras used to evaluate the
environment markers.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
""" |
name = install_req.name
# If the markers do not match, ignore this requirement.
if not install_req.match_markers(extras_requested):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
name, install_req.markers,
)
return [], None
# If the wheel is not supported, raise an error.
# Should check this after filtering out based on environment markers to
# allow specifying different wheels based on the environment/OS, in a
# single requirements file.
if install_req.link and install_req.link.is_wheel:
wheel = Wheel(install_req.link.filename)
if self.check_supported_wheels and not wheel.supported():
raise InstallationError(
"%s is not a supported wheel on this platform." %
wheel.filename
)
# This next bit is really a sanity check.
assert install_req.is_direct == (parent_req_name is None), (
"a direct req shouldn't have a parent and also, "
"a non direct req should have a parent"
)
# Unnamed requirements are scanned again and the requirement won't be
# added as a dependency until after scanning.
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req], None
try:
existing_req = self.get_requirement(name)
except KeyError:
existing_req = None
has_conflicting_requirement = (
parent_req_name is None and
existing_req and
not existing_req.constraint and
existing_req.extras == install_req.extras and
existing_req.req.specifier != install_req.req.specifier
)
if has_conflicting_requirement:
raise InstallationError(
"Double requirement given: %s (already in %s, name=%r)"
% (install_req, existing_req, name)
)
# When no existing requirement exists, add the requirement as a
# dependency and it will be scanned again after.
if not existing_req:
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
# We'd want to rescan this requirement later
return [install_req], install_req
# Assume there's no need to scan, and that we've already
# encountered this for scanning.
if install_req.constraint or not existing_req.constraint:
return [], existing_req
does_not_satisfy_constraint = (
install_req.link and
not (
existing_req.link and
install_req.link.path == existing_req.link.path
)
)
if does_not_satisfy_constraint:
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % name,
)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
existing_req.extras = tuple(sorted(
set(existing_req.extras) | set(install_req.extras)
))
logger.debug(
"Setting %s extras to: %s",
existing_req, existing_req.extras,
)
# Return the existing requirement for addition to the parent and
# scanning again.
return [existing_req], existing_req |
<SYSTEM_TASK:>
Resolve what operations need to be done
<END_TASK>
<USER_TASK:>
Description:
def resolve(self, requirement_set):
# type: (RequirementSet) -> None
"""Resolve what operations need to be done
As a side-effect of this method, the packages (and their dependencies)
are downloaded, unpacked and prepared for installation. This
preparation is done by ``pip.operations.prepare``.
Once PyPI has static dependency metadata available, it would be
possible to move the preparation to become a step separated from
dependency resolution.
""" |
# make the wheelhouse
if self.preparer.wheel_download_dir:
ensure_dir(self.preparer.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
root_reqs = (
requirement_set.unnamed_requirements +
list(requirement_set.requirements.values())
)
self.require_hashes = (
requirement_set.require_hashes or
any(req.has_hash_options for req in root_reqs)
)
# Display where finder is looking for packages
locations = self.finder.get_formatted_locations()
if locations:
logger.info(locations)
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = [] # type: List[InstallRequirement]
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(
self._resolve_one(requirement_set, req)
)
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors |
<SYSTEM_TASK:>
Set a requirement to be installed.
<END_TASK>
<USER_TASK:>
Description:
def _set_req_to_reinstall(self, req):
# type: (InstallRequirement) -> None
"""
Set a requirement to be installed.
""" |
# Don't uninstall the conflict if doing a user install and the
# conflict is not a user install.
if not self.use_user_site or dist_in_usersite(req.satisfied_by):
req.conflicts_with = req.satisfied_by
req.satisfied_by = None |
<SYSTEM_TASK:>
Check if req_to_install should be skipped.
<END_TASK>
<USER_TASK:>
Description:
def _check_skip_installed(self, req_to_install):
# type: (InstallRequirement) -> Optional[str]
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this, req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
""" |
if self.ignore_installed:
return None
req_to_install.check_if_exists(self.use_user_site)
if not req_to_install.satisfied_by:
return None
if self.force_reinstall:
self._set_req_to_reinstall(req_to_install)
return None
if not self._is_upgrade_allowed(req_to_install):
if self.upgrade_strategy == "only-if-needed":
return 'already satisfied, skipping upgrade'
return 'already satisfied'
# Check for the possibility of an upgrade. For link-based
# requirements we have to pull the tree down and inspect to assess
# the version #, so it's handled way down.
if not req_to_install.link:
try:
self.finder.find_requirement(req_to_install, upgrade=True)
except BestVersionAlreadyInstalled:
# Then the best version is installed.
return 'already up-to-date'
except DistributionNotFound:
# No distribution found, so we squash the error. It will
# be raised later when we re-try later to do the install.
# Why don't we just raise here?
pass
self._set_req_to_reinstall(req_to_install)
return None |
<SYSTEM_TASK:>
Takes an InstallRequirement and returns a single AbstractDist \
<END_TASK>
<USER_TASK:>
Description:
def _get_abstract_dist_for(self, req):
# type: (InstallRequirement) -> DistAbstraction
"""Takes a InstallRequirement and returns a single AbstractDist \
representing a prepared variant of the same.
""" |
assert self.require_hashes is not None, (
"require_hashes should have been set in Resolver.resolve()"
)
if req.editable:
return self.preparer.prepare_editable_requirement(
req, self.require_hashes, self.use_user_site, self.finder,
)
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req.satisfied_by is None
skip_reason = self._check_skip_installed(req)
if req.satisfied_by:
return self.preparer.prepare_installed_requirement(
req, self.require_hashes, skip_reason
)
upgrade_allowed = self._is_upgrade_allowed(req)
abstract_dist = self.preparer.prepare_linked_requirement(
req, self.session, self.finder, upgrade_allowed,
self.require_hashes
)
# NOTE
# The following portion is for determining if a certain package is
# going to be re-installed/upgraded or not and reporting to the user.
# This should probably get cleaned up in a future refactor.
# req.req is only avail after unpack for URL
# pkgs repeat check_if_exists to uninstall-on-upgrade
# (#14)
if not self.ignore_installed:
req.check_if_exists(self.use_user_site)
if req.satisfied_by:
should_modify = (
self.upgrade_strategy != "to-satisfy-only" or
self.force_reinstall or
self.ignore_installed or
req.link.scheme == 'file'
)
if should_modify:
self._set_req_to_reinstall(req)
else:
logger.info(
'Requirement already satisfied (use --upgrade to upgrade):'
' %s', req,
)
return abstract_dist |
<SYSTEM_TASK:>
Prepare a single requirement.
<END_TASK>
<USER_TASK:>
Description:
def _resolve_one(
self,
requirement_set, # type: RequirementSet
req_to_install, # type: InstallRequirement
ignore_requires_python=False # type: bool
):
# type: (...) -> List[InstallRequirement]
"""Prepare a single requirements file.
:return: A list of additional InstallRequirements to also install.
""" |
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
# register tmp src for cleanup in case something goes wrong
requirement_set.reqs_to_cleanup.append(req_to_install)
abstract_dist = self._get_abstract_dist_for(req_to_install)
# Parse and return dependencies
dist = abstract_dist.dist()
try:
check_dist_requires_python(dist)
except UnsupportedPythonVersion as err:
if self.ignore_requires_python or ignore_requires_python or self.ignore_compatibility:
logger.warning(err.args[0])
else:
raise
# A huge hack, by Kenneth Reitz.
try:
self.requires_python = check_dist_requires_python(dist, absorb=False)
except TypeError:
self.requires_python = None
more_reqs = [] # type: List[InstallRequirement]
def add_req(subreq, extras_requested):
sub_install_req = install_req_from_req_string(
str(subreq),
req_to_install,
isolated=self.isolated,
wheel_cache=self.wheel_cache,
use_pep517=self.use_pep517
)
parent_req_name = req_to_install.name
to_scan_again, add_to_parent = requirement_set.add_requirement(
sub_install_req,
parent_req_name=parent_req_name,
extras_requested=extras_requested,
)
if parent_req_name and add_to_parent:
self._discovered_dependencies[parent_req_name].append(
add_to_parent
)
more_reqs.extend(to_scan_again)
with indent_log():
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not requirement_set.has_requirement(req_to_install.name):
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
# 'unnamed' requirements will get added here
req_to_install.is_direct = True
requirement_set.add_requirement(
req_to_install, parent_req_name=None,
extras_requested=available_requested,
)
if not self.ignore_dependencies:
if req_to_install.extras:
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq, extras_requested=available_requested)
# Hack for deep-resolving extras.
for available in available_requested:
if hasattr(dist, '_DistInfoDistribution__dep_map'):
for req in dist._DistInfoDistribution__dep_map[available]:
req = InstallRequirement(
req,
req_to_install,
isolated=self.isolated,
wheel_cache=self.wheel_cache,
use_pep517=None
)
more_reqs.append(req)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
requirement_set.successfully_downloaded.append(req_to_install)
return more_reqs |
<SYSTEM_TASK:>
Create the installation order.
<END_TASK>
<USER_TASK:>
Description:
def get_installation_order(self, req_set):
# type: (RequirementSet) -> List[InstallRequirement]
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
""" |
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set() # type: Set[InstallRequirement]
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._discovered_dependencies[req.name]:
schedule(dep)
order.append(req)
for install_req in req_set.requirements.values():
schedule(install_req)
return order |
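For intuition, here is the same depth-first scheduling sketched over a plain dependency mapping (a hypothetical graph, not pip's API): a name is appended only after everything it depends on, and the seen-set is what breaks cycles.

    deps = {'app': ['lib'], 'lib': ['base'], 'base': []}  # hypothetical dependency graph

    order, seen = [], set()

    def schedule(name):
        if name in seen:  # also what breaks cycles: re-entered nodes are skipped
            return
        seen.add(name)
        for dep in deps.get(name, []):
            schedule(dep)
        order.append(name)  # emitted only after all of its dependencies

    for name in deps:
        schedule(name)
    print(order)  # ['base', 'lib', 'app']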
<SYSTEM_TASK:>
Returns the line of text containing loc within a string, counting newlines as line separators.
<END_TASK>
<USER_TASK:>
Description:
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
""" |
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:] |
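A quick worked example:

    s = "abc\ndef\nghi"
    print(line(0, s))  # 'abc'
    print(line(4, s))  # 'def' -- location 4 falls on the second line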
<SYSTEM_TASK:>
Decorator for debugging parse actions.
<END_TASK>
<USER_TASK:>
Description:
def traceParseAction(f):
"""Decorator for debugging parse actions.
When the parse action is called, this decorator will print
``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
When the parse action completes, the decorator will print
``"<<"`` followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
""" |
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z |
<SYSTEM_TASK:>
Helper to define a delimited list of expressions - the delimiter
<END_TASK>
<USER_TASK:>
Description:
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter
defaults to ','. By default, the list elements and delimiters can
have intervening whitespace, and comments, but this can be
overridden by passing ``combine=True`` in the constructor. If
``combine`` is set to ``True``, the matching tokens are
returned as a single token string, with the delimiters included;
otherwise, the matching tokens are returned as a list of tokens,
with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
""" |
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) |
<SYSTEM_TASK:>
Helper to return the original, untokenized text for a given
<END_TASK>
<USER_TASK:>
Description:
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
""" |
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr |
<SYSTEM_TASK:>
Helper to decorate a returned token with its starting and ending
<END_TASK>
<USER_TASK:>
Description:
def locatedExpr(expr):
"""Helper to decorate a returned token with its starting and ending
locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains ``<TAB>`` characters, you
may want to call :class:`ParserElement.parseWithTabs`
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
""" |
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) |
<SYSTEM_TASK:>
Helper method for defining parse actions that require matching at
<END_TASK>
<USER_TASK:>
Description:
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at
a specific column in the input text.
""" |
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol |
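A brief usage sketch (assuming pyparsing's Word and nums are in scope): attach the returned verifier as a parse action, so candidate matches at any other column are rejected during scanning.

    num = Word(nums)
    num.setParseAction(matchOnlyAtCol(5))
    # in "abc 123 xyz 456" only '123' starts at column 5 (columns are 1-based)
    print(num.searchString("abc 123 xyz 456"))  # [['123']]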
<SYSTEM_TASK:>
Extracts the exception line from the input string, and marks
<END_TASK>
<USER_TASK:>
Description:
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
""" |
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip() |
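A usage sketch inside an except clause (assuming pyparsing's Word, nums, and ParseException are in scope):

    try:
        Word(nums).parseString("abc")
    except ParseException as pe:
        print(pe.markInputline())  # '>!<abc' -- marker inserted at the failing column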
<SYSTEM_TASK:>
Method to take an exception and translate the Python internal traceback into a list
<END_TASK>
<USER_TASK:>
Description:
def explain(exc, depth=16):
"""
Method to take an exception and translate the Python internal traceback into a list
of the pyparsing expressions that caused the exception to be raised.
Parameters:
- exc - exception raised during parsing (need not be a ParseException, in support
of Python exceptions that might be raised in a parse action)
- depth (default=16) - number of levels back in the stack trace to list expression
and function names; if None, the full stack trace names will be listed; if 0, only
the failing input line, marker, and exception string will be shown
Returns a multi-line string listing the ParserElements and/or function names in the
exception's stack trace.
Note: the diagnostic output will include string representations of the expressions
that failed to parse. These representations will be more helpful if you use `setName` to
give identifiable names to your expressions. Otherwise they will use the default string
forms, which may be cryptic to read.
explain() is only supported under Python 3.
""" |
import inspect
if depth is None:
depth = sys.getrecursionlimit()
ret = []
if isinstance(exc, ParseBaseException):
ret.append(exc.line)
ret.append(' ' * (exc.col - 1) + '^')
ret.append("{0}: {1}".format(type(exc).__name__, exc))
if depth > 0:
callers = inspect.getinnerframes(exc.__traceback__, context=depth)
seen = set()
for i, ff in enumerate(callers[-depth:]):
frm = ff.frame
f_self = frm.f_locals.get('self', None)
if isinstance(f_self, ParserElement):
if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):
continue
if f_self in seen:
continue
seen.add(f_self)
self_type = type(f_self)
ret.append("{0}.{1} - {2}".format(self_type.__module__,
self_type.__name__,
f_self))
elif f_self is not None:
self_type = type(f_self)
ret.append("{0}.{1}".format(self_type.__module__,
self_type.__name__))
else:
code = frm.f_code
if code.co_name in ('wrapper', '<module>'):
continue
ret.append("{0}".format(code.co_name))
depth -= 1
if not depth:
break
return '\n'.join(ret) |
<SYSTEM_TASK:>
Add sequence of elements to end of ParseResults list of elements.
<END_TASK>
<USER_TASK:>
Description:
def extend( self, itemseq ):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
""" |
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq) |