<SYSTEM_TASK:>
Retrieves the distributions installed on the library path of the environment
<END_TASK>
<USER_TASK:>
Description:
def get_distributions(self):
    """Retrieves the distributions installed on the library path of the environment

    :return: A set of distributions found on the library path
    :rtype: iterator
    """
    pkg_resources = self.safe_import("pkg_resources")
    libdirs = self.base_paths["libdirs"].split(os.pathsep)
    dists = (pkg_resources.find_distributions(libdir) for libdir in libdirs)
    for dist in itertools.chain.from_iterable(dists):
        yield dist
<SYSTEM_TASK:>
Find an egg by name in the given environment
<END_TASK>
<USER_TASK:>
Description:
def find_egg(self, egg_dist):
    """Find an egg by name in the given environment"""
    site_packages = self.libdir[1]
    search_filename = "{0}.egg-link".format(egg_dist.project_name)
    try:
        user_site = site.getusersitepackages()
    except AttributeError:
        user_site = site.USER_SITE
    search_locations = [site_packages, user_site]
    for site_directory in search_locations:
        egg = os.path.join(site_directory, search_filename)
        if os.path.isfile(egg):
            return egg
<SYSTEM_TASK:>
Determine whether the supplied distribution is in the environment.
<END_TASK>
<USER_TASK:>
Description:
def dist_is_in_project(self, dist):
    """Determine whether the supplied distribution is in the environment."""
    from .project import _normalized
    prefixes = [
        _normalized(prefix) for prefix in self.base_paths["libdirs"].split(os.pathsep)
        if _normalized(prefix).startswith(_normalized(self.prefix.as_posix()))
    ]
    location = self.locate_dist(dist)
    if not location:
        return False
    location = _normalized(make_posix(location))
    return any(location.startswith(prefix) for prefix in prefixes)
<SYSTEM_TASK:>
Given a package name, returns whether it is installed in the environment
<END_TASK>
<USER_TASK:>
Description:
def is_installed(self, pkgname):
    """Given a package name, returns whether it is installed in the environment

    :param str pkgname: The name of a package
    :return: Whether the supplied package is installed in the environment
    :rtype: bool
    """
    return any(d for d in self.get_distributions() if d.project_name == pkgname)
<SYSTEM_TASK:>
Run a python command in the environment context.
<END_TASK>
<USER_TASK:>
Description:
def run_py(self, cmd, cwd=os.curdir):
    """Run a python command in the environment context.

    :param cmd: A command to run in the environment - runs with `python -c`
    :type cmd: str or list
    :param str cwd: The working directory in which to execute the command, defaults to :data:`os.curdir`
    :return: A finished command object
    :rtype: :class:`~subprocess.Popen`
    """
    c = None
    if isinstance(cmd, six.string_types):
        script = vistir.cmdparse.Script.parse("{0} -c {1}".format(self.python, cmd))
    else:
        script = vistir.cmdparse.Script.parse([self.python, "-c"] + list(cmd))
    with self.activated():
        c = vistir.misc.run(script._parts, return_object=True, nospin=True, cwd=cwd, write_to_stdout=False)
    return c
<SYSTEM_TASK:>
Runs the environment's inline activation script
<END_TASK>
<USER_TASK:>
Description:
def run_activate_this(self):
    """Runs the environment's inline activation script"""
    if self.is_venv:
        activate_this = os.path.join(self.scripts_dir, "activate_this.py")
        if not os.path.isfile(activate_this):
            raise OSError("No such file: {0!s}".format(activate_this))
        with open(activate_this, "r") as f:
            code = compile(f.read(), activate_this, "exec")
            exec(code, dict(__file__=activate_this))
<SYSTEM_TASK:>
Helper context manager to activate the environment.
<END_TASK>
<USER_TASK:>
Description:
def activated(self, include_extras=True, extra_dists=None):
    """Helper context manager to activate the environment.

    This context manager will set the following variables for the duration
    of its activation:
        * sys.prefix
        * sys.path
        * os.environ["VIRTUAL_ENV"]
        * os.environ["PATH"]

    In addition, it will make any distributions passed into `extra_dists` available
    on `sys.path` while inside the context manager, as well as making `passa` itself
    available.

    The environment's `prefix` as well as `scripts_dir` properties are both prepended
    to `os.environ["PATH"]` to ensure that calls to `~Environment.run()` use the
    environment's path preferentially.
    """
    if not extra_dists:
        extra_dists = []
    original_path = sys.path
    original_prefix = sys.prefix
    parent_path = vistir.compat.Path(__file__).absolute().parent
    vendor_dir = parent_path.joinpath("vendor").as_posix()
    patched_dir = parent_path.joinpath("patched").as_posix()
    parent_path = parent_path.as_posix()
    self.add_dist("pip")
    prefix = self.prefix.as_posix()
    with vistir.contextmanagers.temp_environ(), vistir.contextmanagers.temp_path():
        os.environ["PATH"] = os.pathsep.join([
            vistir.compat.fs_str(self.scripts_dir),
            vistir.compat.fs_str(self.prefix.as_posix()),
            os.environ.get("PATH", "")
        ])
        os.environ["PYTHONIOENCODING"] = vistir.compat.fs_str("utf-8")
        os.environ["PYTHONDONTWRITEBYTECODE"] = vistir.compat.fs_str("1")
        from .environments import PIPENV_USE_SYSTEM
        if self.is_venv:
            os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
            os.environ["VIRTUAL_ENV"] = vistir.compat.fs_str(prefix)
        else:
            if not PIPENV_USE_SYSTEM and not os.environ.get("VIRTUAL_ENV"):
                os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
                os.environ.pop("PYTHONHOME", None)
        sys.path = self.sys_path
        sys.prefix = self.sys_prefix
        site.addsitedir(self.base_paths["purelib"])
        pip = self.safe_import("pip")
        pip_vendor = self.safe_import("pip._vendor")
        pep517_dir = os.path.join(os.path.dirname(pip_vendor.__file__), "pep517")
        site.addsitedir(pep517_dir)
        os.environ["PYTHONPATH"] = os.pathsep.join([
            os.environ.get("PYTHONPATH", self.base_paths["PYTHONPATH"]), pep517_dir
        ])
        if include_extras:
            site.addsitedir(parent_path)
            sys.path.extend([parent_path, patched_dir, vendor_dir])
            extra_dists = list(self.extra_dists) + extra_dists
            for extra_dist in extra_dists:
                if extra_dist not in self.get_working_set():
                    extra_dist.activate(self.sys_path)
        try:
            yield
        finally:
            sys.path = original_path
            sys.prefix = original_prefix
            six.moves.reload_module(pkg_resources)
<SYSTEM_TASK:>
A context manager which allows uninstallation of packages from the environment
<END_TASK>
<USER_TASK:>
Description:
def uninstall(self, pkgname, *args, **kwargs):
    """A context manager which allows uninstallation of packages from the environment

    :param str pkgname: The name of a package to uninstall

    >>> env = Environment("/path/to/env/root")
    >>> with env.uninstall("pytz", auto_confirm=True, verbose=False) as uninstaller:
            cleaned = uninstaller.paths
    >>> if cleaned:
            print("uninstalled packages: %s" % cleaned)
    """
    auto_confirm = kwargs.pop("auto_confirm", True)
    verbose = kwargs.pop("verbose", False)
    with self.activated():
        monkey_patch = next(iter(
            dist for dist in self.base_working_set
            if dist.project_name == "recursive-monkey-patch"
        ), None)
        if monkey_patch:
            monkey_patch.activate()
        pip_shims = self.safe_import("pip_shims")
        pathset_base = pip_shims.UninstallPathSet
        pathset_base._permitted = PatchedUninstaller._permitted
        dist = next(
            iter(filter(lambda d: d.project_name == pkgname, self.get_working_set())),
            None
        )
        pathset = pathset_base.from_dist(dist)
        if pathset is not None:
            pathset.remove(auto_confirm=auto_confirm, verbose=verbose)
        try:
            yield pathset
        except Exception as e:
            if pathset is not None:
                pathset.rollback()
        else:
            if pathset is not None:
                pathset.commit()
        if pathset is None:
            return
<SYSTEM_TASK:>
Convert a string to a null-terminated bytes object.
<END_TASK>
<USER_TASK:>
Description:
def stn(s, length, encoding, errors):
    """Convert a string to a null-terminated bytes object.
    """
    s = s.encode(encoding, errors)
    return s[:length] + (length - len(s)) * NUL
<SYSTEM_TASK:>
Convert a null-terminated bytes object to a string.
<END_TASK>
<USER_TASK:>
Description:
def nts(s, encoding, errors):
    """Convert a null-terminated bytes object to a string.
    """
    p = s.find(b"\0")
    if p != -1:
        s = s[:p]
    return s.decode(encoding, errors)
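A minimal round-trip sketch of these two helpers, assuming NUL is the module's b"\0" padding constant:

    field = stn("hello.txt", 32, "utf-8", "strict")   # pad to 32 bytes with NULs
    assert len(field) == 32 and field.startswith(b"hello.txt\x00")
    assert nts(field, "utf-8", "strict") == "hello.txt"  # decoding stops at the first NUL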
<SYSTEM_TASK:>
Convert a python number to a number field.
<END_TASK>
<USER_TASK:>
Description:
def itn(n, digits=8, format=DEFAULT_FORMAT):
    """Convert a python number to a number field.
    """
    # POSIX 1003.1-1988 requires numbers to be encoded as a string of
    # octal digits followed by a null-byte, this allows values up to
    # (8**(digits-1))-1. GNU tar allows storing numbers greater than
    # that if necessary. A leading 0o200 byte indicates this particular
    # encoding, the following digits-1 bytes are a big-endian
    # representation. This allows values up to (256**(digits-1))-1.
    if 0 <= n < 8 ** (digits - 1):
        s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
    else:
        if format != GNU_FORMAT or n >= 256 ** (digits - 1):
            raise ValueError("overflow in number field")
        if n < 0:
            # XXX We mimic GNU tar's behaviour with negative numbers,
            # this could raise OverflowError.
            n = struct.unpack("L", struct.pack("l", n))[0]
        s = bytearray()
        for i in range(digits - 1):
            s.insert(0, n & 0o377)
            n >>= 8
        s.insert(0, 0o200)
    return s
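A worked example of the two encodings, assuming the module's NUL and GNU_FORMAT constants. With the default digits=8, plain octal covers values below 8**7:

    assert itn(493) == b"0000755\x00"   # 493 == 0o755: seven octal digits plus NUL
    # Values >= 8**7 use GNU base-256: a 0o200 marker byte followed by
    # digits-1 big-endian bytes (here 2097152 == 0x200000).
    assert bytes(itn(8 ** 7, format=GNU_FORMAT)) == b"\x80\x00\x00\x00\x00\x20\x00\x00"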
<SYSTEM_TASK:>
Return True if name points to a tar archive that we
<END_TASK>
<USER_TASK:>
Description:
def is_tarfile(name):
    """Return True if name points to a tar archive that we
    are able to handle, else return False.
    """
    try:
        t = open(name)
        t.close()
        return True
    except TarError:
        return False
<SYSTEM_TASK:>
Initialize for writing with gzip compression.
<END_TASK>
<USER_TASK:>
Description:
def _init_write_gz(self):
    """Initialize for writing with gzip compression.
    """
    self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
                                     -self.zlib.MAX_WBITS,
                                     self.zlib.DEF_MEM_LEVEL,
                                     0)
    timestamp = struct.pack("<L", int(time.time()))
    self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
    if self.name.endswith(".gz"):
        self.name = self.name[:-3]
    # RFC1952 says we must use ISO-8859-1 for the FNAME field.
    self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
<SYSTEM_TASK:>
Write string s to the stream.
<END_TASK>
<USER_TASK:>
Description:
def write(self, s):
    """Write string s to the stream.
    """
    if self.comptype == "gz":
        self.crc = self.zlib.crc32(s, self.crc)
    self.pos += len(s)
    if self.comptype != "tar":
        s = self.cmp.compress(s)
    self.__write(s)
<SYSTEM_TASK:>
Write string s to the stream if a whole new block
<END_TASK>
<USER_TASK:>
Description:
def __write(self, s):
    """Write string s to the stream if a whole new block
    is ready to be written.
    """
    self.buf += s
    while len(self.buf) > self.bufsize:
        self.fileobj.write(self.buf[:self.bufsize])
        self.buf = self.buf[self.bufsize:]
<SYSTEM_TASK:>
Close the _Stream object. No operation should be
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Close the _Stream object. No operation should be
    done on it afterwards.
    """
    if self.closed:
        return
    if self.mode == "w" and self.comptype != "tar":
        self.buf += self.cmp.flush()
    if self.mode == "w" and self.buf:
        self.fileobj.write(self.buf)
        self.buf = b""
        if self.comptype == "gz":
            # The native zlib crc is an unsigned 32-bit integer, but
            # the Python wrapper implicitly casts that to a signed C
            # long. So, on a 32-bit box self.crc may "look negative",
            # while the same crc on a 64-bit box may "look positive".
            # To avoid irksome warnings from the `struct` module, force
            # it to look positive on all boxes.
            self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
            self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
    if not self._extfileobj:
        self.fileobj.close()
    self.closed = True
<SYSTEM_TASK:>
Initialize for reading a gzip compressed fileobj.
<END_TASK>
<USER_TASK:>
Description:
def _init_read_gz(self):
    """Initialize for reading a gzip compressed fileobj.
    """
    self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
    self.dbuf = b""
    # taken from gzip.GzipFile with some alterations
    if self.__read(2) != b"\037\213":
        raise ReadError("not a gzip file")
    if self.__read(1) != b"\010":
        raise CompressionError("unsupported compression method")
    flag = ord(self.__read(1))
    self.__read(6)
    if flag & 4:
        xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
        self.read(xlen)
    if flag & 8:
        while True:
            s = self.__read(1)
            if not s or s == NUL:
                break
    if flag & 16:
        while True:
            s = self.__read(1)
            if not s or s == NUL:
                break
    if flag & 2:
        self.__read(2)
<SYSTEM_TASK:>
Set the stream's file pointer to pos. Negative seeking
<END_TASK>
<USER_TASK:>
Description:
def seek(self, pos=0):
    """Set the stream's file pointer to pos. Negative seeking
    is forbidden.
    """
    if pos - self.pos >= 0:
        blocks, remainder = divmod(pos - self.pos, self.bufsize)
        for i in range(blocks):
            self.read(self.bufsize)
        self.read(remainder)
    else:
        raise StreamError("seeking backwards is not allowed")
    return self.pos
<SYSTEM_TASK:>
Return the next size number of bytes from the stream.
<END_TASK>
<USER_TASK:>
Description:
def read(self, size=None):
    """Return the next size number of bytes from the stream.
    If size is not defined, return all bytes of the stream
    up to EOF.
    """
    if size is None:
        t = []
        while True:
            buf = self._read(self.bufsize)
            if not buf:
                break
            t.append(buf)
        # _read() returns bytes, so the chunks must be joined with a
        # bytes separator, not a str.
        buf = b"".join(t)
    else:
        buf = self._read(size)
    self.pos += len(buf)
    return buf
<SYSTEM_TASK:>
Return size bytes from the stream.
<END_TASK>
<USER_TASK:>
Description:
def _read(self, size):
    """Return size bytes from the stream.
    """
    if self.comptype == "tar":
        return self.__read(size)

    c = len(self.dbuf)
    while c < size:
        buf = self.__read(self.bufsize)
        if not buf:
            break
        try:
            buf = self.cmp.decompress(buf)
        except IOError:
            raise ReadError("invalid compressed data")
        self.dbuf += buf
        c += len(buf)
    buf = self.dbuf[:size]
    self.dbuf = self.dbuf[size:]
    return buf
<SYSTEM_TASK:>
Return size bytes from stream. If internal buffer is empty,
<END_TASK>
<USER_TASK:>
Description:
def __read(self, size):
    """Return size bytes from stream. If internal buffer is empty,
    read another block from the stream.
    """
    c = len(self.buf)
    while c < size:
        buf = self.fileobj.read(self.bufsize)
        if not buf:
            break
        self.buf += buf
        c += len(buf)
    buf = self.buf[:size]
    self.buf = self.buf[size:]
    return buf
<SYSTEM_TASK:>
Read at most size bytes from the file. If size is not
<END_TASK>
<USER_TASK:>
Description:
def read(self, size=None):
    """Read at most size bytes from the file. If size is not
    present or None, read all data until EOF is reached.
    """
    if self.closed:
        raise ValueError("I/O operation on closed file")

    buf = b""
    if self.buffer:
        if size is None:
            buf = self.buffer
            self.buffer = b""
        else:
            buf = self.buffer[:size]
            self.buffer = self.buffer[size:]

    if size is None:
        buf += self.fileobj.read()
    else:
        buf += self.fileobj.read(size - len(buf))

    self.position += len(buf)
    return buf
<SYSTEM_TASK:>
Read one entire line from the file. If size is present
<END_TASK>
<USER_TASK:>
Description:
def readline(self, size=-1):
    """Read one entire line from the file. If size is present
    and non-negative, return a string with at most that
    size, which may be an incomplete line.
    """
    if self.closed:
        raise ValueError("I/O operation on closed file")

    pos = self.buffer.find(b"\n") + 1
    if pos == 0:
        # no newline found.
        while True:
            buf = self.fileobj.read(self.blocksize)
            self.buffer += buf
            if not buf or b"\n" in buf:
                pos = self.buffer.find(b"\n") + 1
                if pos == 0:
                    # no newline found.
                    pos = len(self.buffer)
                break

    if size != -1:
        pos = min(size, pos)

    buf = self.buffer[:pos]
    self.buffer = self.buffer[pos:]
    self.position += len(buf)
    return buf
<SYSTEM_TASK:>
Seek to a position in the file.
<END_TASK>
<USER_TASK:>
Description:
def seek(self, pos, whence=os.SEEK_SET):
    """Seek to a position in the file.
    """
    if self.closed:
        raise ValueError("I/O operation on closed file")

    if whence == os.SEEK_SET:
        self.position = min(max(pos, 0), self.size)
    elif whence == os.SEEK_CUR:
        if pos < 0:
            self.position = max(self.position + pos, 0)
        else:
            self.position = min(self.position + pos, self.size)
    elif whence == os.SEEK_END:
        self.position = max(min(self.size + pos, self.size), 0)
    else:
        raise ValueError("Invalid argument")

    self.buffer = b""
    self.fileobj.seek(self.position)
<SYSTEM_TASK:>
Return the TarInfo's attributes as a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def get_info(self):
    """Return the TarInfo's attributes as a dictionary.
    """
    info = {
        "name":     self.name,
        "mode":     self.mode & 0o7777,
        "uid":      self.uid,
        "gid":      self.gid,
        "size":     self.size,
        "mtime":    self.mtime,
        "chksum":   self.chksum,
        "type":     self.type,
        "linkname": self.linkname,
        "uname":    self.uname,
        "gname":    self.gname,
        "devmajor": self.devmajor,
        "devminor": self.devminor
    }
    if info["type"] == DIRTYPE and not info["name"].endswith("/"):
        info["name"] += "/"
    return info
<SYSTEM_TASK:>
Return a tar header as a string of 512 byte blocks.
<END_TASK>
<USER_TASK:>
Description:
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
    """Return a tar header as a string of 512 byte blocks.
    """
    info = self.get_info()
    if format == USTAR_FORMAT:
        return self.create_ustar_header(info, encoding, errors)
    elif format == GNU_FORMAT:
        return self.create_gnu_header(info, encoding, errors)
    elif format == PAX_FORMAT:
        return self.create_pax_header(info, encoding)
    else:
        raise ValueError("invalid format")
<SYSTEM_TASK:>
Return the object as a ustar header block.
<END_TASK>
<USER_TASK:>
Description:
def create_ustar_header(self, info, encoding, errors):
    """Return the object as a ustar header block.
    """
    info["magic"] = POSIX_MAGIC
    if len(info["linkname"]) > LENGTH_LINK:
        raise ValueError("linkname is too long")
    if len(info["name"]) > LENGTH_NAME:
        info["prefix"], info["name"] = self._posix_split_name(info["name"])
    return self._create_header(info, USTAR_FORMAT, encoding, errors)
<SYSTEM_TASK:>
Return the object as a GNU header block sequence.
<END_TASK>
<USER_TASK:>
Description:
def create_gnu_header(self, info, encoding, errors):
    """Return the object as a GNU header block sequence.
    """
    info["magic"] = GNU_MAGIC
    buf = b""
    if len(info["linkname"]) > LENGTH_LINK:
        buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
    if len(info["name"]) > LENGTH_NAME:
        buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
    return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
<SYSTEM_TASK:>
Return the object as a ustar header block. If it cannot be
<END_TASK>
<USER_TASK:>
Description:
def create_pax_header(self, info, encoding):
    """Return the object as a ustar header block. If it cannot be
    represented this way, prepend a pax extended header sequence
    with supplemental information.
    """
    info["magic"] = POSIX_MAGIC
    pax_headers = self.pax_headers.copy()

    # Test string fields for values that exceed the field length or cannot
    # be represented in ASCII encoding.
    for name, hname, length in (
            ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
            ("uname", "uname", 32), ("gname", "gname", 32)):
        if hname in pax_headers:
            # The pax header has priority.
            continue
        # Try to encode the string as ASCII.
        try:
            info[name].encode("ascii", "strict")
        except UnicodeEncodeError:
            pax_headers[hname] = info[name]
            continue
        if len(info[name]) > length:
            pax_headers[hname] = info[name]

    # Test number fields for values that exceed the field limit or that
    # are stored as floats.
    for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
        if name in pax_headers:
            # The pax header has priority. Avoid overflow.
            info[name] = 0
            continue
        val = info[name]
        if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
            pax_headers[name] = str(val)
            info[name] = 0

    # Create a pax extended header if necessary.
    if pax_headers:
        buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
    else:
        buf = b""

    return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
<SYSTEM_TASK:>
Split a name longer than 100 chars into a prefix
<END_TASK>
<USER_TASK:>
Description:
def _posix_split_name(self, name):
    """Split a name longer than 100 chars into a prefix
    and a name part.
    """
    prefix = name[:LENGTH_PREFIX + 1]
    while prefix and prefix[-1] != "/":
        prefix = prefix[:-1]

    name = name[len(prefix):]
    prefix = prefix[:-1]

    if not prefix or len(name) > LENGTH_NAME:
        raise ValueError("name is too long")
    return prefix, name
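A quick illustration of the split, assuming the usual ustar limits LENGTH_NAME = 100 and LENGTH_PREFIX = 155 and a hypothetical `tarinfo` instance:

    long_name = "a" * 29 + "/" + "b" * 100            # 130 chars, too long for the name field
    prefix, name = tarinfo._posix_split_name(long_name)
    assert prefix == "a" * 29 and name == "b" * 100   # split at the last "/" that fits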
<SYSTEM_TASK:>
Return the string payload filled with zero bytes
<END_TASK>
<USER_TASK:>
Description:
def _create_payload(payload):
    """Return the string payload filled with zero bytes
    up to the next 512 byte border.
    """
    blocks, remainder = divmod(len(payload), BLOCKSIZE)
    if remainder > 0:
        payload += (BLOCKSIZE - remainder) * NUL
    return payload
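For example, with the tar BLOCKSIZE of 512, a 700-byte payload gains 324 NUL bytes of padding:

    assert len(_create_payload(b"x" * 700)) == 1024   # padded to the next 512-byte border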
<SYSTEM_TASK:>
Return a POSIX.1-2008 extended or global header sequence
<END_TASK>
<USER_TASK:>
Description:
def _create_pax_generic_header(cls, pax_headers, type, encoding):
    """Return a POSIX.1-2008 extended or global header sequence
    that contains a list of keyword, value pairs. The values
    must be strings.
    """
    # Check if one of the fields contains surrogate characters and thereby
    # forces hdrcharset=BINARY, see _proc_pax() for more information.
    binary = False
    for keyword, value in pax_headers.items():
        try:
            value.encode("utf8", "strict")
        except UnicodeEncodeError:
            binary = True
            break

    records = b""
    if binary:
        # Put the hdrcharset field at the beginning of the header.
        records += b"21 hdrcharset=BINARY\n"

    for keyword, value in pax_headers.items():
        keyword = keyword.encode("utf8")
        if binary:
            # Try to restore the original byte representation of `value'.
            # Needless to say, that the encoding must match the string.
            value = value.encode(encoding, "surrogateescape")
        else:
            value = value.encode("utf8")

        l = len(keyword) + len(value) + 3   # ' ' + '=' + '\n'
        n = p = 0
        while True:
            n = l + len(str(p))
            if n == p:
                break
            p = n
        records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"

    # We use a hardcoded "././@PaxHeader" name like star does
    # instead of the one that POSIX recommends.
    info = {}
    info["name"] = "././@PaxHeader"
    info["type"] = type
    info["size"] = len(records)
    info["magic"] = POSIX_MAGIC

    # Create pax header + record blocks.
    return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
           cls._create_payload(records)
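The length loop above solves a small fixed point, because each record's leading length field counts its own digits. A worked example for a hypothetical record with keyword "path" and value "foo":

    # l = len(b"path") + len(b"foo") + 3 == 10 (space, '=', newline).
    # Guesses: p=0 -> n=11; p=11 -> n=12; p=12 -> n=12, converged.
    assert len(b"12 path=foo\n") == 12                # the emitted record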
<SYSTEM_TASK:>
Choose the right processing method depending on
<END_TASK>
<USER_TASK:>
Description:
def _proc_member(self, tarfile):
    """Choose the right processing method depending on
    the type and call it.
    """
    if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
        return self._proc_gnulong(tarfile)
    elif self.type == GNUTYPE_SPARSE:
        return self._proc_sparse(tarfile)
    elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
        return self._proc_pax(tarfile)
    else:
        return self._proc_builtin(tarfile)
<SYSTEM_TASK:>
Process a builtin type or an unknown type which
<END_TASK>
<USER_TASK:>
Description:
def _proc_builtin(self, tarfile):
    """Process a builtin type or an unknown type which
    will be treated as a regular file.
    """
    self.offset_data = tarfile.fileobj.tell()
    offset = self.offset_data
    if self.isreg() or self.type not in SUPPORTED_TYPES:
        # Skip the following data blocks.
        offset += self._block(self.size)
    tarfile.offset = offset

    # Patch the TarInfo object with saved global
    # header information.
    self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
    return self
<SYSTEM_TASK:>
Process the blocks that hold a GNU longname
<END_TASK>
<USER_TASK:>
Description:
def _proc_gnulong(self, tarfile):
    """Process the blocks that hold a GNU longname
    or longlink member.
    """
    buf = tarfile.fileobj.read(self._block(self.size))

    # Fetch the next header and process it.
    try:
        next = self.fromtarfile(tarfile)
    except HeaderError:
        raise SubsequentHeaderError("missing or bad subsequent header")

    # Patch the TarInfo object from the next header with
    # the longname information.
    next.offset = self.offset
    if self.type == GNUTYPE_LONGNAME:
        next.name = nts(buf, tarfile.encoding, tarfile.errors)
    elif self.type == GNUTYPE_LONGLINK:
        next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
    return next
<SYSTEM_TASK:>
Process an extended or global header as described in
<END_TASK>
<USER_TASK:>
Description:
def _proc_pax(self, tarfile):
    """Process an extended or global header as described in
    POSIX.1-2008.
    """
    # Read the header information.
    buf = tarfile.fileobj.read(self._block(self.size))

    # A pax header stores supplemental information for either
    # the following file (extended) or all following files
    # (global).
    if self.type == XGLTYPE:
        pax_headers = tarfile.pax_headers
    else:
        pax_headers = tarfile.pax_headers.copy()

    # Check if the pax header contains a hdrcharset field. This tells us
    # the encoding of the path, linkpath, uname and gname fields. Normally,
    # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
    # implementations to store them as raw binary strings if the
    # translation to UTF-8 fails.
    match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
    if match is not None:
        pax_headers["hdrcharset"] = match.group(1).decode("utf8")

    # For the time being, we don't care about anything other than "BINARY".
    # The only other value that is currently allowed by the standard is
    # "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
    hdrcharset = pax_headers.get("hdrcharset")
    if hdrcharset == "BINARY":
        encoding = tarfile.encoding
    else:
        encoding = "utf8"

    # Parse pax header information. A record looks like that:
    # "%d %s=%s\n" % (length, keyword, value). length is the size
    # of the complete record including the length field itself and
    # the newline. keyword and value are both UTF-8 encoded strings.
    regex = re.compile(br"(\d+) ([^=]+)=")
    pos = 0
    while True:
        match = regex.match(buf, pos)
        if not match:
            break
        length, keyword = match.groups()
        length = int(length)
        value = buf[match.end(2) + 1:match.start(1) + length - 1]

        # Normally, we could just use "utf8" as the encoding and "strict"
        # as the error handler, but we better not take the risk. For
        # example, GNU tar <= 1.23 is known to store filenames it cannot
        # translate to UTF-8 as raw strings (unfortunately without a
        # hdrcharset=BINARY header).
        # We first try the strict standard encoding, and if that fails we
        # fall back on the user's encoding and error handler.
        keyword = self._decode_pax_field(keyword, "utf8", "utf8",
                                         tarfile.errors)
        if keyword in PAX_NAME_FIELDS:
            value = self._decode_pax_field(value, encoding, tarfile.encoding,
                                           tarfile.errors)
        else:
            value = self._decode_pax_field(value, "utf8", "utf8",
                                           tarfile.errors)

        pax_headers[keyword] = value
        pos += length

    # Fetch the next header.
    try:
        next = self.fromtarfile(tarfile)
    except HeaderError:
        raise SubsequentHeaderError("missing or bad subsequent header")

    # Process GNU sparse information.
    if "GNU.sparse.map" in pax_headers:
        # GNU extended sparse format version 0.1.
        self._proc_gnusparse_01(next, pax_headers)
    elif "GNU.sparse.size" in pax_headers:
        # GNU extended sparse format version 0.0.
        self._proc_gnusparse_00(next, pax_headers, buf)
    elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
        # GNU extended sparse format version 1.0.
        self._proc_gnusparse_10(next, pax_headers, tarfile)

    if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
        # Patch the TarInfo object with the extended header info.
        next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
        next.offset = self.offset

        if "size" in pax_headers:
            # If the extended header replaces the size field,
            # we need to recalculate the offset where the next
            # header starts.
            offset = next.offset_data
            if next.isreg() or next.type not in SUPPORTED_TYPES:
                offset += next._block(next.size)
            tarfile.offset = offset

    return next
<SYSTEM_TASK:>
Process a GNU tar extended sparse header, version 0.1.
<END_TASK>
<USER_TASK:>
Description:
def _proc_gnusparse_01(self, next, pax_headers):
    """Process a GNU tar extended sparse header, version 0.1.
    """
    sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
    next.sparse = list(zip(sparse[::2], sparse[1::2]))
<SYSTEM_TASK:>
Replace fields with supplemental information from a previous
<END_TASK>
<USER_TASK:>
Description:
def _apply_pax_info(self, pax_headers, encoding, errors):
    """Replace fields with supplemental information from a previous
    pax extended or global header.
    """
    for keyword, value in pax_headers.items():
        if keyword == "GNU.sparse.name":
            setattr(self, "path", value)
        elif keyword == "GNU.sparse.size":
            setattr(self, "size", int(value))
        elif keyword == "GNU.sparse.realsize":
            setattr(self, "size", int(value))
        elif keyword in PAX_FIELDS:
            if keyword in PAX_NUMBER_FIELDS:
                try:
                    value = PAX_NUMBER_FIELDS[keyword](value)
                except ValueError:
                    value = 0
            if keyword == "path":
                value = value.rstrip("/")
            setattr(self, keyword, value)

    self.pax_headers = pax_headers.copy()
<SYSTEM_TASK:>
Decode a single field from a pax record.
<END_TASK>
<USER_TASK:>
Description:
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
    """Decode a single field from a pax record.
    """
    try:
        return value.decode(encoding, "strict")
    except UnicodeDecodeError:
        return value.decode(fallback_encoding, fallback_errors)
<SYSTEM_TASK:>
Open a tar archive for reading, writing or appending. Return
<END_TASK>
<USER_TASK:>
Description:
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
    """Open a tar archive for reading, writing or appending. Return
    an appropriate TarFile class.

    mode:
    'r' or 'r:*' open for reading with transparent compression
    'r:'         open for reading exclusively uncompressed
    'r:gz'       open for reading with gzip compression
    'r:bz2'      open for reading with bzip2 compression
    'a' or 'a:'  open for appending, creating the file if necessary
    'w' or 'w:'  open for writing without compression
    'w:gz'       open for writing with gzip compression
    'w:bz2'      open for writing with bzip2 compression

    'r|*'        open a stream of tar blocks with transparent compression
    'r|'         open an uncompressed stream of tar blocks for reading
    'r|gz'       open a gzip compressed stream of tar blocks
    'r|bz2'      open a bzip2 compressed stream of tar blocks
    'w|'         open an uncompressed stream for writing
    'w|gz'       open a gzip compressed stream for writing
    'w|bz2'      open a bzip2 compressed stream for writing
    """
    if not name and not fileobj:
        raise ValueError("nothing to open")

    if mode in ("r", "r:*"):
        # Find out which *open() is appropriate for opening the file.
        for comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
            if fileobj is not None:
                saved_pos = fileobj.tell()
            try:
                return func(name, "r", fileobj, **kwargs)
            except (ReadError, CompressionError) as e:
                if fileobj is not None:
                    fileobj.seek(saved_pos)
                continue
        raise ReadError("file could not be opened successfully")

    elif ":" in mode:
        filemode, comptype = mode.split(":", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"

        # Select the *open() function according to
        # given compression.
        if comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
        else:
            raise CompressionError("unknown compression type %r" % comptype)
        return func(name, filemode, fileobj, **kwargs)

    elif "|" in mode:
        filemode, comptype = mode.split("|", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"

        if filemode not in "rw":
            raise ValueError("mode must be 'r' or 'w'")

        stream = _Stream(name, filemode, comptype, fileobj, bufsize)
        try:
            t = cls(name, filemode, stream, **kwargs)
        except:
            stream.close()
            raise
        t._extfileobj = False
        return t

    elif mode in "aw":
        return cls.taropen(name, mode, fileobj, **kwargs)

    raise ValueError("undiscernible mode")
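A brief usage sketch of the mode dispatch, with hypothetical file names:

    tf = TarFile.open("archive.tar.gz", "r:*")        # try each OPEN_METH until one succeeds
    out = TarFile.open("backup.tar.gz", "w:gz")       # ':' form routes straight to gzopen()
    stream = TarFile.open("archive.tar.gz", "r|gz")   # '|' form wraps the file in a _Stream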
<SYSTEM_TASK:>
Open uncompressed tar archive name for reading or writing.
<END_TASK>
<USER_TASK:>
Description:
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
    """Open uncompressed tar archive name for reading or writing.
    """
    if len(mode) > 1 or mode not in "raw":
        raise ValueError("mode must be 'r', 'a' or 'w'")
    return cls(name, mode, fileobj, **kwargs)
<SYSTEM_TASK:>
Open gzip compressed tar archive name for reading or writing.
<END_TASK>
<USER_TASK:>
Description:
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
    """Open gzip compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if len(mode) > 1 or mode not in "rw":
        raise ValueError("mode must be 'r' or 'w'")

    try:
        import gzip
        gzip.GzipFile
    except (ImportError, AttributeError):
        raise CompressionError("gzip module is not available")

    extfileobj = fileobj is not None
    try:
        fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
        t = cls.taropen(name, mode, fileobj, **kwargs)
    except IOError:
        if not extfileobj and fileobj is not None:
            fileobj.close()
        if fileobj is None:
            raise
        raise ReadError("not a gzip file")
    except:
        if not extfileobj and fileobj is not None:
            fileobj.close()
        raise
    t._extfileobj = extfileobj
    return t
<SYSTEM_TASK:>
Open bzip2 compressed tar archive name for reading or writing.
<END_TASK>
<USER_TASK:>
Description:
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
    """Open bzip2 compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if len(mode) > 1 or mode not in "rw":
        raise ValueError("mode must be 'r' or 'w'.")

    try:
        import bz2
    except ImportError:
        raise CompressionError("bz2 module is not available")

    if fileobj is not None:
        fileobj = _BZ2Proxy(fileobj, mode)
    else:
        fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)

    try:
        t = cls.taropen(name, mode, fileobj, **kwargs)
    except (IOError, EOFError):
        fileobj.close()
        raise ReadError("not a bzip2 file")
    t._extfileobj = False
    return t
<SYSTEM_TASK:>
Close the TarFile. In write-mode, two finishing zero blocks are
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Close the TarFile. In write-mode, two finishing zero blocks are
    appended to the archive.
    """
    if self.closed:
        return

    if self.mode in "aw":
        self.fileobj.write(NUL * (BLOCKSIZE * 2))
        self.offset += (BLOCKSIZE * 2)
        # fill up the end with zero-blocks
        # (like option -b20 for tar does)
        blocks, remainder = divmod(self.offset, RECORDSIZE)
        if remainder > 0:
            self.fileobj.write(NUL * (RECORDSIZE - remainder))

    if not self._extfileobj:
        self.fileobj.close()
    self.closed = True
<SYSTEM_TASK:>
Return a TarInfo object for member `name'. If `name' can not be
<END_TASK>
<USER_TASK:>
Description:
def getmember(self, name):
    """Return a TarInfo object for member `name'. If `name' can not be
    found in the archive, KeyError is raised. If a member occurs more
    than once in the archive, its last occurrence is assumed to be the
    most up-to-date version.
    """
    tarinfo = self._getmember(name)
    if tarinfo is None:
        raise KeyError("filename %r not found" % name)
    return tarinfo
<SYSTEM_TASK:>
Return the members of the archive as a list of TarInfo objects. The
<END_TASK>
<USER_TASK:>
Description:
def getmembers(self):
    """Return the members of the archive as a list of TarInfo objects. The
    list has the same order as the members in the archive.
    """
    self._check()
    if not self._loaded:    # if we want to obtain a list of
        self._load()        # all members, we first have to
                            # scan the whole archive.
    return self.members
<SYSTEM_TASK:>
Print a table of contents to sys.stdout. If `verbose' is False, only
<END_TASK>
<USER_TASK:>
Description:
def list(self, verbose=True):
    """Print a table of contents to sys.stdout. If `verbose' is False, only
    the names of the members are printed. If it is True, an `ls -l'-like
    output is produced.
    """
    self._check()

    for tarinfo in self:
        if verbose:
            print(filemode(tarinfo.mode), end=' ')
            print("%s/%s" % (tarinfo.uname or tarinfo.uid,
                             tarinfo.gname or tarinfo.gid), end=' ')
            if tarinfo.ischr() or tarinfo.isblk():
                print("%10s" % ("%d,%d"
                                % (tarinfo.devmajor, tarinfo.devminor)), end=' ')
            else:
                print("%10d" % tarinfo.size, end=' ')
            print("%d-%02d-%02d %02d:%02d:%02d"
                  % time.localtime(tarinfo.mtime)[:6], end=' ')

        print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')

        if verbose:
            if tarinfo.issym():
                print("->", tarinfo.linkname, end=' ')
            if tarinfo.islnk():
                print("link to", tarinfo.linkname, end=' ')
        print()
<SYSTEM_TASK:>
Extract the TarInfo object tarinfo to a physical
<END_TASK>
<USER_TASK:>
Description:
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
    """Extract the TarInfo object tarinfo to a physical
    file called targetpath.
    """
    # Fetch the TarInfo object for the given name
    # and build the destination pathname, replacing
    # forward slashes to platform specific separators.
    targetpath = targetpath.rstrip("/")
    targetpath = targetpath.replace("/", os.sep)

    # Create all upper directories.
    upperdirs = os.path.dirname(targetpath)
    if upperdirs and not os.path.exists(upperdirs):
        # Create directories that are not part of the archive with
        # default permissions.
        os.makedirs(upperdirs)

    if tarinfo.islnk() or tarinfo.issym():
        self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
    else:
        self._dbg(1, tarinfo.name)

    if tarinfo.isreg():
        self.makefile(tarinfo, targetpath)
    elif tarinfo.isdir():
        self.makedir(tarinfo, targetpath)
    elif tarinfo.isfifo():
        self.makefifo(tarinfo, targetpath)
    elif tarinfo.ischr() or tarinfo.isblk():
        self.makedev(tarinfo, targetpath)
    elif tarinfo.islnk() or tarinfo.issym():
        self.makelink(tarinfo, targetpath)
    elif tarinfo.type not in SUPPORTED_TYPES:
        self.makeunknown(tarinfo, targetpath)
    else:
        self.makefile(tarinfo, targetpath)

    if set_attrs:
        self.chown(tarinfo, targetpath)
        if not tarinfo.issym():
            self.chmod(tarinfo, targetpath)
            self.utime(tarinfo, targetpath)
<SYSTEM_TASK:>
Make a directory called targetpath.
<END_TASK>
<USER_TASK:>
Description:
def makedir(self, tarinfo, targetpath):
    """Make a directory called targetpath.
    """
    try:
        # Use a safe mode for the directory, the real mode is set
        # later in _extract_member().
        os.mkdir(targetpath, 0o700)
    except EnvironmentError as e:
        if e.errno != errno.EEXIST:
            raise
<SYSTEM_TASK:>
Make a file called targetpath.
<END_TASK>
<USER_TASK:>
Description:
def makefile(self, tarinfo, targetpath):
    """Make a file called targetpath.
    """
    source = self.fileobj
    source.seek(tarinfo.offset_data)
    target = bltn_open(targetpath, "wb")
    if tarinfo.sparse is not None:
        for offset, size in tarinfo.sparse:
            target.seek(offset)
            copyfileobj(source, target, size)
    else:
        copyfileobj(source, target, tarinfo.size)
    target.seek(tarinfo.size)
    target.truncate()
    target.close()
<SYSTEM_TASK:>
Make a file from a TarInfo object with an unknown type
<END_TASK>
<USER_TASK:>
Description:
def makeunknown(self, tarinfo, targetpath):
    """Make a file from a TarInfo object with an unknown type
    at targetpath.
    """
    self.makefile(tarinfo, targetpath)
    self._dbg(1, "tarfile: Unknown file type %r, "
                 "extracted as regular file." % tarinfo.type)
<SYSTEM_TASK:>
Make a fifo called targetpath.
<END_TASK>
<USER_TASK:>
Description:
def makefifo(self, tarinfo, targetpath):
    """Make a fifo called targetpath.
    """
    if hasattr(os, "mkfifo"):
        os.mkfifo(targetpath)
    else:
        raise ExtractError("fifo not supported by system")
<SYSTEM_TASK:>
Make a character or block device called targetpath.
<END_TASK>
<USER_TASK:>
Description:
def makedev(self, tarinfo, targetpath):
    """Make a character or block device called targetpath.
    """
    if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
        raise ExtractError("special devices not supported by system")

    mode = tarinfo.mode
    if tarinfo.isblk():
        mode |= stat.S_IFBLK
    else:
        mode |= stat.S_IFCHR

    os.mknod(targetpath, mode,
             os.makedev(tarinfo.devmajor, tarinfo.devminor))
<SYSTEM_TASK:>
Set owner of targetpath according to tarinfo.
<END_TASK>
<USER_TASK:>
Description:
def chown(self, tarinfo, targetpath):
    """Set owner of targetpath according to tarinfo.
    """
    if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
        # We have to be root to do so.
        try:
            g = grp.getgrnam(tarinfo.gname)[2]
        except KeyError:
            g = tarinfo.gid
        try:
            u = pwd.getpwnam(tarinfo.uname)[2]
        except KeyError:
            u = tarinfo.uid
        try:
            if tarinfo.issym() and hasattr(os, "lchown"):
                os.lchown(targetpath, u, g)
            else:
                if sys.platform != "os2emx":
                    os.chown(targetpath, u, g)
        except EnvironmentError as e:
            raise ExtractError("could not change owner")
<SYSTEM_TASK:>
Set file permissions of targetpath according to tarinfo.
<END_TASK>
<USER_TASK:>
Description:
def chmod(self, tarinfo, targetpath):
    """Set file permissions of targetpath according to tarinfo.
    """
    if hasattr(os, 'chmod'):
        try:
            os.chmod(targetpath, tarinfo.mode)
        except EnvironmentError as e:
            raise ExtractError("could not change mode")
<SYSTEM_TASK:>
Set modification time of targetpath according to tarinfo.
<END_TASK>
<USER_TASK:>
Description:
def utime(self, tarinfo, targetpath):
    """Set modification time of targetpath according to tarinfo.
    """
    if not hasattr(os, 'utime'):
        return
    try:
        os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
    except EnvironmentError as e:
        raise ExtractError("could not change modification time")
<SYSTEM_TASK:>
Return the next member of the archive as a TarInfo object, when
<END_TASK>
<USER_TASK:>
Description:
def next(self):
    """Return the next member of the archive as a TarInfo object, when
    TarFile is opened for reading. Return None if there is no more
    available.
    """
    self._check("ra")
    if self.firstmember is not None:
        m = self.firstmember
        self.firstmember = None
        return m

    # Read the next block.
    self.fileobj.seek(self.offset)
    tarinfo = None
    while True:
        try:
            tarinfo = self.tarinfo.fromtarfile(self)
        except EOFHeaderError as e:
            if self.ignore_zeros:
                self._dbg(2, "0x%X: %s" % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
        except InvalidHeaderError as e:
            if self.ignore_zeros:
                self._dbg(2, "0x%X: %s" % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
            elif self.offset == 0:
                raise ReadError(str(e))
        except EmptyHeaderError:
            if self.offset == 0:
                raise ReadError("empty file")
        except TruncatedHeaderError as e:
            if self.offset == 0:
                raise ReadError(str(e))
        except SubsequentHeaderError as e:
            raise ReadError(str(e))
        break

    if tarinfo is not None:
        self.members.append(tarinfo)
    else:
        self._loaded = True

    return tarinfo
<SYSTEM_TASK:>
Find an archive member by name from bottom to top.
<END_TASK>
<USER_TASK:>
Description:
def _getmember(self, name, tarinfo=None, normalize=False):
    """Find an archive member by name from bottom to top.
    If tarinfo is given, it is used as the starting point.
    """
    # Ensure that all members have been loaded.
    members = self.getmembers()

    # Limit the member search list up to tarinfo.
    if tarinfo is not None:
        members = members[:members.index(tarinfo)]

    if normalize:
        name = os.path.normpath(name)

    for member in reversed(members):
        if normalize:
            member_name = os.path.normpath(member.name)
        else:
            member_name = member.name

        if name == member_name:
            return member
<SYSTEM_TASK:>
Read through the entire archive file and look for readable
<END_TASK>
<USER_TASK:>
Description:
def _load(self):
    """Read through the entire archive file and look for readable
    members.
    """
    while True:
        tarinfo = self.next()
        if tarinfo is None:
            break
    self._loaded = True
<SYSTEM_TASK:>
Check if TarFile is still open, and if the operation's mode
<END_TASK>
<USER_TASK:>
Description:
def _check(self, mode=None):
    """Check if TarFile is still open, and if the operation's mode
    corresponds to TarFile's mode.
    """
    if self.closed:
        raise IOError("%s is closed" % self.__class__.__name__)
    if mode is not None and self.mode not in mode:
        raise IOError("bad operation for mode %r" % self.mode)
<SYSTEM_TASK:>
Find the target member of a symlink or hardlink member in the
<END_TASK>
<USER_TASK:>
Description:
def _find_link_target(self, tarinfo):
    """Find the target member of a symlink or hardlink member in the
    archive.
    """
    if tarinfo.issym():
        # Always search the entire archive.
        linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
        limit = None
    else:
        # Search the archive before the link, because a hard link is
        # just a reference to an already archived file.
        linkname = tarinfo.linkname
        limit = tarinfo

    member = self._getmember(linkname, tarinfo=limit, normalize=True)
    if member is None:
        raise KeyError("linkname %r not found" % linkname)
    return member
<SYSTEM_TASK:>
Returns the absolute path to a given relative path.
<END_TASK>
<USER_TASK:>
Description:
def path_to(self, p):
    """Returns the absolute path to a given relative path."""
    if os.path.isabs(p):
        return p
    return os.sep.join([self._original_dir, p])
<SYSTEM_TASK:>
Returns a list of packages for pip-tools to consume.
<END_TASK>
<USER_TASK:>
Description:
def _build_package_list(self, package_section):
    """Returns a list of packages for pip-tools to consume."""
    from pipenv.vendor.requirementslib.utils import is_vcs
    ps = {}
    # TODO: Separate the logic for showing packages from the filters for supplying pip-tools
    for k, v in self.parsed_pipfile.get(package_section, {}).items():
        # Skip editable VCS deps.
        if hasattr(v, "keys"):
            # When a vcs url is given without editable it only appears as a key.
            # Eliminate any vcs, path, or url entries which are not editable,
            # since pip-tools can't do deep resolution on them, even
            # setuptools-installable ones.
            if (
                is_vcs(v)
                or is_vcs(k)
                or (is_installable_file(k) or is_installable_file(v))
                or any(
                    (
                        prefix in v
                        and (os.path.isfile(v[prefix]) or is_valid_url(v[prefix]))
                    )
                    for prefix in ["path", "file"]
                )
            ):
                # If they are editable, do resolve them.
                if "editable" not in v:
                    # Allow wheels to be passed through.
                    if not (
                        hasattr(v, "keys")
                        and v.get("path", v.get("file", "")).endswith(".whl")
                    ):
                        continue
                    ps.update({k: v})
                else:
                    ps.update({k: v})
            else:
                ps.update({k: v})
        else:
            # Since these entries have no attributes we know they are not editable,
            # so we can safely exclude things that need to be editable in order to
            # be resolved. First exclude anything that is a vcs entry either in the
            # key or value.
            if not (
                any(is_vcs(i) for i in [k, v])
                or
                # Then exclude any installable files that are not directories,
                # because pip-tools can resolve setup.py for example.
                any(is_installable_file(i) for i in [k, v])
                or
                # Then exclude any URLs because they need to be editable also.
                # Things that are excluded can only be 'shallow resolved'.
                any(is_valid_url(i) for i in [k, v])
            ):
                ps.update({k: v})
    return ps
<SYSTEM_TASK:>
Get the name of the virtualenv adjusted for windows if needed
<END_TASK>
<USER_TASK:>
Description:
def _get_virtualenv_hash(self, name):
    """Get the name of the virtualenv adjusted for windows if needed

    Returns (name, encoded_hash)
    """
    def get_name(name, location):
        name = self._sanitize(name)
        hash = hashlib.sha256(location.encode()).digest()[:6]
        encoded_hash = base64.urlsafe_b64encode(hash).decode()
        return name, encoded_hash[:8]

    clean_name, encoded_hash = get_name(name, self.pipfile_location)
    venv_name = "{0}-{1}".format(clean_name, encoded_hash)

    # This should work most of the time for
    #   Case-sensitive filesystems,
    #   In-project venv
    #   "Proper" path casing (on non-case-sensitive filesystems).
    if (
        not fnmatch.fnmatch("A", "a")
        or self.is_venv_in_project()
        or get_workon_home().joinpath(venv_name).exists()
    ):
        return clean_name, encoded_hash

    # Check for different capitalization of the same project.
    for path in get_workon_home().iterdir():
        if not is_virtual_environment(path):
            continue
        try:
            env_name, hash_ = path.name.rsplit("-", 1)
        except ValueError:
            continue
        if len(hash_) != 8 or env_name.lower() != name.lower():
            continue
        return get_name(env_name, self.pipfile_location.replace(name, env_name))

    # Use the default if no matching env exists.
    return clean_name, encoded_hash
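The naming scheme itself is easy to reproduce. A sketch with a hypothetical project location, using only the standard library:

    import base64, hashlib

    location = "/home/user/myproject/Pipfile"
    digest = hashlib.sha256(location.encode()).digest()[:6]
    suffix = base64.urlsafe_b64encode(digest).decode()[:8]  # 6 bytes -> exactly 8 chars
    venv_name = "myproject-" + suffix                       # the "<name>-<hash>" pattern above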
<SYSTEM_TASK:>
Registers a proper name to the database.
<END_TASK>
<USER_TASK:>
Description:
def register_proper_name(self, name):
    """Registers a proper name to the database."""
    with self.proper_names_db_path.open("a") as f:
        f.write(u"{0}\n".format(name))
<SYSTEM_TASK:>
Pipfile.lock divided by PyPI and external dependencies.
<END_TASK>
<USER_TASK:>
Description:
def _lockfile(self):
    """Pipfile.lock divided by PyPI and external dependencies."""
    pfile = pipfile.load(self.pipfile_location, inject_env=False)
    lockfile = json.loads(pfile.lock())
    for section in ("default", "develop"):
        lock_section = lockfile.get(section, {})
        for key in list(lock_section.keys()):
            norm_key = pep423_name(key)
            lockfile[section][norm_key] = lock_section.pop(key)
    return lockfile
<SYSTEM_TASK:>
Creates the Pipfile, filled with juicy defaults.
<END_TASK>
<USER_TASK:>
Description:
def create_pipfile(self, python=None):
    """Creates the Pipfile, filled with juicy defaults."""
    from .vendor.pip_shims.shims import (
        ConfigOptionParser, make_option_group, index_group
    )

    config_parser = ConfigOptionParser(name=self.name)
    config_parser.add_option_group(make_option_group(index_group, config_parser))
    install = config_parser.option_groups[0]
    indexes = (
        " ".join(install.get_option("--extra-index-url").default)
        .lstrip("\n")
        .split("\n")
    )
    sources = [DEFAULT_SOURCE,]
    for i, index in enumerate(indexes):
        if not index:
            continue
        source_name = "pip_index_{}".format(i)
        verify_ssl = index.startswith("https")
        sources.append(
            {u"url": index, u"verify_ssl": verify_ssl, u"name": source_name}
        )

    data = {
        u"source": sources,
        # Default packages.
        u"packages": {},
        u"dev-packages": {},
    }

    # Default requires.
    required_python = python
    if not python:
        if self.virtualenv_location:
            required_python = self.which("python", self.virtualenv_location)
        else:
            required_python = self.which("python")
    version = python_version(required_python) or PIPENV_DEFAULT_PYTHON_VERSION
    if version and len(version) >= 3:
        data[u"requires"] = {"python_version": version[: len("2.7")]}

    self.write_toml(data)
<SYSTEM_TASK:>
Writes the given data structure out as TOML.
<END_TASK>
<USER_TASK:>
Description:
def write_toml(self, data, path=None):
    """Writes the given data structure out as TOML."""
    if path is None:
        path = self.pipfile_location
    data = convert_toml_outline_tables(data)
    try:
        formatted_data = tomlkit.dumps(data).rstrip()
    except Exception:
        document = tomlkit.document()
        for section in ("packages", "dev-packages"):
            document[section] = tomlkit.container.Table()
            # Convert things to inline tables — fancy :)
            for package in data.get(section, {}):
                if hasattr(data[section][package], "keys"):
                    table = tomlkit.inline_table()
                    table.update(data[section][package])
                    document[section][package] = table
                else:
                    document[section][package] = tomlkit.string(data[section][package])
        formatted_data = tomlkit.dumps(document).rstrip()

    if (
        vistir.compat.Path(path).absolute()
        == vistir.compat.Path(self.pipfile_location).absolute()
    ):
        newlines = self._pipfile_newlines
    else:
        newlines = DEFAULT_NEWLINES
    formatted_data = cleanup_toml(formatted_data)
    with io.open(path, "w", newline=newlines) as f:
        f.write(formatted_data)
    # pipfile is mutated!
    self.clear_pipfile_cache()
<SYSTEM_TASK:>
Write out the lockfile.
<END_TASK>
<USER_TASK:>
Description:
def write_lockfile(self, content):
    """Write out the lockfile.
    """
    s = self._lockfile_encoder.encode(content)
    open_kwargs = {"newline": self._lockfile_newlines, "encoding": "utf-8"}
    with vistir.contextmanagers.atomic_open_for_write(
        self.lockfile_location, **open_kwargs
    ) as f:
        f.write(s)
        # Write newline at end of document. GH-319.
        # Only need '\n' here; the file object handles the rest.
        if not s.endswith(u"\n"):
            f.write(u"\n")
<SYSTEM_TASK:>
Given a source, find it.
<END_TASK>
<USER_TASK:>
Description:
def find_source(self, source):
    """Given a source, find it.

    source can be a url or an index name.
    """
    if not is_valid_url(source):
        try:
            source = self.get_source(name=source)
        except SourceNotFound:
            source = self.get_source(url=source)
    else:
        source = self.get_source(url=source)
    return source
<SYSTEM_TASK:>
Get the equivalent package name in pipfile
<END_TASK>
<USER_TASK:>
Description:
def get_package_name_in_pipfile(self, package_name, dev=False):
    """Get the equivalent package name in pipfile"""
    key = "dev-packages" if dev else "packages"
    section = self.parsed_pipfile.get(key, {})
    package_name = pep423_name(package_name)
    for name in section.keys():
        if pep423_name(name) == package_name:
            return name
    return None
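Because both sides are normalized through pep423_name, the lookup is tolerant of casing. A hypothetical illustration, assuming a `project` instance whose Pipfile contains `Django = "*"`:

    project.get_package_name_in_pipfile("django")    # -> "Django", the key as written
    project.get_package_name_in_pipfile("missing")   # -> None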
<SYSTEM_TASK:>
Adds a given index to the Pipfile.
<END_TASK>
<USER_TASK:>
Description:
def add_index_to_pipfile(self, index, verify_ssl=True):
    """Adds a given index to the Pipfile."""
    # Read and append Pipfile.
    p = self.parsed_pipfile
    try:
        self.get_source(url=index)
    except SourceNotFound:
        source = {"url": index, "verify_ssl": verify_ssl}
    else:
        return
    source["name"] = self.src_name_from_url(index)
    # Add the package to the group.
    if "source" not in p:
        p["source"] = [source]
    else:
        p["source"].append(source)
    # Write Pipfile.
    self.write_toml(p)
<SYSTEM_TASK:>
Ensures proper casing of Pipfile packages
<END_TASK>
<USER_TASK:>
Description:
def ensure_proper_casing(self):
    """Ensures proper casing of Pipfile packages"""
    pfile = self.parsed_pipfile
    casing_changed = self.proper_case_section(pfile.get("packages", {}))
    casing_changed |= self.proper_case_section(pfile.get("dev-packages", {}))
    return casing_changed
<SYSTEM_TASK:>
Verify proper casing is retrieved, when available, for each
<END_TASK>
<USER_TASK:>
Description:
def proper_case_section(self, section):
    """Verify proper casing is retrieved, when available, for each
    dependency in the section.
    """
    # Casing for section.
    changed_values = False
    unknown_names = [k for k in section.keys() if k not in set(self.proper_names)]
    # Replace each package with proper casing.
    for dep in unknown_names:
        try:
            # Get new casing for package name.
            new_casing = proper_case(dep)
        except IOError:
            # Unable to normalize package name.
            continue
        if new_casing != dep:
            changed_values = True
            self.register_proper_name(new_casing)
            # Replace old value with new value.
            old_value = section[dep]
            section[new_casing] = old_value
            del section[dep]
    # Return whether or not values have been changed.
    return changed_values
<SYSTEM_TASK:>
Reverse the order of all items in the dictionary.
<END_TASK>
<USER_TASK:>
Description:
def reverse(self):
    """Reverse the order of all items in the dictionary.

    Example:
        omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)])
        omd.reverse()
        omd.allitems() == [(3,3), (2,2), (1,111), (1,11), (1,1)]

    Returns: <self>.
    """
    for key in six.iterkeys(self._map):
        self._map[key].reverse()
    self._items.reverse()
    return self
<SYSTEM_TASK:>
Given an iterable of arguments and an iterable of nargs specifications,
<END_TASK>
<USER_TASK:>
Description:
def _unpack_args(args, nargs_spec):
    """Given an iterable of arguments and an iterable of nargs specifications,
    it returns a tuple with all the unpacked arguments at the first index
    and all remaining arguments as the second.

    The nargs specification is the number of arguments that should be consumed
    or `-1` to indicate that this position should eat up all the remainders.

    Missing items are filled with `None`.
    """
    args = deque(args)
    nargs_spec = deque(nargs_spec)
    rv = []
    spos = None

    def _fetch(c):
        try:
            if spos is None:
                return c.popleft()
            else:
                return c.pop()
        except IndexError:
            return None

    while nargs_spec:
        nargs = _fetch(nargs_spec)
        if nargs == 1:
            rv.append(_fetch(args))
        elif nargs > 1:
            x = [_fetch(args) for _ in range(nargs)]
            # If we're reversed, we're pulling in the arguments in reverse,
            # so we need to turn them around.
            if spos is not None:
                x.reverse()
            rv.append(tuple(x))
        elif nargs < 0:
            if spos is not None:
                raise TypeError('Cannot have two nargs < 0')
            spos = len(rv)
            rv.append(None)

    # spos is the position of the wildcard (star). If it's not `None`,
    # we fill it with the remainder.
    if spos is not None:
        rv[spos] = tuple(args)
        args = []
        rv[spos + 1:] = reversed(rv[spos + 1:])

    return tuple(rv), list(args)
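A worked example against a hypothetical argument list, with the `-1` wildcard in the middle:

    unpacked, remaining = _unpack_args(["a", "b", "c", "d", "e"], [1, -1, 1])
    assert unpacked == ("a", ("b", "c", "d"), "e")   # fixed args peel off both ends
    assert remaining == []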
<SYSTEM_TASK:>
Given an argument string this attempts to split it into small parts.
<END_TASK>
<USER_TASK:>
Description:
def split_arg_string(string):
    """Given an argument string this attempts to split it into small parts."""
    rv = []
    for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                             r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
                             r'|\S+)\s*', string, re.S):
        arg = match.group().strip()
        if arg[:1] == arg[-1:] and arg[:1] in '"\'':
            arg = arg[1:-1].encode('ascii', 'backslashreplace') \
                .decode('unicode-escape')
        try:
            arg = type(string)(arg)
        except UnicodeError:
            pass
        rv.append(arg)
    return rv
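For instance, quoted segments stay grouped while unquoted tokens split on whitespace:

    assert split_arg_string("pip install 'my pkg' --flag") == [
        "pip", "install", "my pkg", "--flag"
    ]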
<SYSTEM_TASK:>
Adds a positional argument named `dest` to the parser.
<END_TASK>
<USER_TASK:>
Description:
def add_argument(self, dest, nargs=1, obj=None):
    """Adds a positional argument named `dest` to the parser.

    The `obj` can be used to identify the option in the order list
    that is returned from the parser.
    """
    if obj is None:
        obj = dest
    self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
<SYSTEM_TASK:>
Makes a dependency graph from the given distributions.
<END_TASK>
<USER_TASK:>
Description:
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
""" |
    scheme = get_scheme(scheme)
    graph = DependencyGraph()
    provided = {}  # maps names to lists of (version, dist) tuples

    # first, build the graph and find out what's provided
    for dist in dists:
        graph.add_distribution(dist)

        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            provided.setdefault(name, []).append((version, dist))

    # now make the edges
    for dist in dists:
        requires = (dist.run_requires | dist.meta_requires |
                    dist.build_requires | dist.dev_requires)
        for req in requires:
            try:
                matcher = scheme.matcher(req)
            except UnsupportedVersionError:
                # XXX compat-mode if cannot read the version
                logger.warning('could not read version %r - using name only',
                               req)
                name = req.split()[0]
                matcher = scheme.matcher(name)

            name = matcher.key  # case-insensitive

            matched = False
            if name in provided:
                for version, provider in provided[name]:
                    try:
                        match = matcher.match(version)
                    except UnsupportedVersionError:
                        match = False

                    if match:
                        graph.add_edge(dist, provider, req)
                        matched = True
                        break
            if not matched:
                graph.add_missing(dist, req)
    return graph |
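A hedged usage sketch, assuming the surrounding distlib context (distlib.database provides both DistributionPath and make_graph):

from distlib.database import DistributionPath, make_graph

dists = list(DistributionPath().get_distributions())
graph = make_graph(dists)
# graph.missing maps each distribution to its unsatisfied requirements.
for dist, reqs in graph.missing.items():
    print(dist.name, 'is missing:', sorted(reqs))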
<SYSTEM_TASK:>
A convenience method for making a dist given just a name and version.
<END_TASK>
<USER_TASK:>
Description:
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
""" |
    summary = kwargs.pop('summary', 'Placeholder for summary')
    md = Metadata(**kwargs)
    md.name = name
    md.version = version
    md.summary = summary or 'Placeholder for summary'
    return Distribution(md) |
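Minimal usage, assuming distlib's Metadata and Distribution classes are in scope; extra keyword arguments flow into the Metadata constructor:

dist = make_dist('example-pkg', '1.0.0', summary='Example distribution')
print(dist.name, dist.version)  # example-pkg 1.0.0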
<SYSTEM_TASK:>
Clear the cache, setting it to its initial state.
<END_TASK>
<USER_TASK:>
Description:
def clear(self):
"""
Clear the cache, setting it to its initial state.
""" |
    self.name.clear()
    self.path.clear()
    self.generated = False |
<SYSTEM_TASK:>
Scan the path for distributions and populate the cache with
<END_TASK>
<USER_TASK:>
Description:
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
""" |
    gen_dist = not self._cache.generated
    gen_egg = self._include_egg and not self._cache_egg.generated
    if gen_dist or gen_egg:
        for dist in self._yield_distributions():
            if isinstance(dist, InstalledDistribution):
                self._cache.add(dist)
            else:
                self._cache_egg.add(dist)

        if gen_dist:
            self._cache.generated = True
        if gen_egg:
            self._cache_egg.generated = True |
<SYSTEM_TASK:>
Looks for a named distribution on the path.
<END_TASK>
<USER_TASK:>
Description:
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
""" |
    result = None
    name = name.lower()
    if not self._cache_enabled:
        for dist in self._yield_distributions():
            if dist.key == name:
                result = dist
                break
    else:
        self._generate_cache()

        if name in self._cache.name:
            result = self._cache.name[name][0]
        elif self._include_egg and name in self._cache_egg.name:
            result = self._cache_egg.name[name][0]
    return result |
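A hedged usage sketch via distlib's DistributionPath, to which this method belongs:

from distlib.database import DistributionPath

dp = DistributionPath(include_egg=True)  # also consider .egg-info installs
dist = dp.get_distribution('requests')
if dist is not None:
    print(dist.name, dist.version)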
<SYSTEM_TASK:>
Return all of the exported entries in a particular category.
<END_TASK>
<USER_TASK:>
Description:
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
""" |
    for dist in self.get_distributions():
        r = dist.exports
        if category in r:
            d = r[category]
            if name is not None:
                if name in d:
                    yield d[name]
            else:
                for v in d.values():
                    yield v |
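A hedged sketch via distlib's DistributionPath, which exposes this method; each yielded value is an ExportEntry:

from distlib.database import DistributionPath

# List console-script entry points visible on the default path.
for entry in DistributionPath().get_exported_entries('console_scripts'):
    print(entry.name, '=', '%s:%s' % (entry.prefix, entry.suffix))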
<SYSTEM_TASK:>
Get the hash of some data, using a particular hash algorithm, if
<END_TASK>
<USER_TASK:>
Description:
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
``'sha224'``, ``'sha384'``, ``'sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
""" |
    if hasher is None:
        hasher = self.hasher
    if hasher is None:
        hasher = hashlib.md5
        prefix = ''
    else:
        # Capture the algorithm name before resolving it, so the prefix
        # always reflects the hasher actually used (as the docstring states).
        prefix = '%s=' % hasher
        hasher = getattr(hashlib, hasher)
    digest = hasher(data).digest()
    digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
    return '%s%s' % (prefix, digest) |
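The digest encoding can be reproduced standalone; a sketch of the scheme itself, not distlib-specific code:

import base64
import hashlib

digest = hashlib.sha256(b'hello').digest()
encoded = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
print('sha256=' + encoded)  # prefix + unpadded urlsafe-base64 digest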
<SYSTEM_TASK:>
Read exports data from a file in .ini format.
<END_TASK>
<USER_TASK:>
Description:
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
""" |
    result = {}
    r = self.get_distinfo_resource(EXPORTS_FILENAME)
    if r:
        with contextlib.closing(r.as_stream()) as stream:
            result = read_exports(stream)
    return result |
<SYSTEM_TASK:>
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
<END_TASK>
<USER_TASK:>
Description:
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
""" |
    prefix = os.path.join(prefix, '')
    base = os.path.dirname(self.path)
    base_under_prefix = base.startswith(prefix)
    base = os.path.join(base, '')
    record_path = self.get_distinfo_file('RECORD')
    logger.info('creating %s', record_path)
    if dry_run:
        return None
    with CSVWriter(record_path) as writer:
        for path in paths:
            if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
                # do not put size and hash, as in PEP-376
                hash_value = size = ''
            else:
                size = '%d' % os.path.getsize(path)
                with open(path, 'rb') as fp:
                    hash_value = self.get_hash(fp.read())
            if path.startswith(base) or (base_under_prefix and
                                         path.startswith(prefix)):
                path = os.path.relpath(path, base)
            writer.writerow((path, hash_value, size))

        # add the RECORD file itself
        if record_path.startswith(base):
            record_path = os.path.relpath(record_path, base)
        writer.writerow((record_path, '', ''))
    return record_path |
<SYSTEM_TASK:>
Returns a path located under the ``.dist-info`` directory. Returns a
<END_TASK>
<USER_TASK:>
Description:
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
""" |
    # Check if it is an absolute path  # XXX use relpath, add tests
    if path.find(os.sep) >= 0:
        # it's an absolute path?
        distinfo_dirname, path = path.split(os.sep)[-2:]
        if distinfo_dirname != self.path.split(os.sep)[-1]:
            raise DistlibException(
                'dist-info file %r does not belong to the %r %s '
                'distribution' % (path, self.name, self.version))

    # The file must be relative
    if path not in DIST_FILES:
        raise DistlibException('invalid path for a dist-info file: '
                               '%r at %r' % (path, self.path))

    return os.path.join(self.path, path) |
<SYSTEM_TASK:>
Iterates over the ``RECORD`` entries and returns paths for each line if
<END_TASK>
<USER_TASK:>
Description:
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
""" |
    base = os.path.dirname(self.path)
    for path, checksum, size in self._get_records():
        # XXX add separator or use real relpath algo
        if not os.path.isabs(path):
            path = os.path.join(base, path)
        if path.startswith(self.path):
            yield path |
<SYSTEM_TASK:>
Iterates over the ``installed-files.txt`` entries and returns paths for
<END_TASK>
<USER_TASK:>
Description:
def list_distinfo_files(self, absolute=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter absolute: If *absolute* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type absolute: boolean
:returns: iterator of paths
""" |
    record_path = os.path.join(self.path, 'installed-files.txt')
    if os.path.exists(record_path):
        skip = True
        with codecs.open(record_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line == './':
                    skip = False
                    continue
                if not skip:
                    p = os.path.normpath(os.path.join(self.path, line))
                    if p.startswith(self.path):
                        if absolute:
                            yield p
                        else:
                            yield line |
<SYSTEM_TASK:>
Prints only a subgraph
<END_TASK>
<USER_TASK:>
Description:
def repr_node(self, dist, level=1):
"""Prints only a subgraph""" |
    output = [self._repr_dist(dist)]
    for other, label in self.adjacency_list[dist]:
        # use a separate name so the `dist` argument is not shadowed
        dist_repr = self._repr_dist(other)
        if label is not None:
            dist_repr = '%s [%s]' % (dist_repr, label)
        output.append(' ' * level + str(dist_repr))
        suboutput = self.repr_node(other, level + 1)
        subs = suboutput.split('\n')
        output.extend(subs[1:])
    return '\n'.join(output) |
<SYSTEM_TASK:>
Cerberus error messages expect regular binary strings.
<END_TASK>
<USER_TASK:>
Description:
def encode_unicode(f):
"""Cerberus error messages expect regular binary strings.
If unicode is used in a ValidationError, the message can't be printed.
This decorator ensures that, when legacy Python is used, unicode
strings are encoded before being passed to a function.
""" |
    @wraps(f)
    def wrapped(obj, error):
        def _encode(value):
            """Helper encoding unicode strings into binary utf-8"""
            if isinstance(value, unicode):  # noqa: F821
                return value.encode('utf-8')
            return value

        error = copy(error)
        error.document_path = _encode(error.document_path)
        error.schema_path = _encode(error.schema_path)
        error.constraint = _encode(error.constraint)
        error.value = _encode(error.value)
        error.info = _encode(error.info)
        return f(obj, error)

    return wrapped if PYTHON_VERSION < 3 else f |
<SYSTEM_TASK:>
Add an error to the tree.
<END_TASK>
<USER_TASK:>
Description:
def add(self, error):
""" Add an error to the tree.
:param error: :class:`~cerberus.errors.ValidationError`
""" |
    if not self._path_of_(error):
        self.errors.append(error)
        self.errors.sort()
    else:
        super(ErrorTree, self).add(error) |
<SYSTEM_TASK:>
Returns all errors for a particular path.
<END_TASK>
<USER_TASK:>
Description:
def fetch_errors_from(self, path):
""" Returns all errors for a particular path.
:param path: :class:`tuple` of :term:`hashable` s.
:rtype: :class:`~cerberus.errors.ErrorList`
""" |
    node = self.fetch_node_from(path)
    if node is not None:
        return node.errors
    else:
        return ErrorList() |
<SYSTEM_TASK:>
Returns a node for a path.
<END_TASK>
<USER_TASK:>
Description:
def fetch_node_from(self, path):
""" Returns a node for a path.
:param path: Tuple of :term:`hashable` s.
:rtype: :class:`~cerberus.errors.ErrorTreeNode` or :obj:`None`
""" |
    context = self
    for key in path:
        context = context[key]
        if context is None:
            break
    return context |
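A hedged sketch with cerberus, where a validator exposes its error trees after validation:

from cerberus import Validator

v = Validator({'age': {'type': 'integer'}})
v.validate({'age': 'not a number'})
tree = v.document_error_tree
node = tree.fetch_node_from(('age',))      # ErrorTreeNode or None
errors = tree.fetch_errors_from(('age',))  # ErrorList (possibly empty)
print(node is not None, len(errors))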
<SYSTEM_TASK:>
Recursively rewrites the error path to correctly represent logic errors
<END_TASK>
<USER_TASK:>
Description:
def _rewrite_error_path(self, error, offset=0):
"""
Recursively rewrites the error path to correctly represent logic errors
""" |
    if error.is_logic_error:
        self._rewrite_logic_error_path(error, offset)
    elif error.is_group_error:
        self._rewrite_group_error_path(error, offset) |
<SYSTEM_TASK:>
Run command with environment variables present.
<END_TASK>
<USER_TASK:>
Description:
def run(ctx, commandline):
"""Run command with environment variables present.""" |
    file = ctx.obj['FILE']
    dotenv_as_dict = dotenv_values(file)
    if not commandline:
        click.echo('No command given.')
        exit(1)
    ret = run_command(commandline, dotenv_as_dict)
    exit(ret) |
<SYSTEM_TASK:>
Check whether the distribution is in the current Python installation.
<END_TASK>
<USER_TASK:>
Description:
def _is_installation_local(name):
"""Check whether the distribution is in the current Python installation.
This is used to distinguish packages seen by a virtual environment. A venv
may be able to see global packages, but we don't want to mess with them.
""" |
    loc = os.path.normcase(pkg_resources.working_set.by_key[name].location)
    pre = os.path.normcase(sys.prefix)
    return os.path.commonprefix([loc, pre]) == pre |
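Note that os.path.commonprefix compares strings character by character rather than path components; a small standalone illustration of the test:

import os

loc = os.path.normcase('/usr/lib/python3.7/site-packages')
pre = os.path.normcase('/usr/lib/python3.7')
assert os.path.commonprefix([loc, pre]) == pre  # loc lives under the prefix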
<SYSTEM_TASK:>
Group locally installed packages based on given specifications.
<END_TASK>
<USER_TASK:>
Description:
def _group_installed_names(packages):
"""Group locally installed packages based on given specifications.
`packages` is a name-package mapping that are used as baseline to
determine how the installed package should be grouped.
Returns a ``GroupCollection`` of disjoint sets, all containing names of
installed packages:
* `uptodate`: These match the specifications.
* `outdated`: These installations are specified, but don't match the
specifications in `packages`.
* `unneeded`: These are installed, but not specified in `packages`.
""" |
    groupcoll = GroupCollection(set(), set(), set(), set())

    for distro in pkg_resources.working_set:
        name = distro.key
        try:
            package = packages[name]
        except KeyError:
            groupcoll.unneeded.add(name)
            continue

        r = requirementslib.Requirement.from_pipfile(name, package)
        if not r.is_named:
            # Always mark non-named. I think pip does something similar?
            groupcoll.outdated.add(name)
        elif not _is_up_to_date(distro, r.get_version()):
            groupcoll.outdated.add(name)
        else:
            groupcoll.uptodate.add(name)

    return groupcoll |
<SYSTEM_TASK:>
Find all files under the base and set ``allfiles`` to the absolute
<END_TASK>
<USER_TASK:>
Description:
def findall(self):
"""Find all files under the base and set ``allfiles`` to the absolute
pathnames of files found.
""" |
    from stat import S_ISREG, S_ISDIR, S_ISLNK

    self.allfiles = allfiles = []
    root = self.base
    stack = [root]
    pop = stack.pop
    push = stack.append

    while stack:
        root = pop()
        names = os.listdir(root)

        for name in names:
            fullname = os.path.join(root, name)

            # Avoid excess stat calls -- just one will do, thank you!
            stat = os.stat(fullname)
            mode = stat.st_mode
            if S_ISREG(mode):
                allfiles.append(fsdecode(fullname))
            elif S_ISDIR(mode) and not S_ISLNK(mode):
                push(fullname) |
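A hedged usage sketch; in distlib this method lives on distlib.manifest.Manifest:

from distlib.manifest import Manifest

m = Manifest('.')  # base directory to scan
m.findall()
print(len(m.allfiles), 'files found under', m.base)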