<SYSTEM_TASK:>
Check virtualenv membership dynamically
<END_TASK>
<USER_TASK:>
Description:
def is_in_virtualenv():
"""
Check virtualenv membership dynamically
:return: True or False, depending on whether we are in a regular virtualenv
:rtype: bool
""" |
pipenv_active = os.environ.get("PIPENV_ACTIVE", False)
virtual_env = None
use_system = False
ignore_virtualenvs = bool(os.environ.get("PIPENV_IGNORE_VIRTUALENVS", False))
if not pipenv_active and not ignore_virtualenvs:
virtual_env = os.environ.get("VIRTUAL_ENV")
use_system = bool(virtual_env)
return (use_system or virtual_env) and not (pipenv_active or ignore_virtualenvs) |
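A minimal sketch of the environment-variable interplay, assuming the function above is in scope (the paths are hypothetical):

import os

os.environ.pop("PIPENV_ACTIVE", None)
os.environ.pop("PIPENV_IGNORE_VIRTUALENVS", None)
os.environ["VIRTUAL_ENV"] = "/home/user/.venvs/demo"  # hypothetical venv path
print(is_in_virtualenv())  # truthy: a regular virtualenv is active

os.environ["PIPENV_ACTIVE"] = "1"
print(is_in_virtualenv())  # falsy (None here): Pipenv-managed venvs are excluded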
<SYSTEM_TASK:>
Unpack an object from `packed`.
<END_TASK>
<USER_TASK:>
Description:
def unpackb(packed, **kwargs):
"""
Unpack an object from `packed`.
Raises `ExtraData` when `packed` contains extra bytes.
See :class:`Unpacker` for options.
""" |
unpacker = Unpacker(None, **kwargs)
unpacker.feed(packed)
try:
ret = unpacker._unpack()
except OutOfData:
raise UnpackValueError("Data is not enough.")
if unpacker._got_extradata():
raise ExtraData(ret, unpacker._get_extradata())
return ret |
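A short usage sketch with the reference msgpack-python package, showing how trailing bytes surface as ExtraData:

import msgpack

packed = msgpack.packb([1, 2, 3]) + b"extra"
try:
    msgpack.unpackb(packed)
except msgpack.exceptions.ExtraData as e:
    print(e.unpacked, e.extra)  # [1, 2, 3] b'extra'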
<SYSTEM_TASK:>
Gets rid of the used parts of the buffer.
<END_TASK>
<USER_TASK:>
Description:
def _consume(self):
""" Gets rid of the used parts of the buffer. """ |
self._stream_offset += self._buff_i - self._buf_checkpoint
self._buf_checkpoint = self._buff_i |
<SYSTEM_TASK:>
Establish a socket connection and set nodelay settings on it.
<END_TASK>
<USER_TASK:>
Description:
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
""" |
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self._dns_host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn |
<SYSTEM_TASK:>
Alternative to the common request method, which sends the
<END_TASK>
<USER_TASK:>
Description:
def request_chunked(self, method, url, body=None, headers=None):
"""
Alternative to the common request method, which sends the
body with chunked encoding and not as one block
""" |
headers = HTTPHeaderDict(headers if headers is not None else {})
skip_accept_encoding = 'accept-encoding' in headers
skip_host = 'host' in headers
self.putrequest(
method,
url,
skip_accept_encoding=skip_accept_encoding,
skip_host=skip_host
)
for header, value in headers.items():
self.putheader(header, value)
if 'transfer-encoding' not in headers:
self.putheader('Transfer-Encoding', 'chunked')
self.endheaders()
if body is not None:
stringish_types = six.string_types + (bytes,)
if isinstance(body, stringish_types):
body = (body,)
for chunk in body:
if not chunk:
continue
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf8')
len_str = hex(len(chunk))[2:]
self.send(len_str.encode('utf-8'))
self.send(b'\r\n')
self.send(chunk)
self.send(b'\r\n')
# After the if clause, to always have a closed body
self.send(b'0\r\n\r\n') |
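A hedged sketch of the wire behavior using urllib3's connection class, which defines this method; generators work as bodies since the method only iterates chunks (host and path are placeholders):

from urllib3.connection import HTTPConnection

conn = HTTPConnection("httpbin.org", port=80)  # hypothetical echo host

def body_chunks():
    yield b"hello "
    yield u"world"  # text chunks are encoded as UTF-8

conn.request_chunked("POST", "/post", body=body_chunks())
print(conn.getresponse().status)
# Each chunk was framed as <hex length>\r\n<chunk>\r\n, terminated by 0\r\n\r\n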
<SYSTEM_TASK:>
This method should only be called once, before the connection is used.
<END_TASK>
<USER_TASK:>
Description:
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None,
ca_cert_dir=None):
"""
This method should only be called once, before the connection is used.
""" |
# If cert_reqs is not provided, we can try to guess. If the user gave
# us a cert database, we assume they want to use it: otherwise, if
# they gave us an SSL Context object we should use whatever is set for
# it.
if cert_reqs is None:
if ca_certs or ca_cert_dir:
cert_reqs = 'CERT_REQUIRED'
elif self.ssl_context is not None:
cert_reqs = self.ssl_context.verify_mode
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) |
<SYSTEM_TASK:>
Catch known errors and prettify them instead of showing the
<END_TASK>
<USER_TASK:>
Description:
def prettify_exc(error):
"""Catch known errors and prettify them instead of showing the
entire traceback, for better UX""" |
matched_exceptions = [k for k in KNOWN_EXCEPTIONS.keys() if k in error]
if not matched_exceptions:
return "{}".format(vistir.misc.decode_for_output(error))
errors = []
for match in matched_exceptions:
_, error, info = error.rpartition(KNOWN_EXCEPTIONS[match])
errors.append("{} {}".format(error, info))
return "\n".join(errors) |
<SYSTEM_TASK:>
Returns the completion results for click.core.Choice
<END_TASK>
<USER_TASK:>
Description:
def choice_complete(self, ctx, incomplete):
"""Returns the completion results for click.core.Choice
Parameters
----------
ctx : click.core.Context
The current context
incomplete :
The string to complete
Returns
-------
[(str, str)]
A list of completion results
""" |
return [
(c, None) for c in self.choices
if completion_configuration.match_incomplete(c, incomplete)
] |
<SYSTEM_TASK:>
Parse command-line argument vector.
<END_TASK>
<USER_TASK:>
Description:
def parse_argv(tokens, options, options_first=False):
"""Parse command-line argument vector.
If options_first:
argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ;
else:
argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;
""" |
parsed = []
while tokens.current() is not None:
if tokens.current() == '--':
return parsed + [Argument(None, v) for v in tokens]
elif tokens.current().startswith('--'):
parsed += parse_long(tokens, options)
elif tokens.current().startswith('-') and tokens.current() != '-':
parsed += parse_shorts(tokens, options)
elif options_first:
return parsed + [Argument(None, v) for v in tokens]
else:
parsed.append(Argument(None, tokens.move()))
return parsed |
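This is docopt's internal argv parser; its effect is easiest to see through the public entry point (assuming the reference docopt package):

from docopt import docopt

usage = "Usage: prog [-v] <file>"
print(docopt(usage, argv=["-v", "data.txt"]))
# {'-v': True, '<file>': 'data.txt'}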
<SYSTEM_TASK:>
Use `subprocess.Popen` to get the output of a command and decode it.
<END_TASK>
<USER_TASK:>
Description:
def run(
cmd,
env=None,
return_object=False,
block=True,
cwd=None,
verbose=False,
nospin=False,
spinner_name=None,
combine_stderr=True,
display_limit=200,
write_to_stdout=True,
):
"""Use `subprocess.Popen` to get the output of a command and decode it.
:param list cmd: A list representing the command you want to run.
:param dict env: Additional environment settings to pass through to the subprocess.
:param bool return_object: When True, returns the whole subprocess instance
:param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
:param str cwd: Current working directory context to use for spawning the subprocess.
:param bool verbose: Whether to print stdout in real time when non-blocking.
:param bool nospin: Whether to disable the cli spinner.
:param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
:param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
:param int display_limit: The max width of output lines to display when using a spinner.
:param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
:returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.
.. Warning:: Merging standard out and standard error in a nonblocking subprocess
can cause errors in some cases and may not be ideal. Consider disabling
this functionality.
""" |
_env = os.environ.copy()
if env:
_env.update(env)
if six.PY2:
fs_encode = partial(to_bytes, encoding=locale_encoding)
_env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
else:
_env = {k: fs_str(v) for k, v in _env.items()}
if not spinner_name:
spinner_name = "bouncingBar"
if six.PY2:
if isinstance(cmd, six.string_types):
cmd = cmd.encode("utf-8")
elif isinstance(cmd, (list, tuple)):
cmd = [c.encode("utf-8") for c in cmd]
if not isinstance(cmd, Script):
cmd = Script.parse(cmd)
if block or not return_object:
combine_stderr = False
start_text = ""
with spinner(
spinner_name=spinner_name,
start_text=start_text,
nospin=nospin,
write_to_stdout=write_to_stdout,
) as sp:
return _create_subprocess(
cmd,
env=_env,
return_object=return_object,
block=block,
cwd=cwd,
verbose=verbose,
spinner=sp,
combine_stderr=combine_stderr,
start_text=start_text,
write_to_stdout=True,
) |
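A blocking usage sketch, assuming this is vistir's run helper (importable as below in the vistir distribution):

from vistir.misc import run

out, err = run(["python", "--version"], nospin=True)  # 2-tuple per the docstring above
print(out)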
<SYSTEM_TASK:>
Determine the proper output encoding for terminal rendering
<END_TASK>
<USER_TASK:>
Description:
def getpreferredencoding():
"""Determine the proper output encoding for terminal rendering""" |
# Borrowed from Invoke
# (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
_encoding = locale.getpreferredencoding(False)
if six.PY2 and not sys.platform == "win32":
_default_encoding = locale.getdefaultlocale()[1]
if _default_encoding is not None:
_encoding = _default_encoding
return _encoding |
<SYSTEM_TASK:>
Given a string, decode it for output to a terminal
<END_TASK>
<USER_TASK:>
Description:
def decode_for_output(output, target_stream=None, translation_map=None):
"""Given a string, decode it for output to a terminal
:param str output: A string to print to a terminal
:param target_stream: A stream to write to, we will encode to target this stream if possible.
:param dict translation_map: A mapping of unicode character ordinals to replacement strings.
:return: A re-encoded string using the preferred encoding
:rtype: str
""" |
if not isinstance(output, six.string_types):
return output
encoding = None
if target_stream is not None:
encoding = getattr(target_stream, "encoding", None)
encoding = get_output_encoding(encoding)
try:
output = _encode(output, encoding=encoding, translation_map=translation_map)
except (UnicodeDecodeError, UnicodeEncodeError):
output = to_native_string(output)
output = _encode(
output, encoding=encoding, errors="replace", translation_map=translation_map
)
return to_text(output, encoding=encoding, errors="replace") |
<SYSTEM_TASK:>
Given an encoding name, get the canonical name from a codec lookup.
<END_TASK>
<USER_TASK:>
Description:
def get_canonical_encoding_name(name):
# type: (str) -> str
"""
Given an encoding name, get the canonical name from a codec lookup.
:param str name: The name of the codec to lookup
:return: The canonical version of the codec name
:rtype: str
""" |
import codecs
try:
codec = codecs.lookup(name)
except LookupError:
return name
else:
return codec.name |
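Illustrative calls against the function above (standard library only):

print(get_canonical_encoding_name("UTF8"))     # 'utf-8'
print(get_canonical_encoding_name("latin-1"))  # 'iso8859-1'
print(get_canonical_encoding_name("no-such"))  # 'no-such' (unknown names pass through)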
<SYSTEM_TASK:>
Returns True if the system can bind an IPv6 address.
<END_TASK>
<USER_TASK:>
Description:
def _has_ipv6(host):
""" Returns True if the system can bind an IPv6 address. """ |
sock = None
has_ipv6 = False
# App Engine doesn't support IPV6 sockets and actually has a quota on the
# number of sockets that can be used, so just early out here instead of
# creating a socket needlessly.
# See https://github.com/urllib3/urllib3/issues/1446
if _appengine_environ.is_appengine_sandbox():
return False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/shazow/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except Exception:
pass
if sock:
sock.close()
return has_ipv6 |
<SYSTEM_TASK:>
Determine if unicode string only contains ASCII characters.
<END_TASK>
<USER_TASK:>
Description:
def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
""" |
assert isinstance(u_string, str)
try:
u_string.encode('ascii')
return True
except UnicodeEncodeError:
return False |
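Two quick checks with the function above:

print(unicode_is_ascii(u"requests"))  # True
print(unicode_is_ascii(u"reqüests"))  # False, non-ASCII character present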
<SYSTEM_TASK:>
Function for determining if custom platform options are allowed.
<END_TASK>
<USER_TASK:>
Description:
def check_dist_restriction(options, check_target=False):
# type: (Values, bool) -> None
"""Function for determining if custom platform options are allowed.
:param options: The OptionParser options.
:param check_target: Whether or not to check if --target is being used.
""" |
dist_restriction_set = any([
options.python_version,
options.platform,
options.abi,
options.implementation,
])
binary_only = FormatControl(set(), {':all:'})
sdist_dependencies_allowed = (
options.format_control != binary_only and
not options.ignore_dependencies
)
# Installations or downloads using dist restrictions must not combine
# source distributions and dist-specific wheels, as they are not
# guaranteed to be locally compatible.
if dist_restriction_set and sdist_dependencies_allowed:
raise CommandError(
"When restricting platform and interpreter constraints using "
"--python-version, --platform, --abi, or --implementation, "
"either --no-deps must be set, or --only-binary=:all: must be "
"set and --no-binary must not be set (or must be set to "
":none:)."
)
if check_target:
if dist_restriction_set and not options.target_dir:
raise CommandError(
"Can not use any platform or abi specific options unless "
"installing via '--target'"
) |
<SYSTEM_TASK:>
Derive missing values of source from the existing fields.
<END_TASK>
<USER_TASK:>
Description:
def populate_source(cls, source):
"""Derive missing values of source from the existing fields.""" |
# Only the URL parameter is mandatory; let the KeyError be thrown.
if "name" not in source:
source["name"] = get_url_name(source["url"])
if "verify_ssl" not in source:
source["verify_ssl"] = "https://" in source["url"]
if not isinstance(source["verify_ssl"], bool):
source["verify_ssl"] = source["verify_ssl"].lower() == "true"
return source |
<SYSTEM_TASK:>
Get the pinned version of an InstallRequirement.
<END_TASK>
<USER_TASK:>
Description:
def get_pinned_version(ireq):
"""Get the pinned version of an InstallRequirement.
An InstallRequirement is considered pinned if:
- It is not editable
- It has exactly one specifier
- That specifier is "=="
- The version does not contain a wildcard
Examples:
django==1.8 # pinned
django>1.8 # NOT pinned
django~=1.8 # NOT pinned
django==1.* # NOT pinned
Raises `TypeError` if the input is not a valid InstallRequirement, or
`ValueError` if the InstallRequirement is not pinned.
""" |
try:
specifier = ireq.specifier
except AttributeError:
raise TypeError("Expected InstallRequirement, not {}".format(
type(ireq).__name__,
))
if ireq.editable:
raise ValueError("InstallRequirement is editable")
if not specifier:
raise ValueError("InstallRequirement has no version specification")
if len(specifier._specs) != 1:
raise ValueError("InstallRequirement has multiple specifications")
op, version = next(iter(specifier._specs))._spec
if op not in ('==', '===') or version.endswith('.*'):
raise ValueError("InstallRequirement not pinned (is {0!r})".format(
op + version,
))
return version |
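A sketch using a lightweight stand-in, since constructing a real InstallRequirement is heavyweight; it leans on the same packaging internals (_specs/_spec) the helper itself uses, so treat it as illustrative only:

from packaging.specifiers import SpecifierSet

class FakeIreq(object):  # hypothetical stand-in for InstallRequirement
    def __init__(self, spec, editable=False):
        self.specifier = SpecifierSet(spec)
        self.editable = editable

print(get_pinned_version(FakeIreq("==1.8")))  # '1.8'
get_pinned_version(FakeIreq(">1.8"))          # raises ValueError: not pinned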
<SYSTEM_TASK:>
Returns a new requirement object with extras removed.
<END_TASK>
<USER_TASK:>
Description:
def strip_extras(requirement):
"""Returns a new requirement object with extras removed.
""" |
line = requirement.as_line()
new = type(requirement).from_line(line)
new.extras = None
return new |
<SYSTEM_TASK:>
Initialize the module as appropriate for POSIX systems.
<END_TASK>
<USER_TASK:>
Description:
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems.""" |
# load the installed Makefile:
makefile = get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED'] |
<SYSTEM_TASK:>
Initialize the module as appropriate for NT
<END_TASK>
<USER_TASK:>
Description:
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT""" |
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) |
<SYSTEM_TASK:>
Parse a config.h-style file.
<END_TASK>
<USER_TASK:>
Description:
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
""" |
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars |
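A self-contained check with an in-memory file object (re is imported in the surrounding module):

import io

sample = io.StringIO(
    "#define HAVE_FORK 1\n"
    "/* #undef HAVE_KQUEUE */\n"
)
print(parse_config_h(sample))  # {'HAVE_FORK': 1, 'HAVE_KQUEUE': 0}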
<SYSTEM_TASK:>
Return a mapping containing an install scheme.
<END_TASK>
<USER_TASK:>
Description:
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
""" |
_ensure_cfg_read()
if expand:
return _expand_vars(scheme, vars)
else:
return dict(_SCHEMES.items(scheme)) |
<SYSTEM_TASK:>
Return a path corresponding to the scheme.
<END_TASK>
<USER_TASK:>
Description:
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
""" |
return get_paths(scheme, vars, expand)[name] |
<SYSTEM_TASK:>
Display all the information sysconfig contains.
<END_TASK>
<USER_TASK:>
Description:
def _main():
"""Display all information sysconfig detains.""" |
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars()) |
<SYSTEM_TASK:>
Increments the parser if the end of the input has not been reached.
<END_TASK>
<USER_TASK:>
Description:
def inc(self, exception=None): # type: (Optional[ParseError.__class__]) -> bool
"""
Increments the parser if the end of the input has not been reached.
Returns whether or not it was able to advance.
""" |
try:
self._idx, self._current = next(self._chars)
return True
except StopIteration:
self._idx = len(self)
self._current = self.EOF
if exception:
raise self.parse_error(exception)
return False |
<SYSTEM_TASK:>
Command name auto-correct.
<END_TASK>
<USER_TASK:>
Description:
def get_similar_commands(name):
"""Command name auto-correct.""" |
from difflib import get_close_matches
name = name.lower()
close_commands = get_close_matches(name, commands_dict.keys())
if close_commands:
return close_commands[0]
else:
return False |
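The matching itself is difflib's get_close_matches; a dependency-free sketch of the underlying behavior (the real function matches against pip's commands_dict):

from difflib import get_close_matches

print(get_close_matches("insall", ["install", "uninstall", "download"]))
# ['install', 'uninstall'], best match first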
<SYSTEM_TASK:>
Create a copy of this extension bound to another environment.
<END_TASK>
<USER_TASK:>
Description:
def bind(self, environment):
"""Create a copy of this extension bound to another environment.""" |
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv |
<SYSTEM_TASK:>
Return an attribute node for the current extension. This is useful
<END_TASK>
<USER_TASK:>
Description:
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
""" |
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno) |
<SYSTEM_TASK:>
Parse until the next block tag with a given name.
<END_TASK>
<USER_TASK:>
Description:
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name.""" |
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf) |
<SYSTEM_TASK:>
Returns a string suitable for running as a shell script.
<END_TASK>
<USER_TASK:>
Description:
def get_cli_string(path=None, action=None, key=None, value=None, quote=None):
"""Returns a string suitable for running as a shell script.
Useful for converting arguments passed to a fabric task
to be passed to a `local` or `run` command.
""" |
command = ['dotenv']
if quote:
command.append('-q %s' % quote)
if path:
command.append('-f %s' % path)
if action:
command.append(action)
if key:
command.append(key)
if value:
if ' ' in value:
command.append('"%s"' % value)
else:
command.append(value)
return ' '.join(command).strip() |
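Example invocations of the function above:

print(get_cli_string(path=".env", action="set", key="DEBUG", value="true"))
# dotenv -f .env set DEBUG true
print(get_cli_string(action="set", key="MSG", value="hello world"))
# dotenv set MSG "hello world"  (values containing spaces are quoted)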
<SYSTEM_TASK:>
Properly merges both requests and session hooks.
<END_TASK>
<USER_TASK:>
Description:
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
""" |
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class) |
<SYSTEM_TASK:>
Receives a Response. Returns a redirect URI or ``None``
<END_TASK>
<USER_TASK:>
Description:
def get_redirect_target(self, resp):
"""Receives a Response. Returns a redirect URI or ``None``""" |
# Due to the nature of how requests processes redirects this method will
# be called at least once upon the original response and at least twice
# on each subsequent redirect response (if any).
# If a custom mixin is used to handle this logic, it may be advantageous
# to cache the redirect location onto the response object as a private
# attribute.
if resp.is_redirect:
location = resp.headers['location']
# Currently the underlying http module on py3 decodes headers
# in latin1, but empirical evidence suggests that latin1 is very
# rarely used with non-ASCII characters in HTTP headers.
# It is more likely that we receive a UTF-8 header than a latin1 one.
# This causes incorrect handling of UTF8 encoded location headers.
# To solve this, we re-encode the location in latin1.
if is_py3:
location = location.encode('latin1')
return to_native_string(location, 'utf8')
return None |
<SYSTEM_TASK:>
Decide whether Authorization header should be removed when redirecting
<END_TASK>
<USER_TASK:>
Description:
def should_strip_auth(self, old_url, new_url):
"""Decide whether Authorization header should be removed when redirecting""" |
old_parsed = urlparse(old_url)
new_parsed = urlparse(new_url)
if old_parsed.hostname != new_parsed.hostname:
return True
# Special case: allow http -> https redirect when using the standard
# ports. This isn't specified by RFC 7235, but is kept to avoid
# breaking backwards compatibility with older versions of requests
# that allowed any redirects on the same host.
if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
return False
# Handle default port usage corresponding to scheme.
changed_port = old_parsed.port != new_parsed.port
changed_scheme = old_parsed.scheme != new_parsed.scheme
default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
if (not changed_scheme and old_parsed.port in default_port
and new_parsed.port in default_port):
return False
# Standard case: root URI must match
return changed_port or changed_scheme |
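A few illustrative calls, assuming a requests.Session instance (the method lives on its redirect mixin):

import requests

s = requests.Session()
print(s.should_strip_auth("https://example.com/", "https://other.example/"))   # True: host changed
print(s.should_strip_auth("http://example.com/", "https://example.com/"))      # False: http->https on default ports
print(s.should_strip_auth("http://example.com:8080/", "http://example.com/"))  # True: non-default port changed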
<SYSTEM_TASK:>
Check the environment and merge it with some settings.
<END_TASK>
<USER_TASK:>
Description:
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""
Check the environment and merge it with some settings.
:rtype: dict
""" |
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
no_proxy = proxies.get('no_proxy') if proxies is not None else None
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert} |
<SYSTEM_TASK:>
Returns the appropriate connection adapter for the given URL.
<END_TASK>
<USER_TASK:>
Description:
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
""" |
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix.lower()):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url) |
<SYSTEM_TASK:>
Registers a connection adapter to a prefix.
<END_TASK>
<USER_TASK:>
Description:
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by prefix length.
""" |
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key) |
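A sketch of the resulting ordering on a requests.Session (which starts with http:// and https:// adapters); get_adapter scans in insertion order, so longer prefixes must come first:

import requests

s = requests.Session()
s.mount("https://api.example.com/", requests.adapters.HTTPAdapter(max_retries=3))
print(list(s.adapters))
# ['https://api.example.com/', 'https://', 'http://']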
<SYSTEM_TASK:>
Return a string, safe for output, of subprocess output.
<END_TASK>
<USER_TASK:>
Description:
def console_to_str(data):
# type: (bytes) -> Text
"""Return a string, safe for output, of subprocess output.
We assume the data is in the locale preferred encoding.
If it won't decode properly, we warn the user but decode as
best we can.
We also ensure that the output can be safely written to
standard output without encoding errors.
""" |
# First, get the encoding we assume. This is the preferred
# encoding for the locale, unless that is not found, or
# it is ASCII, in which case assume UTF-8
encoding = locale.getpreferredencoding()
if (not encoding) or codecs.lookup(encoding).name == "ascii":
encoding = "utf-8"
# Now try to decode the data - if we fail, warn the user and
# decode with replacement.
try:
decoded_data = data.decode(encoding)
except UnicodeDecodeError:
logger.warning(
"Subprocess output does not appear to be encoded as %s",
encoding,
)
decoded_data = data.decode(encoding, errors=backslashreplace_decode)
# Make sure we can print the output, by encoding it to the output
# encoding with replacement of unencodable characters, and then
# decoding again.
# We use stderr's encoding because it's less likely to be
# redirected and if we don't find an encoding we skip this
# step (on the assumption that output is wrapped by something
# that won't fail).
# The double getattr is to deal with the possibility that we're
# being called in a situation where sys.__stderr__ doesn't exist,
# or doesn't have an encoding attribute. Neither of these cases
# should occur in normal pip use, but there's no harm in checking
# in case people use pip in (unsupported) unusual situations.
output_encoding = getattr(getattr(sys, "__stderr__", None),
"encoding", None)
if output_encoding:
output_encoded = decoded_data.encode(
output_encoding,
errors="backslashreplace"
)
decoded_data = output_encoded.decode(output_encoding)
return decoded_data |
<SYSTEM_TASK:>
Return path's uid.
<END_TASK>
<USER_TASK:>
Description:
def get_path_uid(path):
# type: (str) -> int
"""
Return path's uid.
Does not follow symlinks:
https://github.com/pypa/pip/pull/935#discussion_r5307003
Placed this function in compat due to differences on AIX and
Jython, that should eventually go away.
:raises OSError: When path is a symlink or can't be read.
""" |
if hasattr(os, 'O_NOFOLLOW'):
fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
file_uid = os.fstat(fd).st_uid
os.close(fd)
else: # AIX and Jython
# WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW
if not os.path.islink(path):
# older versions of Jython don't have `os.fstat`
file_uid = os.stat(path).st_uid
else:
# raise OSError for parity with os.O_NOFOLLOW above
raise OSError(
"%s is a symlink; Will not return uid for symlinks" % path
)
return file_uid |
<SYSTEM_TASK:>
Expand ~ and ~user constructions.
<END_TASK>
<USER_TASK:>
Description:
def expanduser(path):
# type: (str) -> str
"""
Expand ~ and ~user constructions.
Includes a workaround for https://bugs.python.org/issue14768
""" |
expanded = os.path.expanduser(path)
if path.startswith('~/') and expanded.startswith('//'):
expanded = expanded[1:]
return expanded |
<SYSTEM_TASK:>
Retrieve all of the relevant constraints, aggregated from the pipfile, resolver,
<END_TASK>
<USER_TASK:>
Description:
def get_constraints(self):
"""
Retrieve all of the relevant constraints, aggregated from the pipfile, resolver,
and parent dependencies and their respective conflict resolution where possible.
:return: A set of **InstallRequirement** instances representing constraints
:rtype: Set
""" |
constraints = {
c for c in self.resolver.parsed_constraints
if c and c.name == self.entry.name
}
pipfile_constraint = self.get_pipfile_constraint()
if pipfile_constraint:
constraints.add(pipfile_constraint)
return constraints |
<SYSTEM_TASK:>
Given a resolved entry with multiple parent dependencies with different
<END_TASK>
<USER_TASK:>
Description:
def constraint_from_parent_conflicts(self):
"""
Given a resolved entry with multiple parent dependencies with different
constraints, searches for the resolution that satisfies all of the parent
constraints.
:return: A new **InstallRequirement** satisfying all parent constraints
:raises: :exc:`~pipenv.exceptions.DependencyConflict` if resolution is impossible
""" |
# ensure that we satisfy the parent dependencies of this dep
from pipenv.vendor.packaging.specifiers import Specifier
parent_dependencies = set()
has_mismatch = False
can_use_original = True
for p in self.parent_deps:
# updated dependencies should be satisfied since they were resolved already
if p.is_updated:
continue
# parents with no requirements can't conflict
if not p.requirements:
continue
needed = p.requirements.get("dependencies", [])
entry_ref = p.get_dependency(self.name)
required = entry_ref.get("required_version", "*")
required = self.clean_specifier(required)
parent_requires = self.make_requirement(self.name, required)
parent_dependencies.add("{0} => {1} ({2})".format(p.name, self.name, required))
if not parent_requires.requirement.specifier.contains(self.original_version):
can_use_original = False
if not parent_requires.requirement.specifier.contains(self.updated_version):
has_mismatch = True
if has_mismatch and not can_use_original:
from pipenv.exceptions import DependencyConflict
msg = (
"Cannot resolve {0} ({1}) due to conflicting parent dependencies: "
"\n\t{2}".format(
self.name, self.updated_version, "\n\t".join(parent_dependencies)
)
)
raise DependencyConflict(msg)
elif can_use_original:
return self.lockfile_entry.as_ireq()
return self.entry.as_ireq() |
<SYSTEM_TASK:>
Retrieves the full set of available constraints and iterate over them, validating
<END_TASK>
<USER_TASK:>
Description:
def validate_constraints(self):
"""
Retrieves the full set of available constraints and iterate over them, validating
that they exist and that they are not causing unresolvable conflicts.
:return: True if the constraints are satisfied by the resolution provided
:raises: :exc:`pipenv.exceptions.DependencyConflict` if the constraints dont exist
""" |
constraints = self.get_constraints()
for constraint in constraints:
try:
constraint.check_if_exists(False)
except Exception:
from pipenv.exceptions import DependencyConflict
msg = (
"Cannot resolve conflicting version {0}{1} while {1}{2} is "
"locked.".format(
self.name, self.updated_specifier, self.old_name, self.old_specifiers
)
)
raise DependencyConflict(msg)
return True |
<SYSTEM_TASK:>
SecureTransport read callback. This is called by ST to request that data
<END_TASK>
<USER_TASK:>
Description:
def _read_callback(connection_id, data_buffer, data_length_pointer):
"""
SecureTransport read callback. This is called by ST to request that data
be returned from the socket.
""" |
wrapped_socket = None
try:
wrapped_socket = _connection_refs.get(connection_id)
if wrapped_socket is None:
return SecurityConst.errSSLInternal
base_socket = wrapped_socket.socket
requested_length = data_length_pointer[0]
timeout = wrapped_socket.gettimeout()
error = None
read_count = 0
try:
while read_count < requested_length:
if timeout is None or timeout >= 0:
if not util.wait_for_read(base_socket, timeout):
raise socket.error(errno.EAGAIN, 'timed out')
remaining = requested_length - read_count
buffer = (ctypes.c_char * remaining).from_address(
data_buffer + read_count
)
chunk_size = base_socket.recv_into(buffer, remaining)
read_count += chunk_size
if not chunk_size:
if not read_count:
return SecurityConst.errSSLClosedGraceful
break
except (socket.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
data_length_pointer[0] = read_count
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedAbort
raise
data_length_pointer[0] = read_count
if read_count != requested_length:
return SecurityConst.errSSLWouldBlock
return 0
except Exception as e:
if wrapped_socket is not None:
wrapped_socket._exception = e
return SecurityConst.errSSLInternal |
<SYSTEM_TASK:>
SecureTransport write callback. This is called by ST to request that data
<END_TASK>
<USER_TASK:>
Description:
def _write_callback(connection_id, data_buffer, data_length_pointer):
"""
SecureTransport write callback. This is called by ST to request that data
actually be sent on the network.
""" |
wrapped_socket = None
try:
wrapped_socket = _connection_refs.get(connection_id)
if wrapped_socket is None:
return SecurityConst.errSSLInternal
base_socket = wrapped_socket.socket
bytes_to_write = data_length_pointer[0]
data = ctypes.string_at(data_buffer, bytes_to_write)
timeout = wrapped_socket.gettimeout()
error = None
sent = 0
try:
while sent < bytes_to_write:
if timeout is None or timeout >= 0:
if not util.wait_for_write(base_socket, timeout):
raise socket.error(errno.EAGAIN, 'timed out')
chunk_sent = base_socket.send(data)
sent += chunk_sent
# This has some needless copying here, but I'm not sure there's
# much value in optimising this data path.
data = data[chunk_sent:]
except (socket.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
data_length_pointer[0] = sent
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedAbort
raise
data_length_pointer[0] = sent
if sent != bytes_to_write:
return SecurityConst.errSSLWouldBlock
return 0
except Exception as e:
if wrapped_socket is not None:
wrapped_socket._exception = e
return SecurityConst.errSSLInternal |
<SYSTEM_TASK:>
Sets up the allowed ciphers. By default this matches the set in
<END_TASK>
<USER_TASK:>
Description:
def _set_ciphers(self):
"""
Sets up the allowed ciphers. By default this matches the set in
util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
custom and doesn't allow changing at this time, mostly because parsing
OpenSSL cipher strings is going to be a freaking nightmare.
""" |
ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
result = Security.SSLSetEnabledCiphers(
self.context, ciphers, len(CIPHER_SUITES)
)
_assert_no_error(result) |
<SYSTEM_TASK:>
Actually performs the TLS handshake. This is run automatically by
<END_TASK>
<USER_TASK:>
Description:
def handshake(self,
server_hostname,
verify,
trust_bundle,
min_version,
max_version,
client_cert,
client_key,
client_key_passphrase):
"""
Actually performs the TLS handshake. This is run automatically by
the wrapped socket, and shouldn't be needed in user code.
""" |
# First, we do the initial bits of connection setup. We need to create
# a context, set its I/O funcs, and set the connection reference.
self.context = Security.SSLCreateContext(
None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
)
result = Security.SSLSetIOFuncs(
self.context, _read_callback_pointer, _write_callback_pointer
)
_assert_no_error(result)
# Here we need to compute the handle to use. We do this by taking the
# id of self modulo 2**31 - 1. If this is already in the dictionary, we
# just keep incrementing by one until we find a free space.
with _connection_ref_lock:
handle = id(self) % 2147483647
while handle in _connection_refs:
handle = (handle + 1) % 2147483647
_connection_refs[handle] = self
result = Security.SSLSetConnection(self.context, handle)
_assert_no_error(result)
# If we have a server hostname, we should set that too.
if server_hostname:
if not isinstance(server_hostname, bytes):
server_hostname = server_hostname.encode('utf-8')
result = Security.SSLSetPeerDomainName(
self.context, server_hostname, len(server_hostname)
)
_assert_no_error(result)
# Setup the ciphers.
self._set_ciphers()
# Set the minimum and maximum TLS versions.
result = Security.SSLSetProtocolVersionMin(self.context, min_version)
_assert_no_error(result)
result = Security.SSLSetProtocolVersionMax(self.context, max_version)
_assert_no_error(result)
# If there's a trust DB, we need to use it. We do that by telling
# SecureTransport to break on server auth. We also do that if we don't
# want to validate the certs at all: we just won't actually do any
# authing in that case.
if not verify or trust_bundle is not None:
result = Security.SSLSetSessionOption(
self.context,
SecurityConst.kSSLSessionOptionBreakOnServerAuth,
True
)
_assert_no_error(result)
# If there's a client cert, we need to use it.
if client_cert:
self._keychain, self._keychain_dir = _temporary_keychain()
self._client_cert_chain = _load_client_cert_chain(
self._keychain, client_cert, client_key
)
result = Security.SSLSetCertificate(
self.context, self._client_cert_chain
)
_assert_no_error(result)
while True:
with self._raise_on_error():
result = Security.SSLHandshake(self.context)
if result == SecurityConst.errSSLWouldBlock:
raise socket.timeout("handshake timed out")
elif result == SecurityConst.errSSLServerAuthCompleted:
self._custom_validate(verify, trust_bundle)
continue
else:
_assert_no_error(result)
break |
<SYSTEM_TASK:>
Returns the unique hash key for this template name.
<END_TASK>
<USER_TASK:>
Description:
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name.""" |
hash = sha1(name.encode('utf-8'))
if filename is not None:
filename = '|' + filename
if isinstance(filename, text_type):
filename = filename.encode('utf-8')
hash.update(filename)
return hash.hexdigest() |
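A quick demonstration through Jinja2's filesystem cache, which inherits this method:

from jinja2 import FileSystemBytecodeCache

cache = FileSystemBytecodeCache()
print(cache.get_cache_key("index.html"))                     # sha1 of the name alone
print(cache.get_cache_key("index.html", "/tmp/index.html"))  # name + '|' + filename hashed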
<SYSTEM_TASK:>
Return a cache bucket for the given template. All arguments are
<END_TASK>
<USER_TASK:>
Description:
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
""" |
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket |
<SYSTEM_TASK:>
Accept either an encoding object or label.
<END_TASK>
<USER_TASK:>
Description:
def _get_encoding(encoding_or_label):
"""
Accept either an encoding object or label.
:param encoding: An :class:`Encoding` object or a label string.
:returns: An :class:`Encoding` object.
:raises: :exc:`~exceptions.LookupError` for an unknown label.
""" |
if hasattr(encoding_or_label, 'codec_info'):
return encoding_or_label
encoding = lookup(encoding_or_label)
if encoding is None:
raise LookupError('Unknown encoding label: %r' % encoding_or_label)
return encoding |
<SYSTEM_TASK:>
Decode a single string.
<END_TASK>
<USER_TASK:>
Description:
def decode(input, fallback_encoding, errors='replace'):
"""
Decode a single string.
:param input: A byte string
:param fallback_encoding:
An :class:`Encoding` object or a label string.
The encoding to use if :obj:`input` does not have a BOM.
:param errors: Type of error handling. See :func:`codecs.register`.
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
:return:
A ``(output, encoding)`` tuple of a Unicode string
and an :obj:`Encoding`.
""" |
# Fail early if `encoding` is an invalid label.
fallback_encoding = _get_encoding(fallback_encoding)
bom_encoding, input = _detect_bom(input)
encoding = bom_encoding or fallback_encoding
return encoding.codec_info.decode(input, errors)[0], encoding |
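A usage sketch assuming the reference webencodings package; a UTF-8 BOM overrides the fallback label:

import webencodings

print(webencodings.decode(b"\xef\xbb\xbfcaf\xc3\xa9", "latin1"))
# ('café', <Encoding utf-8>)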
<SYSTEM_TASK:>
Encode a single string.
<END_TASK>
<USER_TASK:>
Description:
def encode(input, encoding=UTF8, errors='strict'):
"""
Encode a single string.
:param input: A Unicode string.
:param encoding: An :class:`Encoding` object or a label string.
:param errors: Type of error handling. See :func:`codecs.register`.
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
:return: A byte string.
""" |
return _get_encoding(encoding).codec_info.encode(input, errors)[0] |
<SYSTEM_TASK:>
"Pull"-based decoder.
<END_TASK>
<USER_TASK:>
Description:
def iter_decode(input, fallback_encoding, errors='replace'):
"""
"Pull"-based decoder.
:param input:
An iterable of byte strings.
The input is first consumed just enough to determine the encoding
based on the presence of a BOM,
then consumed on demand as the return value is iterated.
:param fallback_encoding:
An :class:`Encoding` object or a label string.
The encoding to use if :obj:`input` does not have a BOM.
:param errors: Type of error handling. See :func:`codecs.register`.
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
:returns:
An ``(output, encoding)`` tuple.
:obj:`output` is an iterable of Unicode strings,
:obj:`encoding` is the :obj:`Encoding` that is being used.
""" |
decoder = IncrementalDecoder(fallback_encoding, errors)
generator = _iter_decode_generator(input, decoder)
encoding = next(generator)
return generator, encoding |
<SYSTEM_TASK:>
Decode one chunk of the input.
<END_TASK>
<USER_TASK:>
Description:
def decode(self, input, final=False):
"""Decode one chunk of the input.
:param input: A byte string.
:param final:
Indicate that no more input is available.
Must be :obj:`True` if this is the last call.
:returns: A Unicode string.
""" |
decoder = self._decoder
if decoder is not None:
return decoder(input, final)
input = self._buffer + input
encoding, input = _detect_bom(input)
if encoding is None:
if len(input) < 3 and not final: # Not enough data yet.
self._buffer = input
return ''
else: # No BOM
encoding = self._fallback_encoding
decoder = encoding.codec_info.incrementaldecoder(self._errors).decode
self._decoder = decoder
self.encoding = encoding
return decoder(input, final) |
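Feeding input in fragments shows the BOM buffering, assuming webencodings' IncrementalDecoder:

from webencodings import IncrementalDecoder

dec = IncrementalDecoder("utf-8")
print(dec.decode(b"\xef\xbb"))          # '' -- too short to rule out a BOM yet
print(dec.decode(b"\xbfcaf\xc3"))       # 'caf' -- BOM consumed, last byte pair incomplete
print(dec.decode(b"\xa9", final=True))  # 'é'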
<SYSTEM_TASK:>
Given a list of Python tuples, create an associated CFDictionary.
<END_TASK>
<USER_TASK:>
Description:
def _cf_dictionary_from_tuples(tuples):
"""
Given a list of Python tuples, create an associated CFDictionary.
""" |
dictionary_size = len(tuples)
# We need to get the dictionary keys and values out in the same order.
keys = (t[0] for t in tuples)
values = (t[1] for t in tuples)
cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
return CoreFoundation.CFDictionaryCreate(
CoreFoundation.kCFAllocatorDefault,
cf_keys,
cf_values,
dictionary_size,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
) |
<SYSTEM_TASK:>
Creates a Unicode string from a CFString object. Used entirely for error
<END_TASK>
<USER_TASK:>
Description:
def _cf_string_to_unicode(value):
"""
Creates a Unicode string from a CFString object. Used entirely for error
reporting.
Yes, it annoys me quite a lot that this function is this complex.
""" |
value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
string = CoreFoundation.CFStringGetCStringPtr(
value_as_void_p,
CFConst.kCFStringEncodingUTF8
)
if string is None:
buffer = ctypes.create_string_buffer(1024)
result = CoreFoundation.CFStringGetCString(
value_as_void_p,
buffer,
1024,
CFConst.kCFStringEncodingUTF8
)
if not result:
raise OSError('Error copying C string from CFStringRef')
string = buffer.value
if string is not None:
string = string.decode('utf-8')
return string |
<SYSTEM_TASK:>
Checks the return code and throws an exception if there is an error to
<END_TASK>
<USER_TASK:>
Description:
def _assert_no_error(error, exception_class=None):
"""
Checks the return code and throws an exception if there is an error to
report
""" |
if error == 0:
return
cf_error_string = Security.SecCopyErrorMessageString(error, None)
output = _cf_string_to_unicode(cf_error_string)
CoreFoundation.CFRelease(cf_error_string)
if output is None or output == u'':
output = u'OSStatus %s' % error
if exception_class is None:
exception_class = ssl.SSLError
raise exception_class(output) |
<SYSTEM_TASK:>
Given a bundle of certs in PEM format, turns them into a CFArray of certs
<END_TASK>
<USER_TASK:>
Description:
def _cert_array_from_pem(pem_bundle):
"""
Given a bundle of certs in PEM format, turns them into a CFArray of certs
that can be used to validate a cert chain.
""" |
# Normalize the PEM bundle's line endings.
pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
der_certs = [
base64.b64decode(match.group(1))
for match in _PEM_CERTS_RE.finditer(pem_bundle)
]
if not der_certs:
raise ssl.SSLError("No root certificates specified")
cert_array = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
)
if not cert_array:
raise ssl.SSLError("Unable to allocate memory!")
try:
for der_bytes in der_certs:
certdata = _cf_data_from_bytes(der_bytes)
if not certdata:
raise ssl.SSLError("Unable to allocate memory!")
cert = Security.SecCertificateCreateWithData(
CoreFoundation.kCFAllocatorDefault, certdata
)
CoreFoundation.CFRelease(certdata)
if not cert:
raise ssl.SSLError("Unable to build cert object!")
CoreFoundation.CFArrayAppendValue(cert_array, cert)
CoreFoundation.CFRelease(cert)
except Exception:
# We need to free the array before the exception bubbles further.
# We only want to do that if an error occurs: otherwise, the caller
# should free.
CoreFoundation.CFRelease(cert_array)
return cert_array |
<SYSTEM_TASK:>
This function creates a temporary Mac keychain that we can use to work with
<END_TASK>
<USER_TASK:>
Description:
def _temporary_keychain():
"""
This function creates a temporary Mac keychain that we can use to work with
credentials. This keychain uses a one-time password and a temporary file to
store the data. We expect to have one keychain per socket. The returned
SecKeychainRef must be freed by the caller, including calling
SecKeychainDelete.
Returns a tuple of the SecKeychainRef and the path to the temporary
directory that contains it.
""" |
# Unfortunately, SecKeychainCreate requires a path to a keychain. This
# means we cannot use mkstemp to use a generic temporary file. Instead,
# we're going to create a temporary directory and a filename to use there.
# This filename will be 8 random bytes expanded into base64. We also need
# some random bytes to password-protect the keychain we're creating, so we
# ask for 40 random bytes.
random_bytes = os.urandom(40)
filename = base64.b16encode(random_bytes[:8]).decode('utf-8')
password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8
tempdirectory = tempfile.mkdtemp()
keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
# We now want to create the keychain itself.
keychain = Security.SecKeychainRef()
status = Security.SecKeychainCreate(
keychain_path,
len(password),
password,
False,
None,
ctypes.byref(keychain)
)
_assert_no_error(status)
# Having created the keychain, we want to pass it off to the caller.
return keychain, tempdirectory |
<SYSTEM_TASK:>
Load certificates and maybe keys from a number of files. Has the end goal
<END_TASK>
<USER_TASK:>
Description:
def _load_client_cert_chain(keychain, *paths):
"""
Load certificates and maybe keys from a number of files. Has the end goal
of returning a CFArray containing one SecIdentityRef, and then zero or more
SecCertificateRef objects, suitable for use as a client certificate trust
chain.
""" |
# Ok, the strategy.
#
# This relies on knowing that macOS will not give you a SecIdentityRef
# unless you have imported a key into a keychain. This is a somewhat
# artificial limitation of macOS (for example, it doesn't necessarily
# affect iOS), but there is nothing inside Security.framework that lets you
# get a SecIdentityRef without having a key in a keychain.
#
# So the policy here is we take all the files and iterate them in order.
# Each one will use SecItemImport to have one or more objects loaded from
# it. We will also point at a keychain that macOS can use to work with the
# private key.
#
# Once we have all the objects, we'll check what we actually have. If we
# already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
# we'll take the first certificate (which we assume to be our leaf) and
# ask the keychain to give us a SecIdentityRef with that cert's associated
# key.
#
# We'll then return a CFArray containing the trust chain: one
# SecIdentityRef and then zero-or-more SecCertificateRef objects. The
# responsibility for freeing this CFArray will be with the caller. This
# CFArray must remain alive for the entire connection, so in practice it
# will be stored with a single SSLSocket, along with the reference to the
# keychain.
certificates = []
identities = []
# Filter out bad paths.
paths = (path for path in paths if path)
try:
for file_path in paths:
new_identities, new_certs = _load_items_from_file(
keychain, file_path
)
identities.extend(new_identities)
certificates.extend(new_certs)
# Ok, we have everything. The question is: do we have an identity? If
# not, we want to grab one from the first cert we have.
if not identities:
new_identity = Security.SecIdentityRef()
status = Security.SecIdentityCreateWithCertificate(
keychain,
certificates[0],
ctypes.byref(new_identity)
)
_assert_no_error(status)
identities.append(new_identity)
# We now want to release the original certificate, as we no longer
# need it.
CoreFoundation.CFRelease(certificates.pop(0))
# We now need to build a new CFArray that holds the trust chain.
trust_chain = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
for item in itertools.chain(identities, certificates):
# ArrayAppendValue does a CFRetain on the item. That's fine,
# because the finally block will release our other refs to them.
CoreFoundation.CFArrayAppendValue(trust_chain, item)
return trust_chain
finally:
for obj in itertools.chain(identities, certificates):
CoreFoundation.CFRelease(obj) |
<SYSTEM_TASK:>
Should we redirect and where to?
<END_TASK>
<USER_TASK:>
Description:
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
""" |
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False |
<SYSTEM_TASK:>
Set initial length value for Response content if available.
<END_TASK>
<USER_TASK:>
Description:
def _init_length(self, request_method):
"""
Set initial length value for Response content if available.
""" |
length = self.headers.get('content-length')
if length is not None:
if self.chunked:
# This Response will fail with an IncompleteRead if it can't be
# received as chunked. This method falls back to attempt reading
# the response before raising an exception.
log.warning("Received response with both Content-Length and "
"Transfer-Encoding set. This is expressly forbidden "
"by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
"attempting to process response as Transfer-Encoding: "
"chunked.")
return None
try:
# RFC 7230 section 3.3.2 specifies multiple content lengths can
# be sent in a single Content-Length header
# (e.g. Content-Length: 42, 42). This line ensures the values
# are all valid ints and that as long as the `set` length is 1,
# all values are the same. Otherwise, the header is invalid.
lengths = set([int(val) for val in length.split(',')])
if len(lengths) > 1:
raise InvalidHeader("Content-Length contained multiple "
"unmatching values (%s)" % length)
length = lengths.pop()
except ValueError:
length = None
else:
if length < 0:
length = None
# Convert status to int for comparison
# In some cases, httplib returns a status of "_UNKNOWN"
try:
status = int(self.status)
except ValueError:
status = 0
# Check for responses that shouldn't include a body
if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
length = 0
return length |
<SYSTEM_TASK:>
Flushes the decoder. Should only be called if the decoder is actually
<END_TASK>
<USER_TASK:>
Description:
def _flush_decoder(self):
"""
Flushes the decoder. Should only be called if the decoder is actually
being used.
""" |
if self._decoder:
buf = self._decoder.decompress(b'')
return buf + self._decoder.flush()
return b'' |
<SYSTEM_TASK:>
Catch low-level python exceptions, instead re-raising urllib3
<END_TASK>
<USER_TASK:>
Description:
def _error_catcher(self):
"""
Catch low-level python exceptions, instead re-raising urllib3
variants, so that low-level exceptions are not leaked in the
high-level api.
On exit, release the connection back to the pool.
""" |
clean_exit = False
try:
try:
yield
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if 'read operation timed out' not in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except (HTTPException, SocketError) as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
# If no exception is thrown, we should avoid cleaning up
# unnecessarily.
clean_exit = True
finally:
# If we didn't terminate cleanly, we need to throw away our
# connection.
if not clean_exit:
# The response may not be closed but we're not going to use it
# anymore so close it now to ensure that the connection is
# released back to the pool.
if self._original_response:
self._original_response.close()
# Closing the response may not actually be sufficient to close
# everything, so if we have a hold of the connection close that
# too.
if self._connection:
self._connection.close()
# If we hold the original response but it's closed now, we should
# return the connection back to the pool.
if self._original_response and self._original_response.isclosed():
self.release_conn() |
<SYSTEM_TASK:>
Returns the URL that was the source of this response.
<END_TASK>
<USER_TASK:>
Description:
def geturl(self):
"""
Returns the URL that was the source of this response.
If the request that generated this response redirected, this method
will return the final redirect location.
""" |
if self.retries is not None and len(self.retries.history):
return self.retries.history[-1].redirect_location
else:
return self._request_url |
<SYSTEM_TASK:>
Set fail finalizer to a spinner.
<END_TASK>
<USER_TASK:>
Description:
def fail(self, text=u"FAIL", err=False):
"""Set fail finalizer to a spinner.""" |
# Do not display spin text for fail state
self._text = None
_text = text if text else u"FAIL"
err = err or not self.write_to_stdout
self._freeze(_text, err=err) |
<SYSTEM_TASK:>
Write error text in the terminal without breaking the spinner.
<END_TASK>
<USER_TASK:>
Description:
def write_err(self, text):
"""Write error text in the terminal without breaking the spinner.""" |
stderr = self.stderr
if self.stderr.closed:
stderr = sys.stderr
stderr.write(decode_output(u"\r", target_stream=stderr))
stderr.write(decode_output(CLEAR_LINE, target_stream=stderr))
if text is None:
text = ""
text = decode_output(u"{0}\n".format(text), target_stream=stderr)
self.stderr.write(text)
self.out_buff.write(decode_output(text, target_stream=self.out_buff)) |
<SYSTEM_TASK:>
Like `describe_token` but for token expressions.
<END_TASK>
<USER_TASK:>
Description:
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions.""" |
if ':' in expr:
type, value = expr.split(':', 1)
if type == 'name':
return value
else:
type = expr
return _describe_token_type(type) |
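A short usage sketch; this helper lives in jinja2.lexer, so it can be probed directly (assumes Jinja2 is installed):

from jinja2.lexer import describe_token_expr

print(describe_token_expr('name:endfor'))  # 'endfor' -- name expressions return the value itself
print(describe_token_expr('block_end'))    # 'end of statement block' (via _describe_token_type)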
<SYSTEM_TASK:>
Return a lexer which is probably cached.
<END_TASK>
<USER_TASK:>
Description:
def get_lexer(environment):
"""Return a lexer which is probably cached.""" |
key = (environment.block_start_string,
environment.block_end_string,
environment.variable_start_string,
environment.variable_end_string,
environment.comment_start_string,
environment.comment_end_string,
environment.line_statement_prefix,
environment.line_comment_prefix,
environment.trim_blocks,
environment.lstrip_blocks,
environment.newline_sequence,
environment.keep_trailing_newline)
lexer = _lexer_cache.get(key)
if lexer is None:
lexer = Lexer(environment)
_lexer_cache[key] = lexer
return lexer |
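Because the cache key is the tuple of all lexer-relevant settings, two environments with identical settings share a single Lexer instance. A quick check (assumes Jinja2 is installed):

from jinja2 import Environment
from jinja2.lexer import get_lexer

env_a, env_b = Environment(), Environment()
print(get_lexer(env_a) is get_lexer(env_b))   # True -- cache hit
env_c = Environment(block_start_string='<%')
print(get_lexer(env_a) is get_lexer(env_c))   # False -- different key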
<SYSTEM_TASK:>
Calls tokeniter + tokenize and wraps it in a token stream.
<END_TASK>
<USER_TASK:>
Description:
def tokenize(self, source, name=None, filename=None, state=None):
"""Calls tokeniter + tokenize and wraps it in a token stream.
""" |
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename) |
<SYSTEM_TASK:>
Iterate through CPython versions available for Pipenv to install.
<END_TASK>
<USER_TASK:>
Description:
def iter_installable_versions(self):
"""Iterate through CPython versions available for Pipenv to install.
""" |
for name in self._pyenv('install', '--list').out.splitlines():
try:
version = Version.parse(name.strip())
except ValueError:
continue
yield version |
<SYSTEM_TASK:>
Find a version in pyenv from the version supplied.
<END_TASK>
<USER_TASK:>
Description:
def find_version_to_install(self, name):
"""Find a version in pyenv from the version supplied.
A ValueError is raised if a matching version cannot be found.
""" |
version = Version.parse(name)
if version.patch is not None:
return name
try:
best_match = max((
inst_version
for inst_version in self.iter_installable_versions()
if inst_version.matches_minor(version)
), key=operator.attrgetter('cmpkey'))
except ValueError:
raise ValueError(
'no installable version found for {0!r}'.format(name),
)
return best_match |
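The selection above is just max() over the minor-compatible candidates; a self-contained sketch of the same pattern with plain tuples standing in for Version objects (hypothetical data):

import operator

installable = [(3, 6, 1), (3, 6, 8), (3, 7, 2)]   # pretend pyenv output
wanted_minor = (3, 6)
best = max((v for v in installable if v[:2] == wanted_minor),
           key=operator.itemgetter(2))
print(best)   # (3, 6, 8) -- the highest matching patch release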
<SYSTEM_TASK:>
Install the given version with pyenv.
<END_TASK>
<USER_TASK:>
Description:
def install(self, version):
"""Install the given version with pyenv.
The version must be a ``Version`` instance representing a version
found in pyenv.
A ValueError is raised if the given version does not have a match in
pyenv. A PyenvError is raised if the pyenv command fails.
""" |
c = self._pyenv(
'install', '-s', str(version),
timeout=PIPENV_INSTALL_TIMEOUT,
)
return c |
<SYSTEM_TASK:>
Return a helpful message when the requirements file does not exist,
<END_TASK>
<USER_TASK:>
Description:
def deduce_helpful_msg(req):
# type: (str) -> str
"""Returns helpful msg in case requirements file does not exist,
or cannot be parsed.
:params req: Requirements file path
""" |
msg = ""
if os.path.exists(req):
msg = " It does exist."
# Try to parse and check if it is a requirements file.
try:
with open(req, 'r') as fp:
# parse first line only
next(parse_requirements(fp.read()))
msg += " The argument you provided " + \
"(%s) appears to be a" % (req) + \
" requirements file. If that is the" + \
" case, use the '-r' flag to install" + \
" the packages specified within it."
except RequirementParseError:
logger.debug("Cannot parse '%s' as requirements \
file" % (req), exc_info=True)
else:
msg += " File '%s' does not exist." % (req)
return msg |
<SYSTEM_TASK:>
Return the file cache path based on the URL.
<END_TASK>
<USER_TASK:>
Description:
def url_to_file_path(url, filecache):
"""Return the file cache path based on the URL.
This does not ensure the file exists!
""" |
key = CacheController.cache_url(url)
return filecache._fn(key) |
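Usage sketch, assuming the function above is in scope, cachecontrol is installed, and its FileCache backend is used:

from cachecontrol.caches import FileCache

cache = FileCache('.webcache')
path = url_to_file_path('https://example.com/data.json', cache)
print(path)   # a hashed path under .webcache/ -- the file itself may not exist yet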
<SYSTEM_TASK:>
Write the PID in the named PID file.
<END_TASK>
<USER_TASK:>
Description:
def write_pid_to_pidfile(pidfile_path):
""" Write the PID in the named PID file.
Get the numeric process ID (“PID”) of the current process
and write it to the named file as a line of text.
""" |
open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
open_mode = 0o644
pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
pidfile = os.fdopen(pidfile_fd, 'w')
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character. For
# example, if crond was process number 25, /var/run/crond.pid
# would contain three characters: two, five, and newline.
pid = os.getpid()
pidfile.write("%s\n" % pid)
pidfile.close() |
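Because the file is opened with O_CREAT | O_EXCL, a second call while the file still exists fails atomically, which is exactly the locking property a PID file needs. A quick demonstration (assumes the function above is in scope):

import errno
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'demo.pid')
write_pid_to_pidfile(path)             # succeeds, writes e.g. '12345\n'
try:
    write_pid_to_pidfile(path)         # file already exists, O_EXCL refuses
except OSError as exc:
    print(exc.errno == errno.EEXIST)   # True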
<SYSTEM_TASK:>
Remove the named PID file if it exists.
<END_TASK>
<USER_TASK:>
Description:
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
""" |
try:
os.remove(pidfile_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
else:
raise |
<SYSTEM_TASK:>
Print the information from the installed distributions found.
<END_TASK>
<USER_TASK:>
Description:
def print_results(distributions, list_files=False, verbose=False):
"""
Print the information from the installed distributions found.
""" |
results_printed = False
for i, dist in enumerate(distributions):
results_printed = True
if i > 0:
logger.info("---")
name = dist.get('name', '')
required_by = [
pkg.project_name for pkg in pkg_resources.working_set
if name in [required.name for required in pkg.requires()]
]
logger.info("Name: %s", name)
logger.info("Version: %s", dist.get('version', ''))
logger.info("Summary: %s", dist.get('summary', ''))
logger.info("Home-page: %s", dist.get('home-page', ''))
logger.info("Author: %s", dist.get('author', ''))
logger.info("Author-email: %s", dist.get('author-email', ''))
logger.info("License: %s", dist.get('license', ''))
logger.info("Location: %s", dist.get('location', ''))
logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
logger.info("Required-by: %s", ', '.join(required_by))
if verbose:
logger.info("Metadata-Version: %s",
dist.get('metadata-version', ''))
logger.info("Installer: %s", dist.get('installer', ''))
logger.info("Classifiers:")
for classifier in dist.get('classifiers', []):
logger.info(" %s", classifier)
logger.info("Entry-points:")
for entry in dist.get('entry_points', []):
logger.info(" %s", entry.strip())
if list_files:
logger.info("Files:")
for line in dist.get('files', []):
logger.info(" %s", line.strip())
if "files" not in dist:
logger.info("Cannot locate installed-files.txt")
return results_printed |
<SYSTEM_TASK:>
Convenience method that raises `exc` with the message, passed
<END_TASK>
<USER_TASK:>
Description:
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
""" |
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename) |
<SYSTEM_TASK:>
Called if the parser encounters an unknown tag. Tries to fail
<END_TASK>
<USER_TASK:>
Description:
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
""" |
return self._fail_ut_eof(name, self._end_token_stack, lineno) |
<SYSTEM_TASK:>
Are we at the end of a tuple?
<END_TASK>
<USER_TASK:>
Description:
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?""" |
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False |
<SYSTEM_TASK:>
Parse multiple statements into a list until one of the end tokens
<END_TASK>
<USER_TASK:>
Description:
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until one of the `end_tokens` is
reached. By default the active token in the stream at the end of
the call is the matched end token. If this is not wanted, `drop_needle`
can be set to `True` and the end token is removed.
""" |
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result |
<SYSTEM_TASK:>
Parse an assignment target. As Jinja2 allows assignments to
<END_TASK>
<USER_TASK:>
Description:
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None, with_namespace=False):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. By
default assignments to tuples are parsed; that can be disabled
by setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
""" |
if with_namespace and self.stream.look().type == 'dot':
token = self.stream.expect('name')
next(self.stream) # dot
attr = self.stream.expect('name')
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target |
<SYSTEM_TASK:>
Parse the whole template into a `Template` node.
<END_TASK>
<USER_TASK:>
Description:
def parse(self):
"""Parse the whole template into a `Template` node.""" |
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result |
<SYSTEM_TASK:>
Normalize the URL to create a safe key for the cache
<END_TASK>
<USER_TASK:>
Description:
def _urlnorm(cls, uri):
"""Normalize the URL to create a safe key for the cache""" |
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
scheme = scheme.lower()
authority = authority.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return defrag_uri |
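_urlnorm is reached through the classmethod cache_url, so the normalization can be exercised directly (assumes cachecontrol is installed):

from cachecontrol.controller import CacheController

print(CacheController.cache_url('HTTP://Example.COM/a?b=1'))
# 'http://example.com/a?b=1'  -- scheme and authority lowercased
print(CacheController.cache_url('https://example.com'))
# 'https://example.com/'      -- empty path normalized to '/'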
<SYSTEM_TASK:>
Return a cached response if it exists in the cache, otherwise
<END_TASK>
<USER_TASK:>
Description:
def cached_request(self, request):
"""
Return a cached response if it exists in the cache, otherwise
return False.
""" |
cache_url = self.cache_url(request.url)
logger.debug('Looking up "%s" in the cache', cache_url)
cc = self.parse_cache_control(request.headers)
# Bail out if the request insists on fresh data
if "no-cache" in cc:
logger.debug('Request header has "no-cache", cache bypassed')
return False
if "max-age" in cc and cc["max-age"] == 0:
logger.debug('Request header has "max_age" as 0, cache bypassed')
return False
# Request allows serving from the cache, let's see if we find something
cache_data = self.cache.get(cache_url)
if cache_data is None:
logger.debug("No cache entry available")
return False
# Check whether it can be deserialized
resp = self.serializer.loads(request, cache_data)
if not resp:
logger.warning("Cache entry deserialization failed, entry ignored")
return False
# If we have a cached 301, return it immediately. We don't
# need to test our response for other headers b/c it is
# intrinsically "cacheable" as it is Permanent.
# See:
# https://tools.ietf.org/html/rfc7231#section-6.4.2
#
# Client can try to refresh the value by repeating the request
# with cache busting headers as usual (ie no-cache).
if resp.status == 301:
msg = (
'Returning cached "301 Moved Permanently" response '
"(ignoring date and etag information)"
)
logger.debug(msg)
return resp
headers = CaseInsensitiveDict(resp.headers)
if not headers or "date" not in headers:
if "etag" not in headers:
# Without date or etag, the cached response can never be used
# and should be deleted.
logger.debug("Purging cached response: no date or etag")
self.cache.delete(cache_url)
logger.debug("Ignoring cached response: no date")
return False
now = time.time()
date = calendar.timegm(parsedate_tz(headers["date"]))
current_age = max(0, now - date)
logger.debug("Current age based on date: %i", current_age)
# TODO: There is an assumption that the result will be a
# urllib3 response object. This may not be best since we
# could probably avoid instantiating or constructing the
# response until we know we need it.
resp_cc = self.parse_cache_control(headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
if "max-age" in resp_cc:
freshness_lifetime = resp_cc["max-age"]
logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)
# If there isn't a max-age, check for an expires header
elif "expires" in headers:
expires = parsedate_tz(headers["expires"])
if expires is not None:
expire_time = calendar.timegm(expires) - date
freshness_lifetime = max(0, expire_time)
logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)
# Determine if we are setting freshness limit in the
# request. Note, this overrides what was in the response.
if "max-age" in cc:
freshness_lifetime = cc["max-age"]
logger.debug(
"Freshness lifetime from request max-age: %i", freshness_lifetime
)
if "min-fresh" in cc:
min_fresh = cc["min-fresh"]
# adjust our current age by our min fresh
current_age += min_fresh
logger.debug("Adjusted current age from min-fresh: %i", current_age)
# Return entry if it is fresh enough
if freshness_lifetime > current_age:
logger.debug('The response is "fresh", returning cached response')
logger.debug("%i > %i", freshness_lifetime, current_age)
return resp
# we're not fresh. If we don't have an Etag, clear it out
if "etag" not in headers:
logger.debug('The cached response is "stale" with no etag, purging')
self.cache.delete(cache_url)
# return the original handler
return False |
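The freshness arithmetic in isolation: a response is fresh as long as its lifetime exceeds its current age. A self-contained sketch with hypothetical header values:

import calendar
from email.utils import parsedate_tz

date = calendar.timegm(parsedate_tz('Mon, 01 Jan 2018 00:00:00 GMT'))
now = date + 100                          # pretend 100 seconds have elapsed
current_age = max(0, now - date)          # 100
freshness_lifetime = 300                  # e.g. Cache-Control: max-age=300
print(freshness_lifetime > current_age)   # True -> serve the cached response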
<SYSTEM_TASK:>
Algorithm for caching requests.
<END_TASK>
<USER_TASK:>
Description:
def cache_response(self, request, response, body=None, status_codes=None):
"""
Algorithm for caching requests.
This assumes a requests Response object.
""" |
# From httplib2: Don't cache 206's since we aren't going to
# handle byte range requests
cacheable_status_codes = status_codes or self.cacheable_status_codes
if response.status not in cacheable_status_codes:
logger.debug(
"Status code %s not in %s", response.status, cacheable_status_codes
)
return
response_headers = CaseInsensitiveDict(response.headers)
# If we've been given a body, our response has a Content-Length, and
# that Content-Length is valid, then we can check whether the body
# we've been given matches the expected size; if it doesn't, we just
# skip trying to cache it.
if (
body is not None
and "content-length" in response_headers
and response_headers["content-length"].isdigit()
and int(response_headers["content-length"]) != len(body)
):
return
cc_req = self.parse_cache_control(request.headers)
cc = self.parse_cache_control(response_headers)
cache_url = self.cache_url(request.url)
logger.debug('Updating cache with response from "%s"', cache_url)
# Delete it from the cache if we happen to have it stored there
no_store = False
if "no-store" in cc:
no_store = True
logger.debug('Response header has "no-store"')
if "no-store" in cc_req:
no_store = True
logger.debug('Request header has "no-store"')
if no_store and self.cache.get(cache_url):
logger.debug('Purging existing cache entry to honor "no-store"')
self.cache.delete(cache_url)
if no_store:
return
# If we've been given an etag, then keep the response
if self.cache_etags and "etag" in response_headers:
logger.debug("Caching due to etag")
self.cache.set(
cache_url, self.serializer.dumps(request, response, body=body)
)
# Add to the cache any 301s. We do this before looking at
# the Date headers.
elif response.status == 301:
logger.debug("Caching permanant redirect")
self.cache.set(cache_url, self.serializer.dumps(request, response))
# Add to the cache if the response headers demand it. If there
# is no date header then we can't do anything about expiring
# the cache.
elif "date" in response_headers:
# cache when there is a max-age > 0
if "max-age" in cc and cc["max-age"] > 0:
logger.debug("Caching b/c date exists and max-age > 0")
self.cache.set(
cache_url, self.serializer.dumps(request, response, body=body)
)
# If the request can expire, it means we should cache it
# in the meantime.
elif "expires" in response_headers:
if response_headers["expires"]:
logger.debug("Caching b/c of expires header")
self.cache.set(
cache_url, self.serializer.dumps(request, response, body=body)
) |
<SYSTEM_TASK:>
On a 304 we will get a new set of headers that we want to
<END_TASK>
<USER_TASK:>
Description:
def update_cached_response(self, request, response):
"""On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
""" |
cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(request, self.cache.get(cache_url))
if not cached_response:
# we didn't have a cached response
return response
# Lets update our headers with the headers from the new request:
# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
#
# The server isn't supposed to send headers that would make
# the cached body invalid. But... just in case, we'll be sure
# to strip out ones we know might be problematic due to
# typical assumptions.
excluded_headers = ["content-length"]
cached_response.headers.update(
dict(
(k, v)
for k, v in response.headers.items()
if k.lower() not in excluded_headers
)
)
# we want a 200 b/c we have content via the cache
cached_response.status = 200
# update our cache
self.cache.set(cache_url, self.serializer.dumps(request, cached_response))
return cached_response |
<SYSTEM_TASK:>
Close the file descriptor.
<END_TASK>
<USER_TASK:>
Description:
def close (self):
"""Close the file descriptor.
Calling this method a second time does nothing, but if the file
descriptor was closed elsewhere, :class:`OSError` will be raised.
""" |
if self.child_fd == -1:
return
self.flush()
os.close(self.child_fd)
self.child_fd = -1
self.closed = True |
<SYSTEM_TASK:>
Read from the file descriptor and return the result as a string.
<END_TASK>
<USER_TASK:>
Description:
def read_nonblocking(self, size=1, timeout=-1):
"""
Read from the file descriptor and return the result as a string.
The read_nonblocking method of :class:`SpawnBase` assumes that a call
to os.read will not block (timeout parameter is ignored). This is not
the case for POSIX file-like objects such as sockets and serial ports.
The timeout is therefore implemented with :func:`select.select`
(or poll) on POSIX systems.
:param int size: Read at most *size* bytes.
:param int timeout: Wait timeout seconds for file descriptor to be
ready to read. When -1 (default), use self.timeout. When 0, poll.
:return: String containing the bytes read
""" |
if os.name == 'posix':
if timeout == -1:
timeout = self.timeout
rlist = [self.child_fd]
wlist = []
xlist = []
if self.use_poll:
rlist = poll_ignore_interrupts(rlist, timeout)
else:
rlist, wlist, xlist = select_ignore_interrupts(
rlist, wlist, xlist, timeout
)
if self.child_fd not in rlist:
raise TIMEOUT('Timeout exceeded.')
return super(fdspawn, self).read_nonblocking(size) |
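A usage sketch with a socket pair, which is exactly the kind of POSIX file-like object the select() path exists for (assumes pexpect is installed; POSIX only):

import socket
from pexpect.fdpexpect import fdspawn

a, b = socket.socketpair()
child = fdspawn(a.fileno())
b.sendall(b'hello')
print(child.read_nonblocking(size=5, timeout=2))   # b'hello'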
<SYSTEM_TASK:>
Jinja2 keeps internal caches for environments and lexers. These are
<END_TASK>
<USER_TASK:>
Description:
def clear_caches():
"""Jinja2 keeps internal caches for environments and lexers. These are
used so that Jinja2 doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
""" |
from jinja2.environment import _spontaneous_environments
from jinja2.lexer import _lexer_cache
_spontaneous_environments.clear()
_lexer_cache.clear() |
<SYSTEM_TASK:>
Prettyprint an object. Either use the `pretty` library or the
<END_TASK>
<USER_TASK:>
Description:
def pformat(obj, verbose=False):
"""Prettyprint an object. Either use the `pretty` library or the
builtin `pprint`.
""" |
try:
from pretty import pretty
return pretty(obj, verbose=verbose)
except ImportError:
from pprint import pformat
return pformat(obj) |
<SYSTEM_TASK:>
URL escapes a single bytestring or unicode string with the
<END_TASK>
<USER_TASK:>
Description:
def unicode_urlencode(obj, charset='utf-8', for_qs=False):
"""URL escapes a single bytestring or unicode string with the
given charset if applicable to URL safe quoting under all rules
that need to be considered under all supported Python versions.
If non-strings are provided they are converted to their unicode
representation first.
""" |
if not isinstance(obj, string_types):
obj = text_type(obj)
if isinstance(obj, text_type):
obj = obj.encode(charset)
safe = not for_qs and b'/' or b''
rv = text_type(url_quote(obj, safe))
if for_qs:
rv = rv.replace('%20', '+')
return rv |
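Behaviour sketch; this helper lives in jinja2.utils in Jinja2 2.x (assumes that version is installed):

from jinja2.utils import unicode_urlencode

print(unicode_urlencode(u'f\xf6\xf6'))        # 'f%C3%B6%C3%B6' -- encoded to UTF-8, then quoted
print(unicode_urlencode('a b', for_qs=True))  # 'a+b' -- spaces become '+' in query strings
print(unicode_urlencode('a/b'))               # 'a/b' -- '/' stays safe outside query strings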
<SYSTEM_TASK:>
Intelligently sets the initial value of autoescaping based on the
<END_TASK>
<USER_TASK:>
Description:
def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
disabled_extensions=(),
default_for_string=True,
default=False):
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
If you want to enable it for all templates created from strings or
for all templates with `.html` and `.xml` extensions::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
enabled_extensions=('html', 'xml'),
default_for_string=True,
))
Example configuration to turn it on at all times except if the template
ends with `.txt`::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
disabled_extensions=('txt',),
default_for_string=True,
default=True,
))
`enabled_extensions` is an iterable of all the extensions for which
autoescaping should be enabled. Likewise `disabled_extensions` is
a list of all extensions for which it should be disabled. If a template is
loaded from a string then the default from `default_for_string` is used.
If nothing matches then the initial value of autoescaping is set to the
value of `default`.
For security reasons this function operates case-insensitively.
.. versionadded:: 2.9
""" |
enabled_patterns = tuple('.' + x.lstrip('.').lower()
for x in enabled_extensions)
disabled_patterns = tuple('.' + x.lstrip('.').lower()
for x in disabled_extensions)
def autoescape(template_name):
if template_name is None:
return default_for_string
template_name = template_name.lower()
if template_name.endswith(enabled_patterns):
return True
if template_name.endswith(disabled_patterns):
return False
return default
return autoescape |
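The docstring's examples show configuration; the returned callable itself can also be probed directly (assumes Jinja2 is installed):

from jinja2 import select_autoescape

autoescape = select_autoescape(enabled_extensions=('html',), default=False)
print(autoescape('index.HTML'))   # True  -- matching is case-insensitive
print(autoescape('notes.txt'))    # False -- falls through to `default`
print(autoescape(None))           # True  -- string templates use `default_for_string`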
<SYSTEM_TASK:>
Set `default` if the key is not in the cache otherwise
<END_TASK>
<USER_TASK:>
Description:
def setdefault(self, key, default=None):
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
""" |
self._wlock.acquire()
try:
try:
return self[key]
except KeyError:
self[key] = default
return default
finally:
self._wlock.release() |
<SYSTEM_TASK:>
Hardcoded license URLs. Check when updating if those are still needed
<END_TASK>
<USER_TASK:>
Description:
def license_fallback(vendor_dir, sdist_name):
"""Hardcoded license URLs. Check when updating if those are still needed""" |
libname = libname_from_dir(sdist_name)
if libname not in HARDCODED_LICENSE_URLS:
raise ValueError('No hardcoded URL for {} license'.format(libname))
url = HARDCODED_LICENSE_URLS[libname]
_, _, name = url.rpartition('/')
dest = license_destination(vendor_dir, libname, name)
r = requests.get(url, allow_redirects=True)
log('Downloading {}'.format(url))
r.raise_for_status()
dest.write_bytes(r.content) |
<SYSTEM_TASK:>
Reconstruct the library name without its version
<END_TASK>
<USER_TASK:>
Description:
def libname_from_dir(dirname):
"""Reconstruct the library name without it's version""" |
parts = []
for part in dirname.split('-'):
if part[0].isdigit():
break
parts.append(part)
return '-'.join(parts) |
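A few illustrative inputs (assumes the function above is in scope); the heuristic stops at the first dash-separated chunk that starts with a digit:

print(libname_from_dir('requests-2.19.1'))       # 'requests'
print(libname_from_dir('msgpack-python-0.5.6'))  # 'msgpack-python'
print(libname_from_dir('backports.csv-1.0.7'))   # 'backports.csv'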
<SYSTEM_TASK:>
Convert Pipfile.lock hash lines into InstallRequirement option format.
<END_TASK>
<USER_TASK:>
Description:
def _convert_hashes(values):
"""Convert Pipfile.lock hash lines into InstallRequirement option format.
The option format uses a str-list mapping. Keys are hash algorithms, and
the list contains all values of that algorithm.
""" |
hashes = {}
if not values:
return hashes
for value in values:
try:
name, value = value.split(":", 1)
except ValueError:
name = "sha256"
if name not in hashes:
hashes[name] = []
hashes[name].append(value)
return hashes |
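Example input and output (assumes the function above is in scope); note that a value without an algorithm prefix falls back to sha256:

values = ['sha256:abc123', 'sha256:def456', 'md5:0a1b2c', 'deadbeef']
print(_convert_hashes(values))
# {'sha256': ['abc123', 'def456', 'deadbeef'], 'md5': ['0a1b2c']}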
<SYSTEM_TASK:>
Hack to hide noise generated by `setup.py develop`.
<END_TASK>
<USER_TASK:>
Description:
def _suppress_distutils_logs():
"""Hack to hide noise generated by `setup.py develop`.
There isn't a good way to suppress them now, so let's monkey-patch.
See https://bugs.python.org/issue25392.
""" |
f = distutils.log.Log._log
def _log(log, level, msg, args):
if level >= distutils.log.ERROR:
f(log, level, msg, args)
distutils.log.Log._log = _log
yield
distutils.log.Log._log = f |
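The body is a generator, so in the original source it is presumably wrapped with contextlib.contextmanager. The same idea written out as an explicit, self-contained context manager that also restores the patch on error:

import contextlib
import distutils.log

@contextlib.contextmanager
def quiet_distutils():
    original = distutils.log.Log._log
    def _log(log, level, msg, args):
        # Only let ERROR-and-above records through to the real logger.
        if level >= distutils.log.ERROR:
            original(log, level, msg, args)
    distutils.log.Log._log = _log
    try:
        yield
    finally:
        distutils.log.Log._log = original   # always restore, even on error

with quiet_distutils():
    distutils.log.info('hidden')    # suppressed
    distutils.log.error('shown')    # still emitted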
<SYSTEM_TASK:>
Find this package's .egg-info directory.
<END_TASK>
<USER_TASK:>
Description:
def _find_egg_info(ireq):
"""Find this package's .egg-info directory.
Due to how sdists are designed, the .egg-info directory cannot be reliably
found without running setup.py to aggregate all configurations. This
function instead uses some heuristics to locate the egg-info directory
that most likely represents this package.
The best .egg-info directory's path is returned as a string. None is
returned if no matches can be found.
""" |
root = ireq.setup_py_dir
directory_iterator = _iter_egg_info_directories(root, ireq.name)
try:
top_egg_info = next(directory_iterator)
except StopIteration: # No egg-info found. Wat.
return None
directory_iterator = itertools.chain([top_egg_info], directory_iterator)
# Read the sdist's PKG-INFO to determine which egg_info is best.
pkg_info = _read_pkg_info(root)
# PKG-INFO not readable. Just return whatever comes first, I guess.
if pkg_info is None:
return top_egg_info
# Walk the sdist to find the egg-info with matching PKG-INFO.
for directory in directory_iterator:
egg_pkg_info = _read_pkg_info(directory)
if egg_pkg_info == pkg_info:
return directory
# Nothing matches...? Use the first one we found, I guess.
return top_egg_info |