<SYSTEM_TASK:>
Add a file to the manifest.
<END_TASK>
<USER_TASK:>
Description:
def add(self, item):
"""
Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base.
""" |
if not item.startswith(self.prefix):
item = os.path.join(self.base, item)
self.files.add(os.path.normpath(item)) |
<SYSTEM_TASK:>
Return sorted files in directory order
<END_TASK>
<USER_TASK:>
Description:
def sorted(self, wantdirs=False):
"""
Return sorted files in directory order
""" |
def add_dir(dirs, d):
dirs.add(d)
logger.debug('add_dir added %s', d)
if d != self.base:
parent, _ = os.path.split(d)
assert parent not in ('', '/')
add_dir(dirs, parent)
result = set(self.files) # make a copy!
if wantdirs:
dirs = set()
for f in result:
add_dir(dirs, os.path.dirname(f))
result |= dirs
return [os.path.join(*path_tuple) for path_tuple in
sorted(os.path.split(path) for path in result)] |
<SYSTEM_TASK:>
Process a directive which either adds some files from ``allfiles`` to
<END_TASK>
<USER_TASK:>
Description:
def process_directive(self, directive):
"""
Process a directive which either adds some files from ``allfiles`` to
``files``, or removes some files from ``files``.
:param directive: The directive to process. This should be in a format
compatible with distutils ``MANIFEST.in`` files:
http://docs.python.org/distutils/sourcedist.html#commands
""" |
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dirpattern).
action, patterns, thedir, dirpattern = self._parse_directive(directive)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
for pattern in patterns:
if not self._include_pattern(pattern, anchor=True):
logger.warning('no files found matching %r', pattern)
elif action == 'exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, anchor=True)
#if not found:
# logger.warning('no previously-included files '
# 'found matching %r', pattern)
elif action == 'global-include':
for pattern in patterns:
if not self._include_pattern(pattern, anchor=False):
logger.warning('no files found matching %r '
'anywhere in distribution', pattern)
elif action == 'global-exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, anchor=False)
#if not found:
# logger.warning('no previously-included files '
# 'matching %r found anywhere in '
# 'distribution', pattern)
elif action == 'recursive-include':
for pattern in patterns:
if not self._include_pattern(pattern, prefix=thedir):
logger.warning('no files found matching %r '
'under directory %r', pattern, thedir)
elif action == 'recursive-exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, prefix=thedir)
#if not found:
# logger.warning('no previously-included files '
# 'matching %r found under directory %r',
# pattern, thedir)
elif action == 'graft':
if not self._include_pattern(None, prefix=dirpattern):
logger.warning('no directories found matching %r',
dirpattern)
elif action == 'prune':
if not self._exclude_pattern(None, prefix=dirpattern):
logger.warning('no previously-included directories found '
'matching %r', dirpattern)
else: # pragma: no cover
# This should never happen, as it should be caught in
# _parse_template_line
raise DistlibException(
'invalid action %r' % action) |
<SYSTEM_TASK:>
Translate a shell-like glob pattern to a regular expression.
<END_TASK>
<USER_TASK:>
Description:
def _glob_to_re(self, pattern):
"""Translate a shell-like glob pattern to a regular expression.
Return a string containing the regex. Differs from
'fnmatch.translate()' in that '*' does not match "special characters"
(which are platform-specific).
""" |
pattern_re = fnmatch.translate(pattern)
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
# and by extension they shouldn't match such "special characters" under
# any OS. So change all non-escaped dots in the RE to match any
# character except the special characters (currently: just os.sep).
sep = os.sep
if os.sep == '\\':
# we're using a regex to manipulate a regex, so we need
# to escape the backslash twice
sep = r'\\\\'
escaped = r'\1[^%s]' % sep
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
return pattern_re |
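A quick standalone check of the behavior described in the comments above; this is a sketch with the method body copied into a module-level helper (glob_to_re is a hypothetical name):

import fnmatch
import os
import re

def glob_to_re(pattern):
    # same transformation as _glob_to_re above, outside the class
    pattern_re = fnmatch.translate(pattern)
    sep = r'\\\\' if os.sep == '\\' else os.sep
    return re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^%s]' % sep, pattern_re)

print(re.match(glob_to_re('foo*.py'), 'foo/bar.py'))  # None: '*' no longer crosses os.sep
print(re.match(glob_to_re('foo*.py'), 'foobar.py'))   # matches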
<SYSTEM_TASK:>
Extract the cookies from the response into a CookieJar.
<END_TASK>
<USER_TASK:>
Description:
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
""" |
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req) |
<SYSTEM_TASK:>
Produce an appropriate Cookie header string to be sent with `request`, or None.
<END_TASK>
<USER_TASK:>
Description:
def get_cookie_header(jar, request):
"""
Produce an appropriate Cookie header string to be sent with `request`, or None.
:rtype: str
""" |
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie') |
<SYSTEM_TASK:>
Unsets a cookie by name, by default over all domains and paths.
<END_TASK>
<USER_TASK:>
Description:
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
""" |
clearables = []
for cookie in cookiejar:
if cookie.name != name:
continue
if domain is not None and domain != cookie.domain:
continue
if path is not None and path != cookie.path:
continue
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name) |
<SYSTEM_TASK:>
Add cookies to cookiejar and returns a merged CookieJar.
<END_TASK>
<USER_TASK:>
Description:
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
:rtype: CookieJar
""" |
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar |
<SYSTEM_TASK:>
Utility method to list all the domains in the jar.
<END_TASK>
<USER_TASK:>
Description:
def list_domains(self):
"""Utility method to list all the domains in the jar.""" |
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains |
<SYSTEM_TASK:>
Utility method to list all the paths in the jar.
<END_TASK>
<USER_TASK:>
Description:
def list_paths(self):
"""Utility method to list all the paths in the jar.""" |
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths |
<SYSTEM_TASK:>
Returns True if there are multiple domains in the jar.
<END_TASK>
<USER_TASK:>
Description:
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise.
:rtype: bool
""" |
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False |
<SYSTEM_TASK:>
Updates this jar with cookies from another CookieJar or dict-like
<END_TASK>
<USER_TASK:>
Description:
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like""" |
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other) |
<SYSTEM_TASK:>
Requests uses this method internally to get cookie values.
<END_TASK>
<USER_TASK:>
Description:
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
""" |
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) |
<SYSTEM_TASK:>
Pull a value from the dict and convert to int
<END_TASK>
<USER_TASK:>
Description:
def to_int(d, key, default_to_zero=False, default=None, required=True):
"""Pull a value from the dict and convert to int
:param default_to_zero: If the value is None or empty, treat it as zero
:param default: If the value is missing in the dict use this default
""" |
value = d.get(key) or default
if (value in ["", None]) and default_to_zero:
return 0
if value is None:
if required:
raise ParseError("Unable to read %s from %s" % (key, d))
else:
return int(value) |
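A hedged usage sketch of the three paths above (assumes ParseError is defined in the surrounding module):

d = {"hour": "7", "minute": ""}
to_int(d, "hour")                          # 7
to_int(d, "minute", default_to_zero=True)  # 0: empty value treated as zero
to_int(d, "second", required=False)        # None: missing and not required
to_int(d, "second")                        # raises ParseError: missing but required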
<SYSTEM_TASK:>
Parses ISO 8601 dates into datetime objects
<END_TASK>
<USER_TASK:>
Description:
def parse_date(datestring, default_timezone=UTC):
"""Parses ISO 8601 dates into datetime objects
The timezone is parsed from the date string. However, it is quite common to
have dates without a timezone (not strictly correct). In this case the
default timezone specified in default_timezone is used. This is UTC by
default.
:param datestring: The date to parse as a string
:param default_timezone: A datetime tzinfo instance to use when no timezone
is specified in the datestring. If this is set to
None then a naive datetime object is returned.
:returns: A datetime.datetime instance
:raises: ParseError when there is a problem parsing the date or
constructing the datetime instance.
""" |
if not isinstance(datestring, _basestring):
raise ParseError("Expecting a string %r" % datestring)
m = ISO8601_REGEX.match(datestring)
if not m:
raise ParseError("Unable to parse date string %r" % datestring)
groups = m.groupdict()
tz = parse_timezone(groups, default_timezone=default_timezone)
groups["second_fraction"] = int(Decimal("0.%s" % (groups["second_fraction"] or 0)) * Decimal("1000000.0"))
try:
return datetime.datetime(
year=to_int(groups, "year"),
month=to_int(groups, "month", default=to_int(groups, "monthdash", required=False, default=1)),
day=to_int(groups, "day", default=to_int(groups, "daydash", required=False, default=1)),
hour=to_int(groups, "hour", default_to_zero=True),
minute=to_int(groups, "minute", default_to_zero=True),
second=to_int(groups, "second", default_to_zero=True),
microsecond=groups["second_fraction"],
tzinfo=tz,
)
except Exception as e:
raise ParseError(e) |
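A few illustrative calls, assuming the module-level names used above (ISO8601_REGEX, parse_timezone, UTC) are the iso8601 package's:

parse_date("2007-01-25T12:00:00Z")       # datetime.datetime(2007, 1, 25, 12, 0, tzinfo=UTC)
parse_date("2007-01-25T12:00:00+05:00")  # offset read from the string
parse_date("2007-01-25")                 # date only; time fields default to zero
parse_date("not a date")                 # raises ParseError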
<SYSTEM_TASK:>
Re-map the characters in the string according to UTS46 processing.
<END_TASK>
<USER_TASK:>
Description:
def uts46_remap(domain, std3_rules=True, transitional=False):
"""Re-map the characters in the string according to UTS46 processing.""" |
from .uts46data import uts46data
output = u""
try:
for pos, char in enumerate(domain):
code_point = ord(char)
uts46row = uts46data[code_point if code_point < 256 else
bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
status = uts46row[1]
replacement = uts46row[2] if len(uts46row) == 3 else None
if (status == "V" or
(status == "D" and not transitional) or
(status == "3" and not std3_rules and replacement is None)):
output += char
elif replacement is not None and (status == "M" or
(status == "3" and not std3_rules) or
(status == "D" and transitional)):
output += replacement
elif status != "I":
raise IndexError()
return unicodedata.normalize("NFC", output)
except IndexError:
raise InvalidCodepoint(
"Codepoint {0} not allowed at position {1} in {2}".format(
_unot(code_point), pos + 1, repr(domain))) |
<SYSTEM_TASK:>
Return a dict with the Python implementation and version.
<END_TASK>
<USER_TASK:>
Description:
def _implementation():
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
""" |
implementation = platform.python_implementation()
if implementation == 'CPython':
implementation_version = platform.python_version()
elif implementation == 'PyPy':
implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
implementation_version = ''.join([
implementation_version, sys.pypy_version_info.releaselevel
])
elif implementation == 'Jython':
implementation_version = platform.python_version() # Complete Guess
elif implementation == 'IronPython':
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = 'Unknown'
return {'name': implementation, 'version': implementation_version} |
<SYSTEM_TASK:>
Process text, writing it to the virtual screen while handling
<END_TASK>
<USER_TASK:>
Description:
def write (self, s):
"""Process text, writing it to the virtual screen while handling
ANSI escape codes.
""" |
if isinstance(s, bytes):
s = self._decode(s)
for c in s:
self.process(c) |
<SYSTEM_TASK:>
Return the stream's encoding, falling back to the system default.
<END_TASK>
<USER_TASK:>
Description:
def get_best_encoding(stream):
"""Returns the default stream encoding if not found.""" |
rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
if is_ascii_encoding(rv):
return 'utf-8'
return rv |
<SYSTEM_TASK:>
Get the size of the terminal window.
<END_TASK>
<USER_TASK:>
Description:
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
""" |
# Try the environment first
try:
columns = int(os.environ["COLUMNS"])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ["LINES"])
except (KeyError, ValueError):
lines = 0
# Only query if necessary
if columns <= 0 or lines <= 0:
try:
size = _get_terminal_size(sys.__stdout__.fileno())
except (NameError, OSError):
size = terminal_size(*fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return terminal_size(columns, lines) |
<SYSTEM_TASK:>
If a position is provided, move file to that point.
<END_TASK>
<USER_TASK:>
Description:
def set_file_position(body, pos):
"""
If a position is provided, move file to that point.
Otherwise, we'll attempt to record a position for future use.
""" |
if pos is not None:
rewind_body(body, pos)
elif getattr(body, 'tell', None) is not None:
try:
pos = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body.
pos = _FAILEDTELL
return pos |
<SYSTEM_TASK:>
Attempt to rewind body to a certain position.
<END_TASK>
<USER_TASK:>
Description:
def rewind_body(body, body_pos):
"""
Attempt to rewind body to a certain position.
Primarily used for request redirects and retries.
:param body:
File-like object that supports seek.
:param int pos:
Position to seek to in file.
""" |
body_seek = getattr(body, 'seek', None)
if body_seek is not None and isinstance(body_pos, integer_types):
try:
body_seek(body_pos)
except (IOError, OSError):
raise UnrewindableBodyError("An error occurred when rewinding request "
"body for redirect/retry.")
elif body_pos is _FAILEDTELL:
raise UnrewindableBodyError("Unable to record file position for rewinding "
"request body during a redirect/retry.")
else:
raise ValueError("body_pos must be of type integer, "
"instead it was %s." % type(body_pos)) |
<SYSTEM_TASK:>
Helper for clearing all the keys in a database. Use with
<END_TASK>
<USER_TASK:>
Description:
def clear(self):
"""Helper for clearing all the keys in a database. Use with
caution!""" |
for key in self.conn.keys():
self.conn.delete(key) |
<SYSTEM_TASK:>
Returns a tuple of `frozenset`s of classes and attributes.
<END_TASK>
<USER_TASK:>
Description:
def _split_what(what):
"""
Returns a tuple of `frozenset`s of classes and attributes.
""" |
return (
frozenset(cls for cls in what if isclass(cls)),
frozenset(cls for cls in what if isinstance(cls, Attribute)),
) |
<SYSTEM_TASK:>
``asdict`` only works on attrs instances, this works on anything.
<END_TASK>
<USER_TASK:>
Description:
def _asdict_anything(val, filter, dict_factory, retain_collection_types):
"""
``asdict`` only works on attrs instances, this works on anything.
""" |
if getattr(val.__class__, "__attrs_attrs__", None) is not None:
# Attrs class.
rv = asdict(val, True, filter, dict_factory, retain_collection_types)
elif isinstance(val, (tuple, list, set)):
cf = val.__class__ if retain_collection_types is True else list
rv = cf(
[
_asdict_anything(
i, filter, dict_factory, retain_collection_types
)
for i in val
]
)
elif isinstance(val, dict):
df = dict_factory
rv = df(
(
_asdict_anything(kk, filter, df, retain_collection_types),
_asdict_anything(vv, filter, df, retain_collection_types),
)
for kk, vv in iteritems(val)
)
else:
rv = val
return rv |
<SYSTEM_TASK:>
Interpret a marker and return a result depending on environment.
<END_TASK>
<USER_TASK:>
Description:
def interpret(marker, execution_context=None):
"""
Interpret a marker and return a result depending on environment.
:param marker: The marker to interpret.
:type marker: str
:param execution_context: The context used for name lookup.
:type execution_context: mapping
""" |
try:
expr, rest = parse_marker(marker)
except Exception as e:
raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e))
if rest and rest[0] != '#':
raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest))
context = dict(DEFAULT_CONTEXT)
if execution_context:
context.update(execution_context)
return evaluator.evaluate(expr, context) |
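A usage sketch, assuming this is distlib's marker evaluator:

interpret('python_version >= "3.4"')                   # bool for the running interpreter
interpret('os_name == "posix"', {'os_name': 'posix'})  # True: execution_context overrides
interpret('python_version >=')                         # raises SyntaxError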
<SYSTEM_TASK:>
Merges the given Item with the last one currently in the given Container if
<END_TASK>
<USER_TASK:>
Description:
def _merge_ws(self, item, container): # type: (Item, Container) -> bool
"""
Merges the given Item with the last one currently in the given Container if
both are whitespace items.
Returns True if the items were merged.
""" |
last = container.last_item()
if not last:
return False
if not isinstance(item, Whitespace) or not isinstance(last, Whitespace):
return False
start = self._idx - (len(last.s) + len(item.s))
container.body[-1] = (
container.body[-1][0],
Whitespace(self._src[start : self._idx]),
)
return True |
<SYSTEM_TASK:>
Returns whether a key is strictly a child of another key.
<END_TASK>
<USER_TASK:>
Description:
def _is_child(self, parent, child): # type: (str, str) -> bool
"""
Returns whether a key is strictly a child of another key.
AoT siblings are not considered children of one another.
""" |
parent_parts = tuple(self._split_table_name(parent))
child_parts = tuple(self._split_table_name(child))
if parent_parts == child_parts:
return False
return parent_parts == child_parts[: len(parent_parts)] |
<SYSTEM_TASK:>
Attempts to parse the next item and returns it, along with its key
<END_TASK>
<USER_TASK:>
Description:
def _parse_item(self): # type: () -> Optional[Tuple[Optional[Key], Item]]
"""
Attempts to parse the next item and returns it, along with its key
if the item is value-like.
""" |
self.mark()
with self._state as state:
while True:
c = self._current
if c == "\n":
# Found a newline; Return all whitespace found up to this point.
self.inc()
return None, Whitespace(self.extract())
elif c in " \t\r":
# Skip whitespace.
if not self.inc():
return None, Whitespace(self.extract())
elif c == "#":
# Found a comment, parse it
indent = self.extract()
cws, comment, trail = self._parse_comment_trail()
return None, Comment(Trivia(indent, cws, comment, trail))
elif c == "[":
# Found a table, delegate to the calling function.
return
else:
# Beginning of a KV pair.
# Return to beginning of whitespace so it gets included
# as indentation for the KV about to be parsed.
state.restore = True
break
return self._parse_key_value(True) |
<SYSTEM_TASK:>
Parses a key enclosed in either single or double quotes.
<END_TASK>
<USER_TASK:>
Description:
def _parse_quoted_key(self): # type: () -> Key
"""
Parses a key enclosed in either single or double quotes.
""" |
quote_style = self._current
key_type = None
dotted = False
for t in KeyType:
if t.value == quote_style:
key_type = t
break
if key_type is None:
raise RuntimeError("Should not have entered _parse_quoted_key()")
self.inc()
self.mark()
while self._current != quote_style and self.inc():
pass
key = self.extract()
if self._current == ".":
self.inc()
dotted = True
key += "." + self._parse_key().as_string()
key_type = KeyType.Bare
else:
self.inc()
return Key(key, key_type, "", dotted) |
<SYSTEM_TASK:>
Attempts to parse a value at the current position.
<END_TASK>
<USER_TASK:>
Description:
def _parse_value(self): # type: () -> Item
"""
Attempts to parse a value at the current position.
""" |
self.mark()
c = self._current
trivia = Trivia()
if c == StringType.SLB.value:
return self._parse_basic_string()
elif c == StringType.SLL.value:
return self._parse_literal_string()
elif c == BoolType.TRUE.value[0]:
return self._parse_true()
elif c == BoolType.FALSE.value[0]:
return self._parse_false()
elif c == "[":
return self._parse_array()
elif c == "{":
return self._parse_inline_table()
elif c in "+-" or self._peek(4) in {
"+inf",
"-inf",
"inf",
"+nan",
"-nan",
"nan",
}:
# Number
while self._current not in " \t\n\r#,]}" and self.inc():
pass
raw = self.extract()
item = self._parse_number(raw, trivia)
if item is not None:
return item
raise self.parse_error(InvalidNumberError)
elif c in string.digits:
# Integer, Float, Date, Time or DateTime
while self._current not in " \t\n\r#,]}" and self.inc():
pass
raw = self.extract()
m = RFC_3339_LOOSE.match(raw)
if m:
if m.group(1) and m.group(5):
# datetime
try:
return DateTime(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidDateTimeError)
if m.group(1):
try:
return Date(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidDateError)
if m.group(5):
try:
return Time(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidTimeError)
item = self._parse_number(raw, trivia)
if item is not None:
return item
raise self.parse_error(InvalidNumberError)
else:
raise self.parse_error(UnexpectedCharError, c) |
<SYSTEM_TASK:>
Parses all siblings of the provided table first and bundles them into
<END_TASK>
<USER_TASK:>
Description:
def _parse_aot(self, first, name_first): # type: (Table, str) -> AoT
"""
Parses all siblings of the provided table first and bundles them into
an AoT.
""" |
payload = [first]
self._aot_stack.append(name_first)
while not self.end():
is_aot_next, name_next = self._peek_table()
if is_aot_next and name_next == name_first:
_, table = self._parse_table(name_first)
payload.append(table)
else:
break
self._aot_stack.pop()
return AoT(payload, parsed=True) |
<SYSTEM_TASK:>
Peeks ahead n characters.
<END_TASK>
<USER_TASK:>
Description:
def _peek(self, n): # type: (int) -> str
"""
Peeks ahead n characters.
n is the max number of characters that will be peeked.
""" |
# we always want to restore after exiting this scope
with self._state(restore=True):
buf = ""
for _ in range(n):
if self._current not in " \t\n\r#,]}":
buf += self._current
self.inc()
continue
break
return buf |
<SYSTEM_TASK:>
Given a string and an iterable of delimiters, split on the first found
<END_TASK>
<USER_TASK:>
Description:
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
""" |
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx + 1:], min_delim |
<SYSTEM_TASK:>
Absolute path including the query string.
<END_TASK>
<USER_TASK:>
Description:
def request_uri(self):
"""Absolute path including the query string.""" |
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri |
<SYSTEM_TASK:>
Network location including host and port
<END_TASK>
<USER_TASK:>
Description:
def netloc(self):
"""Network location including host and port""" |
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host |
<SYSTEM_TASK:>
Convert self into a url
<END_TASK>
<USER_TASK:>
Description:
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:[email protected]:80/path?query#fragment'
""" |
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url |
<SYSTEM_TASK:>
Convert a tree to a genshi tree
<END_TASK>
<USER_TASK:>
Description:
def to_genshi(walker):
"""Convert a tree to a genshi tree
:arg walker: the treewalker to use to walk the tree to convert it
:returns: generator of genshi nodes
""" |
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1) |
<SYSTEM_TASK:>
Parse a requirements formatted file.
<END_TASK>
<USER_TASK:>
Description:
def parse_requirements(file_):
"""Parse a requirements formatted file.
Traverse a string until a delimiter is detected, then split at said
delimiter, get module name by element index, create a dict consisting of
module:version, and add dict to list of parsed modules.
Args:
file_: File to parse.
Raises:
OSError: If there are any issues accessing the file.
Returns:
tuple: The contents of the file, excluding comments.
""" |
modules = []
delim = ["<", ">", "=", "!", "~"] # https://www.python.org/dev/peps/pep-0508/#complete-grammar
try:
f = open_func(file_, "r")
except OSError:
logging.error("Failed on file: {}".format(file_))
raise
else:
data = [x.strip() for x in f.readlines() if x != "\n"]
finally:
f.close()
data = [x for x in data if x and x[0].isalpha()]  # skip blanks, comments and option lines
for x in data:
if not any([y in x for y in delim]): # Check for modules w/o a specifier.
modules.append({"name": x, "version": None})
for y in x:
if y in delim:
module = x.split(y)
module_name = module[0]
module_version = module[-1].replace("=", "")
module = {"name": module_name, "version": module_version}
if module not in modules:
modules.append(module)
break
return modules |
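A hedged sketch of the expected result for a small requirements file (path and contents illustrative; open_func is assumed to be open, as in the source module):

# requirements.txt:
#     requests==2.21.0
#     flask
parse_requirements("requirements.txt")
# -> [{'name': 'requests', 'version': '2.21.0'}, {'name': 'flask', 'version': None}]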
<SYSTEM_TASK:>
Compare modules in a file to imported modules in a project.
<END_TASK>
<USER_TASK:>
Description:
def compare_modules(file_, imports):
"""Compare modules in a file to imported modules in a project.
Args:
file_ (str): File to parse for modules to be compared.
imports (tuple): Modules being imported in the project.
Returns:
tuple: The modules not imported in the project, but do exist in the
specified file.
""" |
modules = parse_requirements(file_)
imports = [imports[i]["name"] for i in range(len(imports))]
modules = [modules[i]["name"] for i in range(len(modules))]
modules_not_imported = set(modules) - set(imports)
return modules_not_imported |
<SYSTEM_TASK:>
Display the difference between modules in a file and imported modules.
<END_TASK>
<USER_TASK:>
Description:
def diff(file_, imports):
"""Display the difference between modules in a file and imported modules.""" |
modules_not_imported = compare_modules(file_, imports)
logging.info("The following modules are in {} but do not seem to be imported: "
"{}".format(file_, ", ".join(x for x in modules_not_imported))) |
<SYSTEM_TASK:>
Remove modules that aren't imported in project from file.
<END_TASK>
<USER_TASK:>
Description:
def clean(file_, imports):
"""Remove modules that aren't imported in project from file.""" |
modules_not_imported = compare_modules(file_, imports)
re_remove = re.compile("|".join(modules_not_imported))
to_write = []
try:
f = open_func(file_, "r+")
except OSError:
logging.error("Failed on file: {}".format(file_))
raise
else:
for i in f.readlines():
if re_remove.match(i) is None:
to_write.append(i)
f.seek(0)
f.truncate()
for i in to_write:
f.write(i)
finally:
f.close()
logging.info("Successfully cleaned up requirements in " + file_) |
<SYSTEM_TASK:>
Helper to deprecate existing functionality.
<END_TASK>
<USER_TASK:>
Description:
def deprecated(reason, replacement, gone_in, issue=None):
# type: (str, Optional[str], Optional[str], Optional[int]) -> None
"""Helper to deprecate existing functionality.
reason:
Textual reason shown to the user about why this functionality has
been deprecated.
replacement:
Textual suggestion shown to the user about what alternative
functionality they can use.
gone_in:
The version of pip in which this functionality should be removed.
Raises errors if pip's current version is greater than or equal to
this.
issue:
Issue number on the tracker that would serve as a useful place for
users to find related discussion and provide feedback.
Always pass replacement, gone_in and issue as keyword arguments for clarity
at the call site.
""" |
# Construct a nice message.
# This is purposely eagerly formatted as we want it to appear as if someone
# typed this entire message out.
message = "DEPRECATION: " + reason
if replacement is not None:
message += " A possible replacement is {}.".format(replacement)
if issue is not None:
url = "https://github.com/pypa/pip/issues/" + str(issue)
message += " You can find discussion regarding this at {}.".format(url)
# Raise as an error if it has to be removed.
if gone_in is not None and parse(current_version) >= parse(gone_in):
raise PipDeprecationWarning(message)
warnings.warn(message, category=PipDeprecationWarning, stacklevel=2) |
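An illustrative call (all values hypothetical); it warns, or raises PipDeprecationWarning once pip's version reaches gone_in:

deprecated(
    reason="Custom option handling is deprecated.",  # hypothetical message
    replacement="the --option flag",                 # hypothetical replacement
    gone_in="19.2",                                  # hypothetical version
    issue=12345,                                     # hypothetical issue number
)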
<SYSTEM_TASK:>
Add "metadata" to candidates based on the dependency tree.
<END_TASK>
<USER_TASK:>
Description:
def set_metadata(candidates, traces, dependencies, pythons):
"""Add "metadata" to candidates based on the dependency tree.
Metadata for a candidate includes markers and a specifier for Python
version requirements.
:param candidates: A key-candidate mapping. Candidates in the mapping will
have their markers set.
:param traces: A graph trace (produced by `traces.trace_graph`) providing
information about dependency relationships between candidates.
:param dependencies: A key-collection mapping containing what dependencies
each candidate in `candidates` requested.
:param pythons: A key-str mapping containing Requires-Python information
of each candidate.
Keys in mappings and entries in the trace are identifiers of a package, as
implemented by the `identify` method of the resolver's provider.
The candidates are modified in-place.
""" |
metasets_mapping = _calculate_metasets_mapping(
dependencies, pythons, copy.deepcopy(traces),
)
for key, candidate in candidates.items():
candidate.markers = _format_metasets(metasets_mapping[key]) |
<SYSTEM_TASK:>
Marks a callback as wanting to receive the current context
<END_TASK>
<USER_TASK:>
Description:
def pass_context(f):
"""Marks a callback as wanting to receive the current context
object as first argument.
""" |
def new_func(*args, **kwargs):
return f(get_current_context(), *args, **kwargs)
return update_wrapper(new_func, f) |
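A usage sketch, assuming this is click's pass_context decorator:

import click

@click.command()
@click.pass_context
def cli(ctx):
    # ctx is the click.Context injected as the first argument
    click.echo('parent: %r' % ctx.parent)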
<SYSTEM_TASK:>
Shortcut for password prompts.
<END_TASK>
<USER_TASK:>
Description:
def password_option(*param_decls, **attrs):
"""Shortcut for password prompts.
This is equivalent to decorating a function with :func:`option` with
the following parameters::
@click.command()
@click.option('--password', prompt=True, confirmation_prompt=True,
hide_input=True)
def changeadmin(password):
pass
""" |
def decorator(f):
attrs.setdefault('prompt', True)
attrs.setdefault('confirmation_prompt', True)
attrs.setdefault('hide_input', True)
return option(*(param_decls or ('--password',)), **attrs)(f)
return decorator |
<SYSTEM_TASK:>
Checks whether pkg was installed by pip
<END_TASK>
<USER_TASK:>
Description:
def was_installed_by_pip(pkg):
# type: (str) -> bool
"""Checks whether pkg was installed by pip
This is used to avoid displaying the upgrade message when pip was in fact
installed by a system package manager, such as dnf on Fedora.
""" |
try:
dist = pkg_resources.get_distribution(pkg)
return (dist.has_metadata('INSTALLER') and
'pip' in dist.get_metadata_lines('INSTALLER'))
except pkg_resources.DistributionNotFound:
return False |
<SYSTEM_TASK:>
Check for an update for pip.
<END_TASK>
<USER_TASK:>
Description:
def pip_version_check(session, options):
# type: (PipSession, optparse.Values) -> None
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
""" |
installed_version = get_installed_version("pip")
if not installed_version:
return
pip_version = packaging_version.parse(installed_version)
pypi_version = None
try:
state = SelfCheckState(cache_dir=options.cache_dir)
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
# Lets use PackageFinder to see what the latest pip version is
finder = PackageFinder(
find_links=options.find_links,
index_urls=[options.index_url] + options.extra_index_urls,
allow_all_prereleases=False, # Explicitly set to False
trusted_hosts=options.trusted_hosts,
session=session,
)
all_candidates = finder.find_all_candidates("pip")
if not all_candidates:
return
pypi_version = str(
max(all_candidates, key=lambda c: c.version).version
)
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = packaging_version.parse(pypi_version)
# Determine if our pypi_version is older
if (pip_version < remote_version and
pip_version.base_version != remote_version.base_version and
was_installed_by_pip('pip')):
# Advise "python -m pip" on Windows to avoid issues
# with overwriting pip.exe.
if WINDOWS:
pip_cmd = "python -m pip"
else:
pip_cmd = "pip"
logger.warning(
"You are using pip version %s, however version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command.",
pip_version, pypi_version, pip_cmd
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
) |
<SYSTEM_TASK:>
Return sys.version_info-like tuple for use in decrementing the minor
<END_TASK>
<USER_TASK:>
Description:
def get_impl_version_info():
# type: () -> Tuple[int, ...]
"""Return sys.version_info-like tuple for use in decrementing the minor
version.""" |
if get_abbr_impl() == 'pp':
# as per https://github.com/pypa/pip/issues/2882
# attrs exist only on pypy
return (sys.version_info[0],
sys.pypy_version_info.major, # type: ignore
sys.pypy_version_info.minor) # type: ignore
else:
return sys.version_info[0], sys.version_info[1] |
<SYSTEM_TASK:>
Replace nonexistent paths that look like they refer to a member of a zip
<END_TASK>
<USER_TASK:>
Description:
def extract_zipped_paths(path):
"""Replace nonexistent paths that look like they refer to a member of a zip
archive with the location of an extracted copy of the target, or else
just return the provided path unchanged.
""" |
if os.path.exists(path):
# this is already a valid path, no need to do anything further
return path
# find the first valid part of the provided path and treat that as a zip archive
# assume the rest of the path is the name of a member in the archive
archive, member = os.path.split(path)
while archive and not os.path.exists(archive):
archive, prefix = os.path.split(archive)
member = '/'.join([prefix, member])
if not zipfile.is_zipfile(archive):
return path
zip_file = zipfile.ZipFile(archive)
if member not in zip_file.namelist():
return path
# we have a valid zip archive and a valid member of that archive
tmp = tempfile.gettempdir()
extracted_path = os.path.join(tmp, *member.split('/'))
if not os.path.exists(extracted_path):
extracted_path = zip_file.extract(member, path=tmp)
return extracted_path |
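A hedged sketch with a hypothetical path:

# If /python/lib.zip is a real zip file containing certifi/cacert.pem, this
# returns the path of a copy extracted under tempfile.gettempdir();
# otherwise the input path comes back unchanged.
extract_zipped_paths('/python/lib.zip/certifi/cacert.pem')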
<SYSTEM_TASK:>
Returns content type and parameters from given header
<END_TASK>
<USER_TASK:>
Description:
def _parse_content_type_header(header):
"""Returns content type and parameters from given header
:param header: string
:return: tuple containing content type and dictionary of
parameters
""" |
tokens = header.split(';')
content_type, params = tokens[0].strip(), tokens[1:]
params_dict = {}
items_to_strip = "\"' "
for param in params:
param = param.strip()
if param:
key, value = param, True
index_of_equals = param.find("=")
if index_of_equals != -1:
key = param[:index_of_equals].strip(items_to_strip)
value = param[index_of_equals + 1:].strip(items_to_strip)
params_dict[key.lower()] = value
return content_type, params_dict |
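Two illustrative calls:

_parse_content_type_header('text/html; charset="UTF-8"')
# -> ('text/html', {'charset': 'UTF-8'})
_parse_content_type_header('multipart/form-data; boundary=abc; weird')
# -> ('multipart/form-data', {'boundary': 'abc', 'weird': True})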
<SYSTEM_TASK:>
Iterate over slices of a string.
<END_TASK>
<USER_TASK:>
Description:
def iter_slices(string, slice_length):
"""Iterate over slices of a string.""" |
pos = 0
if slice_length is None or slice_length <= 0:
slice_length = len(string)
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length |
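For example:

list(iter_slices('abcdef', 2))     # ['ab', 'cd', 'ef']
list(iter_slices('abcdef', None))  # ['abcdef']: None or non-positive yields one slice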
<SYSTEM_TASK:>
Returns the requested content back in unicode.
<END_TASK>
<USER_TASK:>
Description:
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Strategies tried:
1. charset from the content-type header
2. fall back to decoding with invalid characters replaced
:rtype: str
""" |
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content |
<SYSTEM_TASK:>
Re-quote the given URI.
<END_TASK>
<USER_TASK:>
Description:
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
:rtype: str
""" |
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent) |
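A usage sketch (unquote_unreserved and InvalidURL come from the same module):

requote_uri('http://example.com/a b')     # 'http://example.com/a%20b'
requote_uri('http://example.com/a%2Fb')   # reserved '%2F' stays quoted
requote_uri('http://example.com/100%25')  # '%25' ('%') survives the cycle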
<SYSTEM_TASK:>
This function allows you to check if an IP belongs to a network subnet
<END_TASK>
<USER_TASK:>
Description:
def address_in_network(ip, net):
"""This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
""" |
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask) |
<SYSTEM_TASK:>
Very simple check of the cidr format in no_proxy variable.
<END_TASK>
<USER_TASK:>
Description:
def is_valid_cidr(string_network):
"""
Very simple check of the cidr format in no_proxy variable.
:rtype: bool
""" |
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True |
<SYSTEM_TASK:>
Set the environment variable 'env_name' to 'value'
<END_TASK>
<USER_TASK:>
Description:
def set_environ(env_name, value):
"""Set the environment variable 'env_name' to 'value'
Save previous value, yield, and then restore the previous value stored in
the environment variable 'env_name'.
If 'value' is None, do nothing""" |
value_changed = value is not None
if value_changed:
old_value = os.environ.get(env_name)
os.environ[env_name] = value
try:
yield
finally:
if value_changed:
if old_value is None:
del os.environ[env_name]
else:
os.environ[env_name] = old_value |
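In requests this generator is wrapped with contextlib.contextmanager, so usage looks like:

with set_environ('no_proxy', 'localhost'):
    pass  # os.environ['no_proxy'] == 'localhost' inside the block
# on exit the previous value (or absence) of 'no_proxy' is restored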
<SYSTEM_TASK:>
Select a proxy for the url, if applicable.
<END_TASK>
<USER_TASK:>
Description:
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
:param url: The URL of the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
""" |
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
return proxies.get(urlparts.scheme, proxies.get('all'))
proxy_keys = [
urlparts.scheme + '://' + urlparts.hostname,
urlparts.scheme,
'all://' + urlparts.hostname,
'all',
]
proxy = None
for proxy_key in proxy_keys:
if proxy_key in proxies:
proxy = proxies[proxy_key]
break
return proxy |
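A usage sketch showing the precedence order encoded in proxy_keys:

proxies = {
    'http': 'http://proxy:3128',
    'https://example.com': 'http://special:8080',
}
select_proxy('https://example.com/x', proxies)  # 'http://special:8080' (scheme+host wins)
select_proxy('http://other.org/', proxies)      # 'http://proxy:3128' (scheme match)
select_proxy('ftp://other.org/', proxies)       # None (no key matches)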
<SYSTEM_TASK:>
Given a URL that may or may not have a scheme, prepend the given scheme.
<END_TASK>
<USER_TASK:>
Description:
def prepend_scheme_if_needed(url, new_scheme):
"""Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.
:rtype: str
""" |
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment)) |
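For example:

prepend_scheme_if_needed('example.com/path', 'http')     # 'http://example.com/path'
prepend_scheme_if_needed('https://example.com', 'http')  # existing scheme kept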
<SYSTEM_TASK:>
Given a url with authentication components, extract them into a tuple of
<END_TASK>
<USER_TASK:>
Description:
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password.
:rtype: (str,str)
""" |
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth |
<SYSTEM_TASK:>
Verifies that header value is a string which doesn't contain
<END_TASK>
<USER_TASK:>
Description:
def check_header_validity(header):
"""Verifies that header value is a string which doesn't contain
leading whitespace or return characters. This prevents unintended
header injection.
:param header: tuple, in the format (name, value).
""" |
name, value = header
if isinstance(value, bytes):
pat = _CLEAN_HEADER_REGEX_BYTE
else:
pat = _CLEAN_HEADER_REGEX_STR
try:
if not pat.match(value):
raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
except TypeError:
raise InvalidHeader("Value for header {%s: %s} must be of type str or "
"bytes, not %s" % (name, value, type(value))) |
<SYSTEM_TASK:>
Given a url remove the fragment and the authentication part.
<END_TASK>
<USER_TASK:>
Description:
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part.
:rtype: str
""" |
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, '')) |
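For example:

urldefragauth('http://user:[email protected]/path#frag')
# -> 'http://example.com/path'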
<SYSTEM_TASK:>
Move file pointer back to its recorded starting position
<END_TASK>
<USER_TASK:>
Description:
def rewind_body(prepared_request):
"""Move file pointer back to its recorded starting position
so it can be read again on redirect.
""" |
body_seek = getattr(prepared_request.body, 'seek', None)
if body_seek is not None and isinstance(prepared_request._body_position, integer_types):
try:
body_seek(prepared_request._body_position)
except (IOError, OSError):
raise UnrewindableBodyError("An error occurred when rewinding request "
"body for redirect.")
else:
raise UnrewindableBodyError("Unable to rewind request body for redirect.") |
<SYSTEM_TASK:>
Does the node have a safe representation?
<END_TASK>
<USER_TASK:>
Description:
def has_safe_repr(value):
"""Does the node have a safe representation?""" |
if value is None or value is NotImplemented or value is Ellipsis:
return True
if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
return True
if type(value) in (tuple, list, set, frozenset):
for item in value:
if not has_safe_repr(item):
return False
return True
elif type(value) is dict:
for key, value in iteritems(value):
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False |
<SYSTEM_TASK:>
Check if the names passed are accessed undeclared. The return value
<END_TASK>
<USER_TASK:>
Description:
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
""" |
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared |
<SYSTEM_TASK:>
Enable buffering for the frame from that point onwards.
<END_TASK>
<USER_TASK:>
Description:
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards.""" |
frame.buffer = self.temporary_identifier()
self.writeline('%s = []' % frame.buffer) |
<SYSTEM_TASK:>
Return the buffer contents of the frame.
<END_TASK>
<USER_TASK:>
Description:
def return_buffer_contents(self, frame, force_unescaped=False):
"""Return the buffer contents of the frame.""" |
if not force_unescaped:
if frame.eval_ctx.volatile:
self.writeline('if context.eval_ctx.autoescape:')
self.indent()
self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
self.writeline('else:')
self.indent()
self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
return
elif frame.eval_ctx.autoescape:
self.writeline('return Markup(concat(%s))' % frame.buffer)
return
self.writeline('return concat(%s)' % frame.buffer) |
<SYSTEM_TASK:>
Yield or write into the frame buffer.
<END_TASK>
<USER_TASK:>
Description:
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer.""" |
if frame.buffer is None:
self.writeline('yield ', node)
else:
self.writeline('%s.append(' % frame.buffer, node) |
<SYSTEM_TASK:>
Simple shortcut for start_write + write + end_write.
<END_TASK>
<USER_TASK:>
Description:
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write.""" |
self.start_write(frame, node)
self.write(s)
self.end_write(frame) |
<SYSTEM_TASK:>
Write a string into the output stream.
<END_TASK>
<USER_TASK:>
Description:
def write(self, x):
"""Write a string into the output stream.""" |
if self._new_lines:
if not self._first_write:
self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info,
self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x) |
<SYSTEM_TASK:>
Combination of newline and write.
<END_TASK>
<USER_TASK:>
Description:
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write.""" |
self.newline(node, extra)
self.write(x) |
<SYSTEM_TASK:>
Add one or more newlines before the next write.
<END_TASK>
<USER_TASK:>
Description:
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write.""" |
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno |
<SYSTEM_TASK:>
Writes a function call to the stream for the current node.
<END_TASK>
<USER_TASK:>
Description:
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include Python keywords, otherwise a syntax
error could occur. The extra keyword arguments should be given
as a Python dict.
""" |
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write(', %s=%s' % (key, value))
if node.dyn_args:
self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(', **dict({')
else:
self.write(', **{')
for kwarg in node.kwargs:
self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
self.write(', ')
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
self.write('}, **')
self.visit(node.dyn_kwargs, frame)
self.write(')')
else:
self.write('}')
elif node.dyn_kwargs is not None:
self.write(', **')
self.visit(node.dyn_kwargs, frame) |
<SYSTEM_TASK:>
Pull all the dependencies.
<END_TASK>
<USER_TASK:>
Description:
def pull_dependencies(self, nodes):
"""Pull all the dependencies.""" |
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name)) |
<SYSTEM_TASK:>
Return a human readable position for the node.
<END_TASK>
<USER_TASK:>
Description:
def position(self, node):
"""Return a human readable position for the node.""" |
rv = 'line %d' % node.lineno
if self.name is not None:
rv += ' in ' + repr(self.name)
return rv |
<SYSTEM_TASK:>
Pops the topmost level for assignment tracking and updates the
<END_TASK>
<USER_TASK:>
Description:
def pop_assign_tracking(self, frame):
"""Pops the topmost level for assignment tracking and updates the
context variables if necessary.
""" |
vars = self._assign_stack.pop()
if not frame.toplevel or not vars:
return
public_names = [x for x in vars if x[:1] != '_']
if len(vars) == 1:
name = next(iter(vars))
ref = frame.symbols.ref(name)
self.writeline('context.vars[%r] = %s' % (name, ref))
else:
self.writeline('context.vars.update({')
for idx, name in enumerate(vars):
if idx:
self.write(', ')
ref = frame.symbols.ref(name)
self.write('%r: %s' % (name, ref))
self.write('})')
if public_names:
if len(public_names) == 1:
self.writeline('context.exported_vars.add(%r)' %
public_names[0])
else:
self.writeline('context.exported_vars.update((%s))' %
', '.join(imap(repr, public_names))) |
<SYSTEM_TASK:>
Handles includes.
<END_TASK>
<USER_TASK:>
Description:
def visit_Include(self, node, frame):
"""Handles includes.""" |
if node.ignore_missing:
self.writeline('try:')
self.indent()
func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = 'select_template'
self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
self.writeline('except TemplateNotFound:')
self.indent()
self.writeline('pass')
self.outdent()
self.writeline('else:')
self.indent()
skip_event_yield = False
if node.with_context:
loop = self.environment.is_async and 'async for' or 'for'
self.writeline('%s event in template.root_render_func('
'template.new_context(context.get_all(), True, '
'%s)):' % (loop, self.dump_local_context(frame)))
elif self.environment.is_async:
self.writeline('for event in (await '
'template._get_default_module_async())'
'._body_stream:')
else:
if supports_yield_from:
self.writeline('yield from template._get_default_module()'
'._body_stream')
skip_event_yield = True
else:
self.writeline('for event in template._get_default_module()'
'._body_stream:')
if not skip_event_yield:
self.indent()
self.simple_write('event', frame)
self.outdent()
if node.ignore_missing:
self.outdent() |
<SYSTEM_TASK:>
Visit named imports.
<END_TASK>
<USER_TASK:>
Description:
def visit_FromImport(self, node, frame):
"""Visit named imports.""" |
self.newline(node)
self.write('included_template = %senvironment.get_template('
% (self.environment.is_async and 'await ' or ''))
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module%s(context.get_all(), True, %s)'
% (self.environment.is_async and '_async' or '',
self.dump_local_context(frame)))
elif self.environment.is_async:
self.write('_get_default_module_async()')
else:
self.write('_get_default_module()')
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline('%s = getattr(included_template, '
'%r, missing)' % (frame.symbols.ref(alias), name))
self.writeline('if %s is missing:' % frame.symbols.ref(alias))
self.indent()
self.writeline('%s = undefined(%r %% '
'included_template.__name__, '
'name=%r)' %
(frame.symbols.ref(alias),
'the template %%r (imported on %s) does '
'not export the requested name %s' % (
self.position(node),
repr(name)
), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith('_'):
discarded_names.append(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline('context.vars[%r] = %s' %
(name, frame.symbols.ref(name)))
else:
self.writeline('context.vars.update({%s})' % ', '.join(
'%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
))
if discarded_names:
if len(discarded_names) == 1:
self.writeline('context.exported_vars.discard(%r)' %
discarded_names[0])
else:
self.writeline('context.exported_vars.difference_'
'update((%s))' % ', '.join(imap(repr, discarded_names))) |
<SYSTEM_TASK:>
Whether finalizer should be called at exit
<END_TASK>
<USER_TASK:>
Description:
def atexit(self):
"""Whether finalizer should be called at exit""" |
info = self._registry.get(self)
return bool(info) and info.atexit |
<SYSTEM_TASK:>
Serialize an element and its child nodes to a string
<END_TASK>
<USER_TASK:>
Description:
def tostring(element):
"""Serialize an element and its child nodes to a string""" |
rv = []
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv) |
<SYSTEM_TASK:>
Return the visitor function for this node or `None` if no visitor
<END_TASK>
<USER_TASK:>
Description:
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
""" |
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None) |
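A hedged sketch of how a subclass plugs into this dispatch, assuming the usual ``visit``/``generic_visit`` machinery on the same class; the ``NameCollector`` example is hypothetical:

class NameCollector(NodeVisitor):
    def __init__(self):
        self.names = []

    def visit_Name(self, node, *args, **kwargs):
        # get_visitor() resolves this method because the node's
        # class is named ``Name``.
        self.names.append(node.name)
        self.generic_visit(node, *args, **kwargs)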
<SYSTEM_TASK:>
As transformers may return lists in some places this method
<END_TASK>
<USER_TASK:>
Description:
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
""" |
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv |
<SYSTEM_TASK:>
Build a wheel from this project.
<END_TASK>
<USER_TASK:>
Description:
def build_wheel(
self, wheel_directory, config_settings=None,
metadata_directory=None):
"""Build a wheel from this project.
Returns the name of the newly created file.
In general, this will call the 'build_wheel' hook in the backend.
However, if that was previously called by
'prepare_metadata_for_build_wheel', and the same metadata_directory is
used, the previously built wheel will be copied to wheel_directory.
""" |
if metadata_directory is not None:
metadata_directory = abspath(metadata_directory)
return self._call_hook('build_wheel', {
'wheel_directory': abspath(wheel_directory),
'config_settings': config_settings,
'metadata_directory': metadata_directory,
}) |
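A hedged usage sketch, assuming this method sits on pep517's hook caller (``Pep517HookCaller`` in the ``pep517.wrappers`` module; names vary across releases) and that the project declares its backend in pyproject.toml:

from pep517.wrappers import Pep517HookCaller, default_subprocess_runner

hooks = Pep517HookCaller(
    'path/to/project',                      # directory containing pyproject.toml
    build_backend='setuptools.build_meta',  # assumed backend
    runner=default_subprocess_runner,
)
wheel_name = hooks.build_wheel('dist/')  # relative paths are abspath'd above
print(wheel_name)                        # e.g. 'example-0.1-py3-none-any.whl'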
<SYSTEM_TASK:>
Constructs a ResNet-18 model.
<END_TASK>
<USER_TASK:>
Description:
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
""" |
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model |
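A minimal usage sketch, assuming the torchvision-style imports of the surrounding module:

import torch

model = resnet18(pretrained=False, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 10)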
<SYSTEM_TASK:>
Constructs a ResNet-152 model.
<END_TASK>
<USER_TASK:>
Description:
def fbresnet152(num_classes=1000, pretrained='imagenet'):
"""Constructs a ResNet-152 model.
Args:
pretrained (str or None): If 'imagenet', returns a model pre-trained on ImageNet
""" |
model = FBResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['fbresnet152'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model |
<SYSTEM_TASK:>
Selectable global pooling function with dynamic input kernel size
<END_TASK>
<USER_TASK:>
Description:
def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
"""Selectable global pooling function with dynamic input kernel size
""" |
if pool_type == 'avgmaxc':
x = torch.cat([
F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
], dim=1)
elif pool_type == 'avgmax':
x_avg = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
x = 0.5 * (x_avg + x_max)
elif pool_type == 'max':
x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
else:
if pool_type != 'avg':
print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
x = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
return x |
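A quick sketch of the pooling variants on a dummy feature map, assuming torch is imported as in the surrounding module:

import torch

x = torch.randn(2, 512, 7, 7)
adaptive_avgmax_pool2d(x, 'avg').shape      # (2, 512, 1, 1)
adaptive_avgmax_pool2d(x, 'avgmax').shape   # (2, 512, 1, 1), elementwise 0.5 * (avg + max)
adaptive_avgmax_pool2d(x, 'avgmaxc').shape  # (2, 1024, 1, 1), avg and max concatenated on channels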
<SYSTEM_TASK:>
Download a URL to a local file.
<END_TASK>
<USER_TASK:>
Description:
def download_url(url, destination=None, progress_bar=True):
"""Download a URL to a local file.
Parameters
----------
url : str
The URL to download.
destination : str, None
The destination of the file. If None is given the file is saved to a temporary location.
progress_bar : bool
Whether to show a command-line progress bar while downloading.
Returns
-------
filename : str
The location of the downloaded file.
Notes
-----
Progress bar use/example adapted from tqdm documentation: https://github.com/tqdm/tqdm
""" |
def my_hook(t):
last_b = [0]
def inner(b=1, bsize=1, tsize=None):
if tsize is not None:
t.total = tsize
if b > 0:
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
if progress_bar:
with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
filename, _ = urlretrieve(url, filename=destination, reporthook=my_hook(t))
else:
filename, _ = urlretrieve(url, filename=destination)
return filename |
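A minimal usage sketch; the URL and destination here are hypothetical:

path = download_url('https://example.com/weights.pth',
                    destination='/tmp/weights.pth')
print(path)  # '/tmp/weights.pth'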
<SYSTEM_TASK:>
Get the cached value.
<END_TASK>
<USER_TASK:>
Description:
def unwrap(self, dt):
"""
Get the cached value.
Returns
-------
value : object
The cached value.
Raises
------
Expired
Raised when `dt` is greater than self.expires.
""" |
expires = self._expires
if expires is AlwaysExpired or expires < dt:
raise Expired(self._expires)
return self._value |
<SYSTEM_TASK:>
Get the value of a cached object.
<END_TASK>
<USER_TASK:>
Description:
def get(self, key, dt):
"""Get the value of a cached object.
Parameters
----------
key : any
The key to lookup.
dt : datetime
The time of the lookup.
Returns
-------
result : any
The value for ``key``.
Raises
------
KeyError
Raised if the key is not in the cache or the value for the key
has expired.
""" |
try:
return self._cache[key].unwrap(dt)
except Expired:
self.cleanup(self._cache[key]._unsafe_get_value())
del self._cache[key]
raise KeyError(key) |
<SYSTEM_TASK:>
Adds a new key value pair to the cache.
<END_TASK>
<USER_TASK:>
Description:
def set(self, key, value, expiration_dt):
"""Adds a new key value pair to the cache.
Parameters
----------
key : any
The key to use for the pair.
value : any
The value to store under the name ``key``.
expiration_dt : datetime
When should this mapping expire? The cache is considered invalid
for dates **strictly greater** than ``expiration_dt``.
""" |
self._cache[key] = CachedObject(value, expiration_dt) |
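A hedged end-to-end sketch of the ``set``/``get``/``unwrap`` trio above, assuming they live on zipline's ``ExpiringCache``/``CachedObject`` pair with default constructor arguments:

from datetime import datetime, timedelta

cache = ExpiringCache()
now = datetime(2024, 1, 1)
cache.set('price', 101.5, expiration_dt=now + timedelta(days=1))
cache.get('price', now)                      # -> 101.5
cache.get('price', now + timedelta(days=2))  # raises KeyError: entry expired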
<SYSTEM_TASK:>
Ensures a subdirectory of the working directory.
<END_TASK>
<USER_TASK:>
Description:
def ensure_dir(self, *path_parts):
"""Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
""" |
path = self.getpath(*path_parts)
ensure_directory(path)
return path |
<SYSTEM_TASK:>
Verify that DataFrames in ``frames`` have the same indexing scheme and are
<END_TASK>
<USER_TASK:>
Description:
def verify_frames_aligned(frames, calendar):
"""
Verify that DataFrames in ``frames`` have the same indexing scheme and are
aligned to ``calendar``.
Parameters
----------
frames : list[pd.DataFrame]
calendar : trading_calendars.TradingCalendar
Raises
------
ValueError
If frames have different indexes/columns, or if frame indexes do not
match a contiguous region of ``calendar``.
""" |
indexes = [f.index for f in frames]
check_indexes_all_same(indexes, message="DataFrame indexes don't match:")
columns = [f.columns for f in frames]
check_indexes_all_same(columns, message="DataFrame columns don't match:")
start, end = indexes[0][[0, -1]]
cal_sessions = calendar.sessions_in_range(start, end)
check_indexes_all_same(
[indexes[0], cal_sessions],
"DataFrame index doesn't match {} calendar:".format(calendar.name),
) |
<SYSTEM_TASK:>
Check if all values in a sequence are equal.
<END_TASK>
<USER_TASK:>
Description:
def same(*values):
"""
Check if all values in a sequence are equal.
Returns True on empty sequences.
Examples
--------
>>> same(1, 1, 1, 1)
True
>>> same(1, 2, 1)
False
>>> same()
True
""" |
if not values:
return True
first, rest = values[0], values[1:]
return all(value == first for value in rest) |
<SYSTEM_TASK:>
Perform a chained application of ``getattr`` on ``value`` with the values
<END_TASK>
<USER_TASK:>
Description:
def getattrs(value, attrs, default=_no_default):
"""
Perform a chained application of ``getattr`` on ``value`` with the values
in ``attrs``.
If ``default`` is supplied, return it if any of the attribute lookups fail.
Parameters
----------
value : object
Root of the lookup chain.
attrs : iterable[str]
Sequence of attributes to look up.
default : object, optional
Value to return if any of the lookups fail.
Returns
-------
result : object
Result of the lookup sequence.
Examples
--------
>>> class EmptyObject(object):
... pass
...
>>> obj = EmptyObject()
>>> obj.foo = EmptyObject()
>>> obj.foo.bar = "value"
>>> getattrs(obj, ('foo', 'bar'))
'value'
>>> getattrs(obj, ('foo', 'buzz'))
Traceback (most recent call last):
...
AttributeError: 'EmptyObject' object has no attribute 'buzz'
>>> getattrs(obj, ('foo', 'buzz'), 'default')
'default'
""" |
try:
for attr in attrs:
value = getattr(value, attr)
except AttributeError:
if default is _no_default:
raise
value = default
return value |
<SYSTEM_TASK:>
Decorator factory for setting attributes on a function.
<END_TASK>
<USER_TASK:>
Description:
def set_attribute(name, value):
"""
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Examples
--------
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
""" |
def decorator(f):
setattr(f, name, value)
return f
return decorator |
<SYSTEM_TASK:>
Fold a function over a sequence with right associativity.
<END_TASK>
<USER_TASK:>
Description:
def foldr(f, seq, default=_no_default):
"""Fold a function over a sequence with right associativity.
Parameters
----------
f : callable[any, any]
The function to reduce the sequence with.
The first argument will be the element of the sequence; the second
argument will be the accumulator.
seq : iterable[any]
The sequence to reduce.
default : any, optional
The starting value to reduce with. If not provided, the sequence
cannot be empty, and the last value of the sequence will be used.
Returns
-------
folded : any
The folded value.
Notes
-----
This function works by reducing the sequence in a right associative way.
For example, imagine we are folding with ``operator.add`` or ``+``:
.. code-block:: python
foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (... + (seq[-1] + default))))
In the more general case with an arbitrary function, ``foldr`` will expand
like so:
.. code-block:: python
foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))
For a more in depth discussion of left and right folds, see:
`https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
The images in that page are very good for showing the differences between
``foldr`` and ``foldl`` (``reduce``).
.. note::
For performance reasons it is best to pass a strict (non-lazy) sequence,
for example, a list.
See Also
--------
:func:`functools.reduce`
:func:`sum`
""" |
return reduce(
flip(f),
reversed(seq),
*(default,) if default is not _no_default else ()
) |
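A short demonstration of the right associativity, using ``operator.sub`` and contrasting with the left fold from ``functools.reduce``:

from functools import reduce
from operator import sub

foldr(sub, [1, 2, 3], 0)   # 1 - (2 - (3 - 0)) == 2
reduce(sub, [1, 2, 3], 0)  # ((0 - 1) - 2) - 3 == -6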
<SYSTEM_TASK:>
r"""Projection vectors to the simplex domain
<END_TASK>
<USER_TASK:>
Description:
def simplex_projection(v, b=1):
r"""Projection vectors to the simplex domain
Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
s.t. \sum_{i=1}^{m} w_{i} = z, w_{i}\geq 0
Input: A vector v \in R^{m}, and a scalar z > 0 (passed as ``b``, default=1)
Output: Projection vector w
:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> proj # doctest: +NORMALIZE_WHITESPACE
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print(proj.sum())
1.0
Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2013 by Thomas Wiecki ([email protected]).
""" |
v = np.asarray(v)
p = len(v)
# Sort v into u in descending order
v = (v > 0) * v
u = np.sort(v)[::-1]
sv = np.cumsum(u)
rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
theta = np.max([0, (sv[rho] - b) / (rho + 1)])
w = (v - theta)
w[w < 0] = 0
return w |
<SYSTEM_TASK:>
Run an example module from zipline.examples.
<END_TASK>
<USER_TASK:>
Description:
def run_example(example_name, environ):
"""
Run an example module from zipline.examples.
""" |
mod = EXAMPLE_MODULES[example_name]
register_calendar("YAHOO", get_calendar("NYSE"), force=True)
return run_algorithm(
initialize=getattr(mod, 'initialize', None),
handle_data=getattr(mod, 'handle_data', None),
before_trading_start=getattr(mod, 'before_trading_start', None),
analyze=getattr(mod, 'analyze', None),
bundle='test',
environ=environ,
# Provide a default capital base, but allow the test to override.
**merge({'capital_base': 1e7}, mod._test_args())
) |
<SYSTEM_TASK:>
Compute slopes of linear regressions between columns of ``dependents`` and
<END_TASK>
<USER_TASK:>
Description:
def vectorized_beta(dependents, independent, allowed_missing, out=None):
"""
Compute slopes of linear regressions between columns of ``dependents`` and
``independent``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independent : np.array[N, 1]
Independent variable of the regression.
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
more than this many missing observations in ``dependents`` will output
NaN as the regression coefficient.
out : np.array[M], optional
Output array into which the slopes are written. A new array is allocated
when not provided.
Returns
-------
slopes : np.array[M]
Linear regression coefficients for each column of ``dependents``.
""" |
# Cache these as locals since we're going to call them multiple times.
nan = np.nan
isnan = np.isnan
N, M = dependents.shape
if out is None:
out = np.full(M, nan)
# Copy N times as a column vector and fill with nans to have the same
# missing value pattern as the dependent variable.
#
# PERF_TODO: We could probably avoid the space blowup by doing this in
# Cython.
# shape: (N, M)
independent = np.where(
isnan(dependents),
nan,
independent,
)
# Calculate beta as Cov(X, Y) / Cov(X, X).
# https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line # noqa
#
# NOTE: The usual formula for covariance is::
#
# mean((X - mean(X)) * (Y - mean(Y)))
#
# However, we don't actually need to take the mean of both sides of the
# product, because of the following equivalence::
#
# Let X_res = (X - mean(X)).
# We have:
#
# mean(X_res * (Y - mean(Y)))
# (1) = mean((X_res * Y) - (X_res * mean(Y)))
# (2) = mean(X_res * Y) - mean(X_res * mean(Y))
# (3) = mean(X_res * Y) - mean(X_res) * mean(Y)
# (4) = mean(X_res * Y) - 0 * mean(Y)
# (5) = mean(X_res * Y)
#
#
# The tricky step in the above derivation is step (4). We know that
# mean(X_res) is zero because, for any X:
#
# mean(X - mean(X)) = mean(X) - mean(X) = 0.
#
# The upshot of this is that we only have to center one of `independent`
# and `dependent` when calculating covariances. Since we need the centered
# `independent` to calculate its variance in the next step, we choose to
# center `independent`.
# shape: (N, M)
ind_residual = independent - nanmean(independent, axis=0)
# shape: (M,)
covariances = nanmean(ind_residual * dependents, axis=0)
# We end up with different variances in each column here because each
# column may have a different subset of the data dropped due to missing
# data in the corresponding dependent column.
# shape: (M,)
independent_variances = nanmean(ind_residual ** 2, axis=0)
# shape: (M,)
np.divide(covariances, independent_variances, out=out)
# Write nans back to locations where we have more than the allowed number
# of missing entries.
nanlocs = isnan(independent).sum(axis=0) > allowed_missing
out[nanlocs] = nan
return out |
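A quick sanity check against per-column ``np.polyfit`` on data with no missing values (so ``allowed_missing`` never triggers); this is a sketch, not part of the original module:

import numpy as np

rng = np.random.RandomState(0)
independent = rng.randn(100, 1)
dependents = 2.0 * independent + rng.randn(100, 3)  # true slope ~= 2 per column
betas = vectorized_beta(dependents, independent, allowed_missing=0)
for i in range(dependents.shape[1]):
    slope = np.polyfit(independent[:, 0], dependents[:, i], 1)[0]
    assert np.isclose(betas[i], slope)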
<SYSTEM_TASK:>
Format a URL for loading data from Bank of Canada.
<END_TASK>
<USER_TASK:>
Description:
def _format_url(instrument_type,
instrument_ids,
start_date,
end_date,
earliest_allowed_date):
"""
Format a URL for loading data from Bank of Canada.
""" |
return (
"http://www.bankofcanada.ca/stats/results/csv"
"?lP=lookup_{instrument_type}_yields.php"
"&sR={restrict}"
"&se={instrument_ids}"
"&dF={start}"
"&dT={end}".format(
instrument_type=instrument_type,
instrument_ids='-'.join(map(prepend("L_"), instrument_ids)),
restrict=earliest_allowed_date.strftime("%Y-%m-%d"),
start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"),
)
) |
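A hedged example of the resulting URL, assuming ``prepend`` is a curried helper that prefixes each id with "L_" (as elsewhere in this module) and pandas Timestamps for the dates:

import pandas as pd

_format_url(
    'tbill',
    ['V39063', 'V39065'],
    pd.Timestamp('2014-01-01'),
    pd.Timestamp('2014-06-01'),
    pd.Timestamp('2002-01-01'),
)
# -> 'http://www.bankofcanada.ca/stats/results/csv?lP=lookup_tbill_yields.php'
#    '&sR=2002-01-01&se=L_V39063-L_V39065&dF=2014-01-01&dT=2014-06-01'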