<SYSTEM_TASK:> Returns the named parse results as a nested dictionary. <END_TASK>
<USER_TASK:>
Description:
def asDict(self):
    """
    Returns the named parse results as a nested dictionary.

    Example::

        integer = Word(nums)
        date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

        result = date_str.parseString('12/31/1999')
        print(type(result), repr(result))
        # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})

        result_dict = result.asDict()
        print(type(result_dict), repr(result_dict))
        # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}

        # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
        import json
        print(json.dumps(result))           # -> Exception: TypeError: ... is not JSON serializable
        print(json.dumps(result.asDict()))  # -> {"month": "31", "day": "1999", "year": "12"}
    """
    if PY_3:
        item_fn = self.items
    else:
        item_fn = self.iteritems

    def toItem(obj):
        if isinstance(obj, ParseResults):
            if obj.haskeys():
                return obj.asDict()
            else:
                return [toItem(v) for v in obj]
        else:
            return obj

    return dict((k, toItem(v)) for k, v in item_fn())
<SYSTEM_TASK:> Returns the results name for this token expression. <END_TASK>
<USER_TASK:>
Description:
def getName(self):
    r"""
    Returns the results name for this token expression. Useful when several
    different expressions might match at a particular location.

    Example::

        integer = Word(nums)
        ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
        house_number_expr = Suppress('#') + Word(nums, alphanums)
        user_data = (Group(house_number_expr)("house_number")
                    | Group(ssn_expr)("ssn")
                    | Group(integer)("age"))
        user_info = OneOrMore(user_data)

        result = user_info.parseString("22 111-22-3333 #221B")
        for item in result:
            print(item.getName(), ':', item[0])

    prints::

        age : 22
        ssn : 111-22-3333
        house_number : 221B
    """
    if self.__name:
        return self.__name
    elif self.__parent:
        par = self.__parent()
        if par:
            return par.__lookup(self)
        else:
            return None
    elif (len(self) == 1 and
          len(self.__tokdict) == 1 and
          next(iter(self.__tokdict.values()))[0][1] in (0, -1)):
        return next(iter(self.__tokdict.keys()))
    else:
        return None
<SYSTEM_TASK:> Define a name for this expression, making debugging and exception messages clearer. <END_TASK>
<USER_TASK:>
Description:
def setName(self, name):
    """
    Define a name for this expression, making debugging and exception messages clearer.

    Example::

        Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
        Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
    """
    self.name = name
    self.errmsg = "Expected " + self.name
    if hasattr(self, "exception"):
        self.exception.msg = self.errmsg
    return self
<SYSTEM_TASK:> Overrides the default whitespace chars <END_TASK>
<USER_TASK:>
Description:
def setWhitespaceChars(self, chars):
    """
    Overrides the default whitespace chars
    """
    self.skipWhitespace = True
    self.whiteChars = chars
    self.copyDefaultWhiteChars = False
    return self
<SYSTEM_TASK:> Enable display of debugging messages while doing pattern matching. <END_TASK>
<USER_TASK:>
Description:
def setDebugActions(self, startAction, successAction, exceptionAction):
    """
    Enable display of debugging messages while doing pattern matching.
    """
    self.debugActions = (startAction or _defaultStartDebugAction,
                         successAction or _defaultSuccessDebugAction,
                         exceptionAction or _defaultExceptionDebugAction)
    self.debug = True
    return self
<SYSTEM_TASK:> Enable display of debugging messages while doing pattern matching. <END_TASK>
<USER_TASK:>
Description:
def setDebug(self, flag=True):
    """
    Enable display of debugging messages while doing pattern matching.
    Set ``flag`` to True to enable, False to disable.

    Example::

        wd = Word(alphas).setName("alphaword")
        integer = Word(nums).setName("numword")
        term = wd | integer

        # turn on debugging for wd
        wd.setDebug()

        OneOrMore(term).parseString("abc 123 xyz 890")

    prints::

        Match alphaword at loc 0(1,1)
        Matched alphaword -> ['abc']
        Match alphaword at loc 3(1,4)
        Exception raised:Expected alphaword (at char 4), (line:1, col:5)
        Match alphaword at loc 7(1,8)
        Matched alphaword -> ['xyz']
        Match alphaword at loc 11(1,12)
        Exception raised:Expected alphaword (at char 12), (line:1, col:13)
        Match alphaword at loc 15(1,16)
        Exception raised:Expected alphaword (at char 15), (line:1, col:16)

    The output shown is that produced by the default debug actions - custom
    debug actions can be specified using :class:`setDebugActions`. Prior to
    attempting to match the ``wd`` expression, the debugging message
    ``"Match <exprname> at loc <n>(<line>,<col>)"`` is shown. Then if the
    parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception
    raised"`` message is shown. Also note the use of :class:`setName` to
    assign a human-readable name to the expression, which makes debugging and
    exception messages easier to understand - for instance, the default name
    created for the :class:`Word` expression without calling ``setName`` is
    ``"W:(ABCD...)"``.
    """
    if flag:
        self.setDebugActions(_defaultStartDebugAction,
                             _defaultSuccessDebugAction,
                             _defaultExceptionDebugAction)
    else:
        self.debug = False
    return self
<SYSTEM_TASK:> Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on all contained expressions. <END_TASK>
<USER_TASK:>
Description:
def leaveWhitespace(self):
    """Extends ``leaveWhitespace`` defined in base class, and also invokes
    ``leaveWhitespace`` on all contained expressions."""
    self.skipWhitespace = False
    self.exprs = [e.copy() for e in self.exprs]
    for e in self.exprs:
        e.leaveWhitespace()
    return self
<SYSTEM_TASK:> Helper to create a parse action for converting parsed date string to Python datetime.date <END_TASK>
<USER_TASK:>
Description:
def convertToDate(fmt="%Y-%m-%d"):
    """
    Helper to create a parse action for converting parsed date string to
    Python datetime.date

    Params -
    - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)

    Example::

        date_expr = pyparsing_common.iso8601_date.copy()
        date_expr.setParseAction(pyparsing_common.convertToDate())
        print(date_expr.parseString("1999-12-31"))

    prints::

        [datetime.date(1999, 12, 31)]
    """
    def cvt_fn(s, l, t):
        try:
            return datetime.strptime(t[0], fmt).date()
        except ValueError as ve:
            raise ParseException(s, l, str(ve))
    return cvt_fn
<SYSTEM_TASK:> Helper to create a parse action for converting parsed datetime string to Python datetime.datetime <END_TASK>
<USER_TASK:>
Description:
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
    """Helper to create a parse action for converting parsed
    datetime string to Python datetime.datetime

    Params -
    - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)

    Example::

        dt_expr = pyparsing_common.iso8601_datetime.copy()
        dt_expr.setParseAction(pyparsing_common.convertToDatetime())
        print(dt_expr.parseString("1999-12-31T23:59:59.999"))

    prints::

        [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
    """
    def cvt_fn(s, l, t):
        try:
            return datetime.strptime(t[0], fmt)
        except ValueError as ve:
            raise ParseException(s, l, str(ve))
    return cvt_fn
<SYSTEM_TASK:> Look for VCS schemes in the URL. <END_TASK>
<USER_TASK:>
Description:
def _match_vcs_scheme(url):
    # type: (str) -> Optional[str]
    """Look for VCS schemes in the URL.

    Returns the matched VCS scheme, or None if there's no match.
    """
    from pipenv.patched.notpip._internal.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            return scheme
    return None
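For illustration, a standalone sketch of the same check, with a hypothetical
scheme list standing in for pip's ``VcsSupport.schemes``::

    # Hypothetical subset of VCS schemes; the real list comes from VcsSupport.
    SCHEMES = ['git', 'hg', 'svn', 'bzr']

    def match_vcs_scheme(url):
        for scheme in SCHEMES:
            # A scheme only counts as a match when followed by '+' or ':'.
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                return scheme
        return None

    print(match_vcs_scheme('git+https://github.com/pypa/pip.git'))  # -> 'git'
    print(match_vcs_scheme('https://example.com/archive.zip'))      # -> None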
<SYSTEM_TASK:> Check the Content-Type header to ensure the response contains HTML. <END_TASK>
<USER_TASK:>
Description:
def _ensure_html_header(response):
    # type: (Response) -> None
    """Check the Content-Type header to ensure the response contains HTML.

    Raises `_NotHTML` if the content type is not text/html.
    """
    content_type = response.headers.get("Content-Type", "")
    if not content_type.lower().startswith("text/html"):
        raise _NotHTML(content_type, response.request.method)
<SYSTEM_TASK:> Send a HEAD request to the URL, and ensure the response contains HTML. <END_TASK>
<USER_TASK:>
Description:
def _ensure_html_response(url, session):
    # type: (str, PipSession) -> None
    """Send a HEAD request to the URL, and ensure the response contains HTML.

    Raises `_NotHTTP` if the URL is not available for a HEAD request, or
    `_NotHTML` if the content type is not text/html.
    """
    scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
    if scheme not in {'http', 'https'}:
        raise _NotHTTP()

    resp = session.head(url, allow_redirects=True)
    resp.raise_for_status()

    _ensure_html_header(resp)
<SYSTEM_TASK:> Access an HTML page with GET, and return the response. <END_TASK>
<USER_TASK:>
Description:
def _get_html_response(url, session):
    # type: (str, PipSession) -> Response
    """Access an HTML page with GET, and return the response.

    This consists of three parts:

    1. If the URL looks suspiciously like an archive, send a HEAD first to
       check the Content-Type is HTML, to avoid downloading a large file.
       Raise `_NotHTTP` if the content type cannot be determined, or
       `_NotHTML` if it is not HTML.
    2. Actually perform the request. Raise HTTP exceptions on network failures.
    3. Check the Content-Type header to make sure we got HTML, and raise
       `_NotHTML` otherwise.
    """
    if _is_url_like_archive(url):
        _ensure_html_response(url, session=session)

    logger.debug('Getting page %s', url)

    resp = session.get(
        url,
        headers={
            "Accept": "text/html",
            # We don't want to blindly return cached data for
            # /simple/, because authors generally expect that
            # twine upload && pip install will function, but if
            # they've done a pip install in the last ~10 minutes
            # it won't. Thus by setting this to zero we will not
            # blindly use any cached data, however the benefit of
            # using max-age=0 instead of no-cache, is that we will
            # still support conditional requests, so we will still
            # minimize traffic sent in cases where the page hasn't
            # changed at all, we will just always incur the round
            # trip for the conditional GET now instead of only
            # once per 10 minutes.
            # For more information, please see pypa/pip#5670.
            "Cache-Control": "max-age=0",
        },
    )
    resp.raise_for_status()

    # The check for archives above only works if the url ends with
    # something that looks like an archive. However that is not a
    # requirement of an url. Unless we issue a HEAD request on every
    # url we cannot know ahead of time for sure if something is HTML
    # or not. However we can check after we've downloaded it.
    _ensure_html_header(resp)

    return resp
<SYSTEM_TASK:> Find the separator's index based on the package's canonical name. <END_TASK>
<USER_TASK:>
Description:
def _find_name_version_sep(egg_info, canonical_name):
    # type: (str, str) -> int
    """Find the separator's index based on the package's canonical name.

    `egg_info` must be an egg info string for the given package, and
    `canonical_name` must be the package's canonical name.

    This function is needed since the canonicalized name does not necessarily
    have the same length as the egg info's name part. An example::

    >>> egg_info = 'foo__bar-1.0'
    >>> canonical_name = 'foo-bar'
    >>> _find_name_version_sep(egg_info, canonical_name)
    8
    """
    # Project name and version must be separated by one single dash. Find all
    # occurrences of dashes; if the string in front of it matches the canonical
    # name, this is the one separating the name and version parts.
    for i, c in enumerate(egg_info):
        if c != "-":
            continue
        if canonicalize_name(egg_info[:i]) == canonical_name:
            return i
    raise ValueError("{} does not match {}".format(egg_info, canonical_name))
<SYSTEM_TASK:> Pull the version part out of a string. <END_TASK>
<USER_TASK:>
Description:
def _egg_info_matches(egg_info, canonical_name):
    # type: (str, str) -> Optional[str]
    """Pull the version part out of a string.

    :param egg_info: The string to parse. E.g. foo-2.1
    :param canonical_name: The canonicalized name of the package this
        belongs to.
    """
    try:
        version_start = _find_name_version_sep(egg_info, canonical_name) + 1
    except ValueError:
        return None
    version = egg_info[version_start:]
    if not version:
        return None
    return version
<SYSTEM_TASK:> Determine the HTML document's base URL. <END_TASK>
<USER_TASK:>
Description:
def _determine_base_url(document, page_url):
    """Determine the HTML document's base URL.

    This looks for a ``<base>`` tag in the HTML document. If present, its href
    attribute denotes the base URL of anchor tags in the document. If there is
    no such tag (or if it does not have a valid href attribute), the HTML
    file's URL is used as the base URL.

    :param document: An HTML document representation. The current
        implementation expects the result of ``html5lib.parse()``.
    :param page_url: The URL of the HTML document.
    """
    for base in document.findall(".//base"):
        href = base.get("href")
        if href is not None:
            return href
    return page_url
<SYSTEM_TASK:> Determine if we have any encoding information in our headers. <END_TASK>
<USER_TASK:>
Description:
def _get_encoding_from_headers(headers):
    """Determine if we have any encoding information in our headers.
    """
    if headers and "Content-Type" in headers:
        content_type, params = cgi.parse_header(headers["Content-Type"])
        if "charset" in params:
            return params['charset']
    return None
<SYSTEM_TASK:> Returns the locations found via self.index_urls <END_TASK>
<USER_TASK:>
Description:
def _get_index_urls_locations(self, project_name):
    # type: (str) -> List[str]
    """Returns the locations found via self.index_urls

    Checks the url_name on the main (first in the list) index and uses this
    url_name to produce all locations
    """
    def mkurl_pypi_url(url):
        loc = posixpath.join(
            url,
            urllib_parse.quote(canonicalize_name(project_name)))
        # For maximum compatibility with easy_install, ensure the path
        # ends in a trailing slash. Although this isn't in the spec
        # (and PyPI can handle it without the slash) some other index
        # implementations might break if they relied on easy_install's
        # behavior.
        if not loc.endswith('/'):
            loc = loc + '/'
        return loc

    return [mkurl_pypi_url(url) for url in self.index_urls]
<SYSTEM_TASK:> Find all available InstallationCandidate for project_name <END_TASK>
<USER_TASK:>
Description:
def find_all_candidates(self, project_name):
    # type: (str) -> List[Optional[InstallationCandidate]]
    """Find all available InstallationCandidate for project_name

    This checks index_urls and find_links.
    All versions found are returned as an InstallationCandidate list.

    See _link_package_versions for details on which files are accepted
    """
    index_locations = self._get_index_urls_locations(project_name)
    index_file_loc, index_url_loc = self._sort_locations(index_locations)
    fl_file_loc, fl_url_loc = self._sort_locations(
        self.find_links, expand_dir=True,
    )

    file_locations = (Link(url) for url in itertools.chain(
        index_file_loc, fl_file_loc,
    ))

    # We trust every url that the user has given us whether it was given
    # via --index-url or --find-links.
    # We want to filter out anything which does not have a secure origin.
    url_locations = [
        link for link in itertools.chain(
            (Link(url) for url in index_url_loc),
            (Link(url) for url in fl_url_loc),
        )
        if self._validate_secure_origin(logger, link)
    ]

    logger.debug('%d location(s) to search for versions of %s:',
                 len(url_locations), project_name)

    for location in url_locations:
        logger.debug('* %s', location)

    canonical_name = canonicalize_name(project_name)
    formats = self.format_control.get_allowed_formats(canonical_name)
    search = Search(project_name, canonical_name, formats)
    find_links_versions = self._package_versions(
        # We trust every directly linked archive in find_links
        (Link(url, '-f') for url in self.find_links),
        search
    )

    page_versions = []
    for page in self._get_pages(url_locations, project_name):
        try:
            logger.debug('Analyzing links from page %s', page.url)
        except AttributeError:
            continue
        with indent_log():
            page_versions.extend(
                self._package_versions(page.iter_links(), search)
            )

    file_versions = self._package_versions(file_locations, search)
    if file_versions:
        file_versions.sort(reverse=True)
        logger.debug(
            'Local files found: %s',
            ', '.join([
                url_to_path(candidate.location.url)
                for candidate in file_versions
            ])
        )

    # This is an intentional priority ordering
    return file_versions + find_links_versions + page_versions
<SYSTEM_TASK:> Try to find a Link matching req <END_TASK>
<USER_TASK:>
Description:
def find_requirement(self, req, upgrade, ignore_compatibility=False):
    # type: (InstallRequirement, bool, bool) -> Optional[Link]
    """Try to find a Link matching req

    Expects req, an InstallRequirement and upgrade, a boolean
    Returns a Link if found,
    Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
    """
    all_candidates = self.find_all_candidates(req.name)

    # Filter out anything which doesn't match our specifier
    compatible_versions = set(
        req.specifier.filter(
            # We turn the version object into a str here because otherwise
            # when we're debundled but setuptools isn't, Python will see
            # packaging.version.Version and
            # pkg_resources._vendor.packaging.version.Version as different
            # types. This way we'll use a str as a common data interchange
            # format. If we stop using the pkg_resources provided specifier
            # and start using our own, we can drop the cast to str().
            [str(c.version) for c in all_candidates],
            prereleases=(
                self.allow_all_prereleases
                if self.allow_all_prereleases else None
            ),
        )
    )
    applicable_candidates = [
        # Again, converting to str to deal with debundling.
        c for c in all_candidates if str(c.version) in compatible_versions
    ]

    if applicable_candidates:
        best_candidate = max(applicable_candidates,
                             key=self._candidate_sort_key)
    else:
        best_candidate = None

    if req.satisfied_by is not None:
        installed_version = parse_version(req.satisfied_by.version)
    else:
        installed_version = None

    if installed_version is None and best_candidate is None:
        logger.critical(
            'Could not find a version that satisfies the requirement %s '
            '(from versions: %s)',
            req,
            ', '.join(
                sorted(
                    {str(c.version) for c in all_candidates},
                    key=parse_version,
                )
            )
        )

        raise DistributionNotFound(
            'No matching distribution found for %s' % req
        )

    best_installed = False
    if installed_version and (
            best_candidate is None or
            best_candidate.version <= installed_version):
        best_installed = True

    if not upgrade and installed_version is not None:
        if best_installed:
            logger.debug(
                'Existing installed version (%s) is most up-to-date and '
                'satisfies requirement',
                installed_version,
            )
        else:
            logger.debug(
                'Existing installed version (%s) satisfies requirement '
                '(most up-to-date version is %s)',
                installed_version,
                best_candidate.version,
            )
        return None

    if best_installed:
        # We have an existing version, and it's the best version
        logger.debug(
            'Installed version (%s) is most up-to-date (past versions: '
            '%s)',
            installed_version,
            ', '.join(sorted(compatible_versions, key=parse_version)) or
            "none",
        )
        raise BestVersionAlreadyInstalled

    logger.debug(
        'Using version %s (newest of versions: %s)',
        best_candidate.version,
        ', '.join(sorted(compatible_versions, key=parse_version))
    )
    return best_candidate.location
<SYSTEM_TASK:> Deregister a previously registered hook. <END_TASK>
<USER_TASK:>
Description:
def deregister_hook(self, event, hook):
    """Deregister a previously registered hook.
    Returns True if the hook existed, False if not.
    """
    try:
        self.hooks[event].remove(hook)
        return True
    except ValueError:
        return False
<SYSTEM_TASK:> Prepares the entire request with the given parameters. <END_TASK>
<USER_TASK:>
Description:
def prepare(self,
        method=None, url=None, headers=None, files=None, data=None,
        params=None, auth=None, cookies=None, hooks=None, json=None):
    """Prepares the entire request with the given parameters."""
    self.prepare_method(method)
    self.prepare_url(url, params)
    self.prepare_headers(headers)
    self.prepare_cookies(cookies)
    self.prepare_body(data, files, json)
    self.prepare_auth(auth, url)

    # Note that prepare_auth must be last to enable authentication schemes
    # such as OAuth to work on a fully prepared request.

    # This MUST go after prepare_auth. Authenticators could add a hook
    self.prepare_hooks(hooks)
<SYSTEM_TASK:> Prepare Content-Length header based on request method and body <END_TASK>
<USER_TASK:>
Description:
def prepare_content_length(self, body):
    """Prepare Content-Length header based on request method and body"""
    if body is not None:
        length = super_len(body)
        if length:
            # If length exists, set it. Otherwise, we fallback
            # to Transfer-Encoding: chunked.
            self.headers['Content-Length'] = builtin_str(length)
    elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
        # Set Content-Length to 0 for methods that can have a body
        # but don't provide one. (i.e. not GET or HEAD)
        self.headers['Content-Length'] = '0'
<SYSTEM_TASK:> Prepares the given HTTP auth data. <END_TASK>
<USER_TASK:>
Description:
def prepare_auth(self, auth, url=''):
    """Prepares the given HTTP auth data."""
    # If no Auth is explicitly provided, extract it from the URL first.
    if auth is None:
        url_auth = get_auth_from_url(self.url)
        auth = url_auth if any(url_auth) else None

    if auth:
        if isinstance(auth, tuple) and len(auth) == 2:
            # special-case basic HTTP auth
            auth = HTTPBasicAuth(*auth)

        # Allow auth to make its changes.
        r = auth(self)

        # Update self to reflect the auth changes.
        self.__dict__.update(r.__dict__)

        # Recompute Content-Length
        self.prepare_content_length(self.body)
<SYSTEM_TASK:> Prepares the given HTTP cookie data. <END_TASK>
<USER_TASK:>
Description:
def prepare_cookies(self, cookies):
    """Prepares the given HTTP cookie data.

    This function eventually generates a ``Cookie`` header from the
    given cookies using cookielib. Due to cookielib's design, the header
    will not be regenerated if it already exists, meaning this function
    can only be called once for the life of the
    :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
    to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
    header is removed beforehand.
    """
    if isinstance(cookies, cookielib.CookieJar):
        self._cookies = cookies
    else:
        self._cookies = cookiejar_from_dict(cookies)

    cookie_header = get_cookie_header(self._cookies, self)
    if cookie_header is not None:
        self.headers['Cookie'] = cookie_header
<SYSTEM_TASK:> True if this Response is one of the permanent versions of redirect. <END_TASK>
<USER_TASK:>
Description:
def is_permanent_redirect(self):
    """True if this Response is one of the permanent versions of redirect."""
    return ('location' in self.headers and
            self.status_code in (codes.moved_permanently, codes.permanent_redirect))
<SYSTEM_TASK:> Releases the connection back to the pool. Once this method has been called the underlying ``raw`` object must not be accessed again. <END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Releases the connection back to the pool. Once this method has been
    called the underlying ``raw`` object must not be accessed again.

    *Note: Should not normally need to be called explicitly.*
    """
    if not self._content_consumed:
        self.raw.close()

    release_conn = getattr(self.raw, 'release_conn', None)
    if release_conn is not None:
        release_conn()
<SYSTEM_TASK:> Format an error message for an EnvironmentError <END_TASK>
<USER_TASK:>
Description:
def create_env_error_message(error, show_traceback, using_user_site):
    """Format an error message for an EnvironmentError

    It may occur anytime during the execution of the install command.
    """
    parts = []

    # Mention the error if we are not going to show a traceback
    parts.append("Could not install packages due to an EnvironmentError")
    if not show_traceback:
        parts.append(": ")
        parts.append(str(error))
    else:
        parts.append(".")

    # Split the error indication from a helper message (if any)
    parts[-1] += "\n"

    # Suggest useful actions to the user:
    # (1) using user site-packages or (2) verifying the permissions
    if error.errno == errno.EACCES:
        user_option_part = "Consider using the `--user` option"
        permissions_part = "Check the permissions"

        if not using_user_site:
            parts.extend([
                user_option_part, " or ",
                permissions_part.lower(),
            ])
        else:
            parts.append(permissions_part)
    parts.append(".\n")

    return "".join(parts).strip() + "\n"
<SYSTEM_TASK:> Is the error actually a timeout? Will raise a ReadTimeout or pass <END_TASK>
<USER_TASK:>
Description:
def _raise_timeout(self, err, url, timeout_value):
    """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
    if isinstance(err, SocketTimeout):
        raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)

    # See the above comment about EAGAIN in Python 3. In Python 2 we have
    # to specifically catch it and throw the timeout error
    if hasattr(err, 'errno') and err.errno in _blocking_errnos:
        raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)

    # Catch possible read timeouts thrown as SSL errors. If not the
    # case, rethrow the original. We need to do this because of:
    # http://bugs.python.org/issue10272
    if 'timed out' in str(err) or 'did not complete (read)' in str(err):  # Python < 2.7.4
        raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
<SYSTEM_TASK:> Rebuilds the base system path and all of the contained finders within it. <END_TASK>
<USER_TASK:>
Description:
def reload_system_path(self):
    # type: () -> None
    """
    Rebuilds the base system path and all of the contained finders within it.

    This will re-apply any changes to the environment or any version changes
    on the system.
    """
    if self._system_path is not None:
        self._system_path.clear_caches()
    self._system_path = None
    six.moves.reload_module(pyfinder_path)
    self._system_path = self.create_system_path()
<SYSTEM_TASK:> Create a new Enum subclass that replaces a collection of global constants <END_TASK>
<USER_TASK:>
Description:
def _convert(cls, name, module, filter, source=None):
    """
    Create a new Enum subclass that replaces a collection of global constants
    """
    # convert all constants from source (or module) that pass filter() to
    # a new Enum called name, and export the enum and its members back to
    # module;
    # also, replace the __reduce_ex__ method so unpickling works in
    # previous Python versions
    module_globals = vars(_sys.modules[module])
    if source:
        source = vars(source)
    else:
        source = module_globals
    members = dict((name, value) for name, value in source.items() if filter(name))
    cls = cls(name, members, module=module)
    cls.__reduce_ex__ = _reduce_ex_by_name
    module_globals.update(cls.__members__)
    module_globals[name] = cls
    return cls
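A hedged usage sketch against this enum34-style API. Note that ``_convert``
is a private hook, it mutates the source module's globals, and newer CPython
releases renamed it ``_convert_``::

    import socket
    from enum import IntEnum

    # Gather all upper-case AF_* socket constants into one IntEnum and
    # re-export the members back into the socket module.
    AddressFamily = IntEnum._convert(
        'AddressFamily', 'socket',
        lambda name: name.isupper() and name.startswith('AF_'))

    print(AddressFamily.AF_INET)  # -> AddressFamily.AF_INET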
<SYSTEM_TASK:> Class decorator that ensures only unique members exist in an enumeration. <END_TASK>
<USER_TASK:>
Description:
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration."""
    duplicates = []
    for name, member in enumeration.__members__.items():
        if name != member.name:
            duplicates.append((name, member.name))
    if duplicates:
        duplicate_names = ', '.join(
            ["%s -> %s" % (alias, name) for (alias, name) in duplicates]
        )
        raise ValueError('duplicate names found in %r: %s' %
                         (enumeration, duplicate_names))
    return enumeration
<SYSTEM_TASK:> Convenience method to create a new Enum class. <END_TASK>
<USER_TASK:>
Description:
def _create_(cls, class_name, names=None, module=None, type=None, start=1):
    """Convenience method to create a new Enum class.

    `names` can be:

    * A string containing member names, separated either with spaces or
      commas. Values are auto-numbered from 1.
    * An iterable of member names. Values are auto-numbered from 1.
    * An iterable of (member name, value) pairs.
    * A mapping of member name -> value.
    """
    if pyver < 3.0:
        # if class_name is unicode, attempt a conversion to ASCII
        if isinstance(class_name, unicode):
            try:
                class_name = class_name.encode('ascii')
            except UnicodeEncodeError:
                raise TypeError('%r is not representable in ASCII' % class_name)
    metacls = cls.__class__
    if type is None:
        bases = (cls, )
    else:
        bases = (type, cls)
    classdict = metacls.__prepare__(class_name, bases)
    _order_ = []

    # special processing needed for names?
    if isinstance(names, basestring):
        names = names.replace(',', ' ').split()
    if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
        names = [(e, i + start) for (i, e) in enumerate(names)]

    # Here, names is either an iterable of (name, value) or a mapping.
    item = None  # in case names is empty
    for item in names:
        if isinstance(item, basestring):
            member_name, member_value = item, names[item]
        else:
            member_name, member_value = item
        classdict[member_name] = member_value
        _order_.append(member_name)
    # only set _order_ in classdict if name/value was not from a mapping
    if not isinstance(item, basestring):
        classdict['_order_'] = ' '.join(_order_)
    enum_class = metacls.__new__(metacls, class_name, bases, classdict)

    # TODO: replace the frame hack if a blessed way to know the calling
    # module is ever developed
    if module is None:
        try:
            module = _sys._getframe(2).f_globals['__name__']
        except (AttributeError, ValueError):
            pass
    if module is None:
        _make_class_unpicklable(enum_class)
    else:
        enum_class.__module__ = module

    return enum_class
<SYSTEM_TASK:> Returns the type for creating enum members, and the first inherited enum class. <END_TASK>
<USER_TASK:>
Description:
def _get_mixins_(bases):
    """Returns the type for creating enum members, and the first inherited
    enum class.

    bases: the tuple of bases that was given to __new__
    """
    if not bases or Enum is None:
        return object, Enum

    # double check that we are not subclassing a class with existing
    # enumeration members; while we're at it, see if any other data
    # type has been mixed in so we can use the correct __new__
    member_type = first_enum = None
    for base in bases:
        if (base is not Enum and
                issubclass(base, Enum) and
                base._member_names_):
            raise TypeError("Cannot extend enumerations")
    # base is now the last base in bases
    if not issubclass(base, Enum):
        raise TypeError("new enumerations must be created as "
                        "`ClassName([mixin_type,] enum_type)`")

    # get correct mix-in type (either mix-in type of Enum subclass, or
    # first base if last base is Enum)
    if not issubclass(bases[0], Enum):
        member_type = bases[0]  # first data type
        first_enum = bases[-1]  # enum type
    else:
        for base in bases[0].__mro__:
            # most common: (IntEnum, int, Enum, object)
            # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
            #               <class 'int'>, <Enum 'Enum'>,
            #               <class 'object'>)
            if issubclass(base, Enum):
                if first_enum is None:
                    first_enum = base
            else:
                if member_type is None:
                    member_type = base

    return member_type, first_enum
<SYSTEM_TASK:> Convert the package data into something usable <END_TASK>
<USER_TASK:>
Description:
def format_for_columns(pkgs, options):
    """
    Convert the package data into something usable
    by output_package_listing_columns.
    """
    running_outdated = options.outdated
    # Adjust the header for the `pip list --outdated` case.
    if running_outdated:
        header = ["Package", "Version", "Latest", "Type"]
    else:
        header = ["Package", "Version"]

    data = []
    if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs):
        header.append("Location")
    if options.verbose >= 1:
        header.append("Installer")

    for proj in pkgs:
        # if we're working on the 'outdated' list, separate out the
        # latest_version and type
        row = [proj.project_name, proj.version]

        if running_outdated:
            row.append(proj.latest_version)
            row.append(proj.latest_filetype)

        if options.verbose >= 1 or dist_is_editable(proj):
            row.append(proj.location)
        if options.verbose >= 1:
            row.append(get_installer(proj))

        data.append(row)

    return data, header
<SYSTEM_TASK:> Make a script. <END_TASK>
<USER_TASK:>
Description:
def make(self, specification, options=None):
    """
    Make a script.

    :param specification: The specification, which is either a valid export
                          entry specification (to make a script from a
                          callable) or a filename (to make a script by
                          copying from a source location).
    :param options: A dictionary of options controlling script generation.
    :return: A list of all absolute pathnames written to.
    """
    filenames = []
    entry = get_export_entry(specification)
    if entry is None:
        self._copy_script(specification, filenames)
    else:
        self._make_script(entry, filenames, options=options)
    return filenames
<SYSTEM_TASK:> Loads a pipfile from a given path. <END_TASK>
<USER_TASK:>
Description:
def load(pipfile_path=None, inject_env=True):
    """Loads a pipfile from a given path.
    If none is provided, one will try to be found.
    """
    if pipfile_path is None:
        pipfile_path = Pipfile.find()

    return Pipfile.load(filename=pipfile_path, inject_env=inject_env)
<SYSTEM_TASK:> Recursively injects environment variables into TOML values <END_TASK>
<USER_TASK:>
Description:
def inject_environment_variables(self, d):
    """
    Recursively injects environment variables into TOML values
    """
    if not d:
        return d
    if isinstance(d, six.string_types):
        return os.path.expandvars(d)
    for k, v in d.items():
        if isinstance(v, six.string_types):
            d[k] = os.path.expandvars(v)
        elif isinstance(v, dict):
            d[k] = self.inject_environment_variables(v)
        elif isinstance(v, list):
            d[k] = [self.inject_environment_variables(e) for e in v]
    return d
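The expansion itself is plain ``os.path.expandvars``; a minimal sketch with a
hypothetical Pipfile-style value::

    import os

    os.environ['DB_HOST'] = 'localhost'
    source = {'url': 'postgres://${DB_HOST}/app'}  # hypothetical TOML value

    print(os.path.expandvars(source['url']))  # -> postgres://localhost/app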
<SYSTEM_TASK:> Load a Pipfile from a given filename. <END_TASK>
<USER_TASK:>
Description:
def load(klass, filename, inject_env=True):
    """Load a Pipfile from a given filename."""
    p = PipfileParser(filename=filename)
    pipfile = klass(filename=filename)
    pipfile.data = p.parse(inject_env=inject_env)
    return pipfile
<SYSTEM_TASK:> copy data from file-like object fsrc to file-like object fdst <END_TASK>
<USER_TASK:>
Description:
def copyfileobj(fsrc, fdst, length=16*1024):
    """copy data from file-like object fsrc to file-like object fdst"""
    while 1:
        buf = fsrc.read(length)
        if not buf:
            break
        fdst.write(buf)
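A minimal usage example with in-memory streams, via the stdlib ``shutil``
equivalent of the helper above::

    import io
    import shutil

    src = io.BytesIO(b'hello world')
    dst = io.BytesIO()
    shutil.copyfileobj(src, dst)  # reads in chunks until EOF
    print(dst.getvalue())         # -> b'hello world'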
<SYSTEM_TASK:> Recursively move a file or directory to another location. This is similar to the Unix "mv" command. <END_TASK>
<USER_TASK:>
Description:
def move(src, dst):
    """Recursively move a file or directory to another location. This is
    similar to the Unix "mv" command.

    If the destination is a directory or a symlink to a directory, the source
    is moved inside the directory. The destination path must not already
    exist.

    If the destination already exists but is not a directory, it may be
    overwritten depending on os.rename() semantics.

    If the destination is on our current filesystem, then rename() is used.
    Otherwise, src is copied to the destination and then removed. A lot more
    could be done here...  A look at a mv.c shows a lot of the issues this
    implementation glosses over.
    """
    real_dst = dst
    if os.path.isdir(dst):
        if _samefile(src, dst):
            # We might be on a case insensitive filesystem,
            # perform the rename anyway.
            os.rename(src, dst)
            return

        real_dst = os.path.join(dst, _basename(src))
        if os.path.exists(real_dst):
            raise Error("Destination path '%s' already exists" % real_dst)
    try:
        os.rename(src, real_dst)
    except OSError:
        if os.path.isdir(src):
            if _destinsrc(src, dst):
                raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
            copytree(src, real_dst, symlinks=True)
            rmtree(src)
        else:
            copy2(src, real_dst)
            os.unlink(src)
<SYSTEM_TASK:> Returns a gid, given a group name. <END_TASK>
<USER_TASK:>
Description:
def _get_gid(name):
    """Returns a gid, given a group name."""
    if getgrnam is None or name is None:
        return None
    try:
        result = getgrnam(name)
    except KeyError:
        result = None
    if result is not None:
        return result[2]
    return None
<SYSTEM_TASK:> Create a zip file from all the files under 'base_dir'. <END_TASK>
<USER_TASK:>
Description:
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
    """Create a zip file from all the files under 'base_dir'.

    The output zip file will be named 'base_name' + ".zip". Uses either the
    "zipfile" Python module (if available) or the InfoZIP "zip" utility
    (if installed and found on the default search path). If neither tool is
    available, raises ExecError. Returns the name of the output zip file.
    """
    zip_filename = base_name + ".zip"
    archive_dir = os.path.dirname(base_name)

    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)

    # If zipfile module is not available, try spawning an external 'zip'
    # command.
    try:
        import zipfile
    except ImportError:
        zipfile = None

    if zipfile is None:
        _call_external_zip(base_dir, zip_filename, verbose, dry_run)
    else:
        if logger is not None:
            logger.info("creating '%s' and adding '%s' to it",
                        zip_filename, base_dir)

        if not dry_run:
            zip = zipfile.ZipFile(zip_filename, "w",
                                  compression=zipfile.ZIP_DEFLATED)

            for dirpath, dirnames, filenames in os.walk(base_dir):
                for name in filenames:
                    path = os.path.normpath(os.path.join(dirpath, name))
                    if os.path.isfile(path):
                        zip.write(path, path)
                        if logger is not None:
                            logger.info("adding '%s'", path)
            zip.close()

    return zip_filename
<SYSTEM_TASK:> Returns a list of supported formats for archiving and unarchiving. <END_TASK>
<USER_TASK:>
Description:
def get_archive_formats():
    """Returns a list of supported formats for archiving and unarchiving.

    Each element of the returned sequence is a tuple (name, description)
    """
    formats = [(name, registry[2]) for name, registry in
               _ARCHIVE_FORMATS.items()]
    formats.sort()
    return formats
<SYSTEM_TASK:> Registers an archive format. <END_TASK>
<USER_TASK:>
Description:
def register_archive_format(name, function, extra_args=None, description=''):
    """Registers an archive format.

    name is the name of the format. function is the callable that will be
    used to create archives. If provided, extra_args is a sequence of
    (name, value) tuples that will be passed as arguments to the callable.
    description can be provided to describe the format, and will be returned
    by the get_archive_formats() function.
    """
    if extra_args is None:
        extra_args = []
    if not isinstance(function, collections.Callable):
        raise TypeError('The %s object is not callable' % function)
    if not isinstance(extra_args, (tuple, list)):
        raise TypeError('extra_args needs to be a sequence')
    for element in extra_args:
        if not isinstance(element, (tuple, list)) or len(element) != 2:
            raise TypeError('extra_args elements are : (arg_name, value)')

    _ARCHIVE_FORMATS[name] = (function, extra_args, description)
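A usage sketch against the stdlib ``shutil`` equivalent of this registry,
registering a hypothetical xz-compressed tar format::

    import shutil
    import tarfile

    def make_txz(base_name, base_dir, **kwargs):
        # Hypothetical archiver: pack base_dir into an xz-compressed tar.
        filename = base_name + '.tar.xz'
        with tarfile.open(filename, 'w:xz') as tf:
            tf.add(base_dir)
        return filename

    shutil.register_archive_format('txz', make_txz,
                                   description='xz-compressed tar file')
    print([name for name, _ in shutil.get_archive_formats()])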
<SYSTEM_TASK:> Returns a list of supported formats for unpacking. <END_TASK>
<USER_TASK:>
Description:
def get_unpack_formats():
    """Returns a list of supported formats for unpacking.

    Each element of the returned sequence is a tuple
    (name, extensions, description)
    """
    formats = [(name, info[0], info[3]) for name, info in
               _UNPACK_FORMATS.items()]
    formats.sort()
    return formats
<SYSTEM_TASK:> Checks what gets registered as an unpacker. <END_TASK>
<USER_TASK:>
Description:
def _check_unpack_options(extensions, function, extra_args):
    """Checks what gets registered as an unpacker."""
    # first make sure no other unpacker is registered for this extension
    existing_extensions = {}
    for name, info in _UNPACK_FORMATS.items():
        for ext in info[0]:
            existing_extensions[ext] = name

    for extension in extensions:
        if extension in existing_extensions:
            msg = '%s is already registered for "%s"'
            raise RegistryError(msg % (extension,
                                       existing_extensions[extension]))

    if not isinstance(function, collections.Callable):
        raise TypeError('The registered function must be a callable')
<SYSTEM_TASK:> Registers an unpack format. <END_TASK>
<USER_TASK:>
Description:
def register_unpack_format(name, extensions, function, extra_args=None,
                           description=''):
    """Registers an unpack format.

    `name` is the name of the format. `extensions` is a list of extensions
    corresponding to the format.

    `function` is the callable that will be used to unpack archives. The
    callable will receive archives to unpack. If it's unable to handle an
    archive, it needs to raise a ReadError exception.

    If provided, `extra_args` is a sequence of (name, value) tuples that will
    be passed as arguments to the callable. description can be provided to
    describe the format, and will be returned by the get_unpack_formats()
    function.
    """
    if extra_args is None:
        extra_args = []
    _check_unpack_options(extensions, function, extra_args)
    _UNPACK_FORMATS[name] = extensions, function, extra_args, description
<SYSTEM_TASK:> Unpack an archive. <END_TASK>
<USER_TASK:>
Description:
def unpack_archive(filename, extract_dir=None, format=None):
    """Unpack an archive.

    `filename` is the name of the archive.

    `extract_dir` is the name of the target directory, where the archive
    is unpacked. If not provided, the current working directory is used.

    `format` is the archive format: one of "zip", "tar", or "gztar". Or any
    other registered format. If not provided, unpack_archive will use the
    filename extension and see if an unpacker was registered for that
    extension.

    In case none is found, a ValueError is raised.
    """
    if extract_dir is None:
        extract_dir = os.getcwd()

    if format is not None:
        try:
            format_info = _UNPACK_FORMATS[format]
        except KeyError:
            raise ValueError("Unknown unpack format '{0}'".format(format))

        func = format_info[1]
        func(filename, extract_dir, **dict(format_info[2]))
    else:
        # we need to look at the registered unpackers supported extensions
        format = _find_unpack_format(filename)
        if format is None:
            raise ReadError("Unknown archive format '{0}'".format(filename))

        func = _UNPACK_FORMATS[format][1]
        kwargs = dict(_UNPACK_FORMATS[format][2])
        func(filename, extract_dir, **kwargs)
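A round-trip usage example via the stdlib ``shutil`` equivalents (hypothetical
directory names)::

    import os
    import shutil

    os.makedirs('some_dir', exist_ok=True)
    with open(os.path.join('some_dir', 'hello.txt'), 'w') as f:
        f.write('hi')

    shutil.make_archive('demo', 'gztar', root_dir='some_dir')
    shutil.unpack_archive('demo.tar.gz', extract_dir='restored')
    print(os.listdir('restored'))  # -> ['hello.txt']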
<SYSTEM_TASK:> Parse an HTML fragment as a string or file-like object into a tree <END_TASK>
<USER_TASK:>
Description:
def parseFragment(doc, container="div", treebuilder="etree",
                  namespaceHTMLElements=True, **kwargs):
    """Parse an HTML fragment as a string or file-like object into a tree

    :arg doc: the fragment to parse as a string or file-like object

    :arg container: the container context to parse the fragment in

    :arg treebuilder: the treebuilder to use when parsing

    :arg namespaceHTMLElements: whether or not to namespace HTML elements

    :returns: parsed tree

    Example:

    >>> from html5lib.html5parser import parseFragment
    >>> parseFragment('<b>this is a fragment</b>')
    <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>
    """
    tb = treebuilders.getTreeBuilder(treebuilder)
    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
    return p.parseFragment(doc, container=container, **kwargs)
<SYSTEM_TASK:> Parse an HTML document into a well-formed tree <END_TASK>
<USER_TASK:>
Description:
def parse(self, stream, *args, **kwargs):
    """Parse an HTML document into a well-formed tree

    :arg stream: a file-like object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element).

    :arg scripting: treat noscript elements as if JavaScript was turned on

    :returns: parsed tree

    Example:

    >>> from html5lib.html5parser import HTMLParser
    >>> parser = HTMLParser()
    >>> parser.parse('<html><body><p>This is a doc</p></body></html>')
    <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>
    """
    self._parse(stream, False, None, *args, **kwargs)
    return self.tree.getDocument()
<SYSTEM_TASK:> Parse an HTML fragment into a well-formed tree fragment <END_TASK>
<USER_TASK:>
Description:
def parseFragment(self, stream, *args, **kwargs):
    """Parse an HTML fragment into a well-formed tree fragment

    :arg container: name of the element we're setting the innerHTML
        property if set to None, default to 'div'

    :arg stream: a file-like object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

    :arg scripting: treat noscript elements as if JavaScript was turned on

    :returns: parsed tree

    Example:

    >>> from html5lib.html5parser import HTMLParser
    >>> parser = HTMLParser()
    >>> parser.parseFragment('<b>this is a fragment</b>')
    <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>
    """
    self._parse(stream, True, *args, **kwargs)
    return self.tree.getFragment()
<SYSTEM_TASK:> Construct tree representation of the pkgs from the index. <END_TASK>
<USER_TASK:>
Description:
def construct_tree(index):
    """Construct tree representation of the pkgs from the index.

    The keys of the dict representing the tree will be objects of type
    DistPackage and the values will be list of ReqPackage objects.

    :param dict index: dist index ie. index of pkgs by their keys
    :returns: tree of pkgs and their dependencies
    :rtype: dict
    """
    return dict((p, [ReqPackage(r, index.get(r.key))
                     for r in p.requires()])
                for p in index.values())
<SYSTEM_TASK:> Sorts the dict representation of the tree <END_TASK>
<USER_TASK:>
Description:
def sorted_tree(tree):
    """Sorts the dict representation of the tree

    The root packages as well as the intermediate packages are sorted in the
    alphabetical order of the package names.

    :param dict tree: the pkg dependency tree obtained by calling
                      `construct_tree` function
    :returns: sorted tree
    :rtype: collections.OrderedDict
    """
    return OrderedDict(sorted([(k, sorted(v, key=attrgetter('key')))
                               for k, v in tree.items()],
                              key=lambda kv: kv[0].key))
<SYSTEM_TASK:> Find a root in a tree by its key <END_TASK>
<USER_TASK:>
Description:
def find_tree_root(tree, key):
    """Find a root in a tree by its key

    :param dict tree: the pkg dependency tree obtained by calling
                      `construct_tree` function
    :param str key: key of the root node to find
    :returns: a root node if found else None
    :rtype: mixed
    """
    result = [p for p in tree.keys() if p.key == key]
    assert len(result) in [0, 1]
    return None if len(result) == 0 else result[0]
<SYSTEM_TASK:> Reverse the dependency tree. <END_TASK>
<USER_TASK:>
Description:
def reverse_tree(tree):
    """Reverse the dependency tree.

    ie. the keys of the resulting dict are objects of type ReqPackage and
    the values are lists of DistPackage objects.

    :param dict tree: the pkg dependency tree obtained by calling
                      `construct_tree` function
    :returns: reversed tree
    :rtype: dict
    """
    rtree = defaultdict(list)
    child_keys = set(c.key for c in flatten(tree.values()))
    for k, vs in tree.items():
        for v in vs:
            node = find_tree_root(rtree, v.key) or v
            rtree[node].append(k.as_required_by(v))
        if k.key not in child_keys:
            rtree[k.as_requirement()] = []
    return rtree
<SYSTEM_TASK:> Guess the version of a pkg when pip doesn't provide it <END_TASK>
<USER_TASK:>
Description:
def guess_version(pkg_key, default='?'):
    """Guess the version of a pkg when pip doesn't provide it

    :param str pkg_key: key of the package
    :param str default: default version to return if unable to find
    :returns: version
    :rtype: string
    """
    try:
        m = import_module(pkg_key)
    except ImportError:
        return default
    else:
        return getattr(m, '__version__', default)
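A self-contained version of the helper for experimenting (same logic, stdlib
only)::

    from importlib import import_module

    def guess_version(pkg_key, default='?'):
        try:
            m = import_module(pkg_key)
        except ImportError:
            return default
        # Fall back to the default when the module defines no __version__.
        return getattr(m, '__version__', default)

    print(guess_version('json'))            # -> '2.0.9' on CPython
    print(guess_version('no_such_module'))  # -> '?'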
<SYSTEM_TASK:> Convert tree to string representation <END_TASK>
<USER_TASK:>
Description:
def render_tree(tree, list_all=True, show_only=None, frozen=False, exclude=None):
    """Convert tree to string representation

    :param dict tree: the package tree
    :param bool list_all: whether to list all the pkgs at the root level or
                          only those that are the sub-dependencies
    :param set show_only: set of select packages to be shown in the output.
                          This is optional arg, default: None.
    :param bool frozen: whether or not to show the names of the pkgs in the
                        output that's favourable to pip --freeze
    :param set exclude: set of select packages to be excluded from the output.
                        This is optional arg, default: None.
    :returns: string representation of the tree
    :rtype: str
    """
    tree = sorted_tree(tree)
    branch_keys = set(r.key for r in flatten(tree.values()))
    nodes = tree.keys()
    use_bullets = not frozen

    key_tree = dict((k.key, v) for k, v in tree.items())
    get_children = lambda n: key_tree.get(n.key, [])

    if show_only:
        nodes = [p for p in nodes
                 if p.key in show_only or p.project_name in show_only]
    elif not list_all:
        nodes = [p for p in nodes if p.key not in branch_keys]

    def aux(node, parent=None, indent=0, chain=None):
        if exclude and (node.key in exclude or node.project_name in exclude):
            return []
        if chain is None:
            chain = [node.project_name]
        node_str = node.render(parent, frozen)
        if parent:
            prefix = ' ' * indent + ('- ' if use_bullets else '')
            node_str = prefix + node_str
        result = [node_str]
        children = [aux(c, node, indent=indent + 2,
                        chain=chain + [c.project_name])
                    for c in get_children(node)
                    if c.project_name not in chain]
        result += list(flatten(children))
        return result

    lines = flatten([aux(p) for p in nodes])
    return '\n'.join(lines)
<SYSTEM_TASK:> Converts the tree into a flat json representation. <END_TASK>
<USER_TASK:>
Description:
def render_json(tree, indent):
    """Converts the tree into a flat json representation.

    The json repr will be a list of hashes, each hash having 2 fields:
      - package
      - dependencies: list of dependencies

    :param dict tree: dependency tree
    :param int indent: no. of spaces to indent json
    :returns: json representation of the tree
    :rtype: str
    """
    return json.dumps([{'package': k.as_dict(),
                        'dependencies': [v.as_dict() for v in vs]}
                       for k, vs in tree.items()],
                      indent=indent)
<SYSTEM_TASK:> Converts the tree into a nested json representation. <END_TASK>
<USER_TASK:>
Description:
def render_json_tree(tree, indent):
    """Converts the tree into a nested json representation.

    The json repr will be a list of hashes, each hash having the following
    fields:
      - package_name
      - key
      - required_version
      - installed_version
      - dependencies: list of dependencies

    :param dict tree: dependency tree
    :param int indent: no. of spaces to indent json
    :returns: json representation of the tree
    :rtype: str
    """
    tree = sorted_tree(tree)
    branch_keys = set(r.key for r in flatten(tree.values()))
    nodes = [p for p in tree.keys() if p.key not in branch_keys]
    key_tree = dict((k.key, v) for k, v in tree.items())
    get_children = lambda n: key_tree.get(n.key, [])

    def aux(node, parent=None, chain=None):
        if chain is None:
            chain = [node.project_name]

        d = node.as_dict()
        if parent:
            d['required_version'] = node.version_spec if node.version_spec else 'Any'
        else:
            d['required_version'] = d['installed_version']

        d['dependencies'] = [
            aux(c, parent=node, chain=chain + [c.project_name])
            for c in get_children(node)
            if c.project_name not in chain
        ]

        return d

    return json.dumps([aux(p) for p in nodes], indent=indent)
<SYSTEM_TASK:> Output dependency graph as one of the supported GraphViz output formats. <END_TASK>
<USER_TASK:>
Description:
def dump_graphviz(tree, output_format='dot'):
    """Output dependency graph as one of the supported GraphViz output formats.

    :param dict tree: dependency graph
    :param string output_format: output format
    :returns: representation of tree in the specified output format
    :rtype: str or binary representation depending on the output format
    """
    try:
        from graphviz import backend, Digraph
    except ImportError:
        print('graphviz is not available, but necessary for the output '
              'option. Please install it.', file=sys.stderr)
        sys.exit(1)

    if output_format not in backend.FORMATS:
        print('{0} is not a supported output format.'.format(output_format),
              file=sys.stderr)
        print('Supported formats are: {0}'.format(
            ', '.join(sorted(backend.FORMATS))), file=sys.stderr)
        sys.exit(1)

    graph = Digraph(format=output_format)
    for package, deps in tree.items():
        project_name = package.project_name
        label = '{0}\n{1}'.format(project_name, package.version)
        graph.node(project_name, label=label)
        for dep in deps:
            label = dep.version_spec
            if not label:
                label = 'any'
            graph.edge(project_name, dep.project_name, label=label)

    # Allow output of dot format, even if GraphViz isn't installed.
    if output_format == 'dot':
        return graph.source

    # As it's unknown if the selected output format is binary or not, try to
    # decode it as UTF8 and only print it out in binary if that's not possible.
    try:
        return graph.pipe().decode('utf-8')
    except UnicodeDecodeError:
        return graph.pipe()
<SYSTEM_TASK:> Dump the data generated by GraphViz to stdout. <END_TASK>
<USER_TASK:>
Description:
def print_graphviz(dump_output):
    """Dump the data generated by GraphViz to stdout.

    :param dump_output: The output from dump_graphviz
    """
    if hasattr(dump_output, 'encode'):
        print(dump_output)
    else:
        with os.fdopen(sys.stdout.fileno(), 'wb') as bytestream:
            bytestream.write(dump_output)
<SYSTEM_TASK:> Returns dependencies which are not present or conflict with the requirements of other packages. <END_TASK>
<USER_TASK:>
Description:
def conflicting_deps(tree):
    """Returns dependencies which are not present or conflict with the
    requirements of other packages.

    e.g. will warn if pkg1 requires pkg2==2.0 and pkg2==1.0 is installed

    :param tree: the requirements tree (dict)
    :returns: dict of DistPackage -> list of unsatisfied/unknown ReqPackage
    :rtype: dict
    """
    conflicting = defaultdict(list)
    for p, rs in tree.items():
        for req in rs:
            if req.is_conflicting():
                conflicting[p].append(req)
    return conflicting
<SYSTEM_TASK:> Return cyclic dependencies as list of tuples <END_TASK>
<USER_TASK:>
Description:
def cyclic_deps(tree):
    """Return cyclic dependencies as list of tuples

    :param dict tree: the pkg dependency tree obtained by calling
                      `construct_tree` function
    :returns: list of tuples representing cyclic dependencies
    :rtype: list
    """
    key_tree = dict((k.key, v) for k, v in tree.items())
    get_children = lambda n: key_tree.get(n.key, [])
    cyclic = []
    for p, rs in tree.items():
        for req in rs:
            if p.key in map(attrgetter('key'), get_children(req)):
                cyclic.append((p, req, p))
    return cyclic
<SYSTEM_TASK:> If installed version conflicts with required version <END_TASK>
<USER_TASK:>
Description:
def is_conflicting(self):
    """If installed version conflicts with required version"""
    # unknown installed version is also considered conflicting
    if self.installed_version == self.UNKNOWN_VERSION:
        return True
    ver_spec = (self.version_spec if self.version_spec else '')
    req_version_str = '{0}{1}'.format(self.project_name, ver_spec)
    req_obj = pkg_resources.Requirement.parse(req_version_str)
    return self.installed_version not in req_obj
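The containment test at the end is plain ``pkg_resources`` behaviour; e.g.
for the pkg2 scenario from the conflicting_deps docstring above::

    import pkg_resources

    req = pkg_resources.Requirement.parse('pkg2==2.0')
    print('1.0' in req)  # -> False: an installed 1.0 conflicts
    print('2.0' in req)  # -> True: the requirement is satisfied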
<SYSTEM_TASK:> Check good hashes against ones built from iterable of chunks of data. <END_TASK>
<USER_TASK:>
Description:
def check_against_chunks(self, chunks):
    # type: (Iterator[bytes]) -> None
    """Check good hashes against ones built from iterable of chunks of
    data.

    Raise HashMismatch if none match.
    """
    gots = {}
    for hash_name in iterkeys(self._allowed):
        try:
            gots[hash_name] = hashlib.new(hash_name)
        except (ValueError, TypeError):
            raise InstallationError('Unknown hash name: %s' % hash_name)

    for chunk in chunks:
        for hash in itervalues(gots):
            hash.update(chunk)

    for hash_name, got in iteritems(gots):
        if got.hexdigest() in self._allowed[hash_name]:
            return
    self._raise(gots)
<SYSTEM_TASK:> Removes nodes by index from an errorpath, relative to the base paths of self. <END_TASK>
<USER_TASK:>
Description:
def _drop_nodes_from_errorpaths(self, _errors, dp_items, sp_items):
    """ Removes nodes by index from an errorpath, relative to the
    base paths of self.

    :param _errors: A list of :class:`errors.ValidationError` instances.
    :param dp_items: A list of integers, pointing at the nodes to drop from
                     the :attr:`document_path`.
    :param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
    """
    dp_basedepth = len(self.document_path)
    sp_basedepth = len(self.schema_path)
    for error in _errors:
        for i in sorted(dp_items, reverse=True):
            error.document_path = \
                drop_item_from_tuple(error.document_path, dp_basedepth + i)
        for i in sorted(sp_items, reverse=True):
            error.schema_path = \
                drop_item_from_tuple(error.schema_path, sp_basedepth + i)
        if error.child_errors:
            self._drop_nodes_from_errorpaths(error.child_errors,
                                             dp_items, sp_items)
<SYSTEM_TASK:> Searches for a field as defined by path. This method is used by the ``dependency`` evaluation logic. <END_TASK>
<USER_TASK:>
Description:
def _lookup_field(self, path):
    """ Searches for a field as defined by path. This method is used by the
    ``dependency`` evaluation logic.

    :param path: Path elements are separated by a ``.``. A leading ``^``
        indicates that the path relates to the document root, otherwise it
        relates to the currently evaluated document, which is possibly a
        subdocument. The sequence ``^^`` at the start will be interpreted as
        a literal ``^``.
    :type path: :class:`str`
    :returns: Either the found field name and its value or :obj:`None` for
        both.
    :rtype: A two-value :class:`tuple`.
    """
    if path.startswith('^'):
        path = path[1:]
        context = self.document if path.startswith('^') \
            else self.root_document
    else:
        context = self.document

    parts = path.split('.')
    for part in parts:
        if part not in context:
            return None, None
        context = context.get(part)

    return parts[-1], context
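A standalone sketch of the path rules, with the validator state replaced by
plain arguments and hypothetical documents::

    def lookup(path, document, root_document):
        # '^' -> resolve from the root; '^^' -> literal '^' in the field name.
        if path.startswith('^'):
            path = path[1:]
            context = document if path.startswith('^') else root_document
        else:
            context = document
        parts = path.split('.')
        for part in parts:
            if part not in context:
                return None, None
            context = context.get(part)
        return parts[-1], context

    root = {'a': {'b': 1}, 'c': 2}
    print(lookup('^a.b', {'x': 0}, root))  # -> ('b', 1), resolved from root
    print(lookup('x', {'x': 0}, root))     # -> ('x', 0), relative lookup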
<SYSTEM_TASK:> Drops rules from the queue of the rules that still need to be evaluated for the currently processed field. <END_TASK>
<USER_TASK:>
Description:
def _drop_remaining_rules(self, *rules):
    """ Drops rules from the queue of the rules that still need to be
    evaluated for the currently processed field.
    If no arguments are given, the whole queue is emptied.
    """
    if rules:
        for rule in rules:
            try:
                self._remaining_rules.remove(rule)
            except ValueError:
                pass
    else:
        self._remaining_rules = []
<SYSTEM_TASK:> Returns the document normalized according to the specified rules <END_TASK> <USER_TASK:> Description: def normalized(self, document, schema=None, always_return_document=False): """ Returns the document normalized according to the specified rules of a schema. :param document: The document to normalize. :type document: any :term:`mapping` :param schema: The validation schema. Defaults to :obj:`None`. If not provided here, the schema must have been provided at class instantiation. :type schema: any :term:`mapping` :param always_return_document: Return the document, even if an error occurred. Defaults to: ``False``. :type always_return_document: :class:`bool` :return: A normalized copy of the provided mapping or :obj:`None` if an error occurred during normalization. """
self.__init_processing(document, schema) self.__normalize_mapping(self.document, self.schema) self.error_handler.end(self) if self._errors and not always_return_document: return None else: return self.document
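A short usage sketch, assuming the surrounding class is Cerberus's Validator (coercion is one of the normalization rules this method applies):

from cerberus import Validator

v = Validator({'amount': {'coerce': int}})
print(v.normalized({'amount': '1'}))  # -> {'amount': 1}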
<SYSTEM_TASK:> Normalizes and validates a mapping against a validation-schema of <END_TASK> <USER_TASK:> Description: def validate(self, document, schema=None, update=False, normalize=True): """ Normalizes and validates a mapping against a validation-schema of defined rules. :param document: The document to validate. :type document: any :term:`mapping` :param schema: The validation schema. Defaults to :obj:`None`. If not provided here, the schema must have been provided at class instantiation. :type schema: any :term:`mapping` :param update: If ``True``, required fields won't be checked. :type update: :class:`bool` :param normalize: If ``True``, normalize the document before validation. :type normalize: :class:`bool` :return: ``True`` if validation succeeds, otherwise ``False``. Check the :func:`errors` property for a list of processing errors. :rtype: :class:`bool` """
self.update = update self._unrequired_by_excludes = set() self.__init_processing(document, schema) if normalize: self.__normalize_mapping(self.document, self.schema) for field in self.document: if self.ignore_none_values and self.document[field] is None: continue definitions = self.schema.get(field) if definitions is not None: self.__validate_definitions(definitions, field) else: self.__validate_unknown_fields(field) if not self.update: self.__validate_required_fields(self.document) self.error_handler.end(self) return not bool(self._errors)
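An illustrative round trip, again assuming Cerberus's public Validator API:

from cerberus import Validator

v = Validator({'name': {'type': 'string', 'required': True}})
print(v.validate({'name': 'john'}))  # -> True
print(v.validate({}))                # -> False
print(v.errors)                      # -> {'name': ['required field']}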
<SYSTEM_TASK:> Validates value against all definitions and logs errors according <END_TASK> <USER_TASK:> Description: def __validate_logical(self, operator, definitions, field, value): """ Validates value against all definitions and logs errors according to the operator. """
valid_counter = 0 _errors = errors.ErrorList() for i, definition in enumerate(definitions): schema = {field: definition.copy()} for rule in ('allow_unknown', 'type'): if rule not in schema[field] and rule in self.schema[field]: schema[field][rule] = self.schema[field][rule] if 'allow_unknown' not in schema[field]: schema[field]['allow_unknown'] = self.allow_unknown validator = self._get_child_validator( schema_crumb=(field, operator, i), schema=schema, allow_unknown=True) if validator(self.document, update=self.update, normalize=False): valid_counter += 1 else: self._drop_nodes_from_errorpaths(validator._errors, [], [3]) _errors.extend(validator._errors) return valid_counter, _errors
<SYSTEM_TASK:> Validates that required fields are not missing. <END_TASK> <USER_TASK:> Description: def __validate_required_fields(self, document): """ Validates that required fields are not missing. :param document: The document being validated. """
try:
    required = set(field for field, definition in self.schema.items()
                   if self._resolve_rules_set(definition).
                   get('required') is True)
except AttributeError:
    if self.is_child and self.schema_path[-1] == 'schema':
        raise _SchemaRuleTypeError
    else:
        raise
required -= self._unrequired_by_excludes
missing = required - set(field for field in document
                         if document.get(field) is not None
                         or not self.ignore_none_values)

for field in missing:
    self._error(field, errors.REQUIRED_FIELD)

# At least one field from self._unrequired_by_excludes should be
# present in document
if self._unrequired_by_excludes:
    fields = set(field for field in document
                 if document.get(field) is not None)
    if self._unrequired_by_excludes.isdisjoint(fields):
        for field in self._unrequired_by_excludes - fields:
            self._error(field, errors.REQUIRED_FIELD)
<SYSTEM_TASK:> Returns a callable that looks up the given attribute from a <END_TASK> <USER_TASK:> Description: def make_attrgetter(environment, attribute, postprocess=None): """Returns a callable that looks up the given attribute from a passed object with the rules of the environment. Dots are allowed to access attributes of attributes. Integer parts in paths are looked up as integers. """
if attribute is None: attribute = [] elif isinstance(attribute, string_types): attribute = [int(x) if x.isdigit() else x for x in attribute.split('.')] else: attribute = [attribute] def attrgetter(item): for part in attribute: item = environment.getitem(item, part) if postprocess is not None: item = postprocess(item) return item return attrgetter
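A hedged usage sketch; make_attrgetter is an internal Jinja2 helper, so the import path below is an assumption rather than public API:

from jinja2 import Environment
from jinja2.filters import make_attrgetter  # internal helper; location is an assumption

getter = make_attrgetter(Environment(), 'user.name', postprocess=str.upper)
print(getter({'user': {'name': 'alice'}}))  # -> 'ALICE'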
<SYSTEM_TASK:> Return a titlecased version of the value. I.e. words will start with <END_TASK> <USER_TASK:> Description: def do_title(s): """Return a titlecased version of the value. I.e. words will start with uppercase letters, all remaining characters are lowercase. """
return ''.join( [item[0].upper() + item[1:].lower() for item in _word_beginning_split_re.split(soft_unicode(s)) if item])
<SYSTEM_TASK:> Sort an iterable. By default it sorts ascending; if you pass it <END_TASK> <USER_TASK:> Description: def do_sort( environment, value, reverse=False, case_sensitive=False, attribute=None ): """Sort an iterable. By default it sorts ascending; if you pass it true as the first argument, it will reverse the sorting. If the iterable is made of strings, the third parameter can be used to control the case sensitivity of the comparison, which is disabled by default. .. sourcecode:: jinja {% for item in iterable|sort %} ... {% endfor %} It is also possible to sort by an attribute (for example to sort by the date of an object) by specifying the `attribute` parameter: .. sourcecode:: jinja {% for item in iterable|sort(attribute='date') %} ... {% endfor %} .. versionchanged:: 2.6 The `attribute` parameter was added. """
key_func = make_attrgetter( environment, attribute, postprocess=ignore_case if not case_sensitive else None ) return sorted(value, key=key_func, reverse=reverse)
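The filter as exercised through a template, which is how it is normally reached; a sketch against the public Jinja2 API:

from jinja2 import Environment

env = Environment()
tmpl = env.from_string("{{ users|sort(attribute='name')|join(', ', attribute='name') }}")
print(tmpl.render(users=[{'name': 'bob'}, {'name': 'alice'}]))  # -> alice, bob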
<SYSTEM_TASK:> Returns a list of unique items from the given iterable. <END_TASK> <USER_TASK:> Description: def do_unique(environment, value, case_sensitive=False, attribute=None): """Returns a list of unique items from the given iterable. .. sourcecode:: jinja {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }} -> ['foo', 'bar', 'foobar'] The unique items are yielded in the same order as their first occurrence in the iterable passed to the filter. :param case_sensitive: Treat upper and lower case strings as distinct. :param attribute: Filter objects with unique values for this attribute. """
getter = make_attrgetter( environment, attribute, postprocess=ignore_case if not case_sensitive else None ) seen = set() for item in value: key = getter(item) if key not in seen: seen.add(key) yield item
<SYSTEM_TASK:> Return the smallest item from the sequence. <END_TASK> <USER_TASK:> Description: def do_min(environment, value, case_sensitive=False, attribute=None): """Return the smallest item from the sequence. .. sourcecode:: jinja {{ [1, 2, 3]|min }} -> 1 :param case_sensitive: Treat upper and lower case strings as distinct. :param attribute: Get the object with the min value of this attribute. """
return _min_or_max(environment, value, min, case_sensitive, attribute)
<SYSTEM_TASK:> Return the largest item from the sequence. <END_TASK> <USER_TASK:> Description: def do_max(environment, value, case_sensitive=False, attribute=None): """Return the largest item from the sequence. .. sourcecode:: jinja {{ [1, 2, 3]|max }} -> 3 :param case_sensitive: Treat upper and lower case strings as distinct. :param attribute: Get the object with the max value of this attribute. """
return _min_or_max(environment, value, max, case_sensitive, attribute)
<SYSTEM_TASK:> Return the last item of a sequence. <END_TASK> <USER_TASK:> Description: def do_last(environment, seq): """Return the last item of a sequence."""
try: return next(iter(reversed(seq))) except StopIteration: return environment.undefined('No last item, sequence was empty.')
<SYSTEM_TASK:> Return a copy of the string with each line indented by 4 spaces. The <END_TASK> <USER_TASK:> Description: def do_indent( s, width=4, first=False, blank=False, indentfirst=None ): """Return a copy of the string with each line indented by 4 spaces. The first line and blank lines are not indented by default. :param width: Number of spaces to indent by. :param first: Don't skip indenting the first line. :param blank: Don't skip indenting empty lines. .. versionchanged:: 2.10 Blank lines are not indented by default. Rename the ``indentfirst`` argument to ``first``. """
if indentfirst is not None: warnings.warn(DeprecationWarning( 'The "indentfirst" argument is renamed to "first".' ), stacklevel=2) first = indentfirst s += u'\n' # this quirk is necessary for splitlines method indention = u' ' * width if blank: rv = (u'\n' + indention).join(s.splitlines()) else: lines = s.splitlines() rv = lines.pop(0) if lines: rv += u'\n' + u'\n'.join( indention + line if line else line for line in lines ) if first: rv = indention + rv return rv
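do_indent is a pure function, so it can be exercised directly; a quick sketch of the first/blank switches (results shown with escape sequences):

print(do_indent("a\nb\n\nc", width=2))
# -> "a\n  b\n\n  c"   (first line and the blank line stay unindented)
print(do_indent("a\nb", width=2, first=True))
# -> "  a\n  b"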
<SYSTEM_TASK:> Return a copy of the string passed to the filter wrapped after <END_TASK> <USER_TASK:> Description: def do_wordwrap(environment, s, width=79, break_long_words=True, wrapstring=None): """ Return a copy of the string passed to the filter wrapped after ``79`` characters. You can override this default using the first parameter. If you set the second parameter to `false` Jinja will not split words apart if they are longer than `width`. By default, the newlines will be the default newlines for the environment, but this can be changed using the wrapstring keyword argument. .. versionadded:: 2.7 Added support for the `wrapstring` parameter. """
if not wrapstring: wrapstring = environment.newline_sequence import textwrap return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False, replace_whitespace=False, break_long_words=break_long_words))
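A direct call for illustration; passing wrapstring explicitly sidesteps the environment lookup, so None is acceptable as the first argument here:

print(do_wordwrap(None, "the quick brown fox jumps over the lazy dog",
                  width=15, wrapstring='\n'))
# the quick brown
# fox jumps over
# the lazy dog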
<SYSTEM_TASK:> Reverse the object or return an iterator that iterates over it the other <END_TASK> <USER_TASK:> Description: def do_reverse(value): """Reverse the object or return an iterator that iterates over it the other way round. """
if isinstance(value, string_types): return value[::-1] try: return reversed(value) except TypeError: try: rv = list(value) rv.reverse() return rv except TypeError: raise FilterArgumentError('argument must be iterable')
<SYSTEM_TASK:> Applies a filter on a sequence of objects or looks up an attribute. <END_TASK> <USER_TASK:> Description: def do_map(*args, **kwargs): """Applies a filter on a sequence of objects or looks up an attribute. This is useful when dealing with lists of objects but you are really only interested in a certain value of it. The basic usage is mapping on an attribute. Imagine you have a list of users but you are only interested in a list of usernames: .. sourcecode:: jinja Users on this page: {{ users|map(attribute='username')|join(', ') }} Alternatively you can let it invoke a filter by passing the name of the filter and the arguments afterwards. A good example would be applying a text conversion filter on a sequence: .. sourcecode:: jinja Users on this page: {{ titles|map('lower')|join(', ') }} .. versionadded:: 2.7 """
seq, func = prepare_map(args, kwargs) if seq: for item in seq: yield func(item)
<SYSTEM_TASK:> If ``completion_type`` is ``file`` or ``path``, list all regular files <END_TASK> <USER_TASK:> Description: def auto_complete_paths(current, completion_type): """If ``completion_type`` is ``file`` or ``path``, list all regular files and directories starting with ``current``; otherwise only list directories starting with ``current``. :param current: The word to be completed :param completion_type: path completion type (``file``, ``path`` or ``dir``) :return: A generator of regular files and/or directories """
directory, filename = os.path.split(current)
current_path = os.path.abspath(directory)
# Don't complete paths if they can't be accessed
if not os.access(current_path, os.R_OK):
    return
filename = os.path.normcase(filename)
# list all files that start with ``filename``
file_list = (x for x in os.listdir(current_path)
             if os.path.normcase(x).startswith(filename))
for f in file_list:
    opt = os.path.join(current_path, f)
    comp_file = os.path.normcase(os.path.join(directory, f))
    # complete regular files when there is no ``<dir>`` after the option
    # complete directories when there is ``<file>``, ``<path>`` or
    # ``<dir>`` after the option
    if completion_type != 'dir' and os.path.isfile(opt):
        yield comp_file
    elif os.path.isdir(opt):
        yield os.path.join(comp_file, '')
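An illustrative invocation under an assumed POSIX layout (a src/ directory containing main.py and a tests/ subdirectory):

# hypothetical layout: ./src/main.py, ./src/tests/
print(sorted(auto_complete_paths('src/ma', 'file')))
# -> ['src/main.py']
print(sorted(auto_complete_paths('src/', 'dir')))
# -> ['src/tests/']  (directories are yielded with a trailing separator)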
<SYSTEM_TASK:> Build a wheel. <END_TASK> <USER_TASK:> Description: def _build_wheel_modern(ireq, output_dir, finder, wheel_cache, kwargs): """Build a wheel. * ireq: The InstallRequirement object to build * output_dir: The directory to build the wheel in. * finder: pip's internal Finder object to find the source out of ireq. * wheel_cache: pip's WheelCache object, handed to the wheel builder. * kwargs: Various keyword arguments from `_prepare_wheel_building_kwargs`. """
kwargs.update({"progress_bar": "off", "build_isolation": False}) with pip_shims.RequirementTracker() as req_tracker: if req_tracker: kwargs["req_tracker"] = req_tracker preparer = pip_shims.RequirementPreparer(**kwargs) builder = pip_shims.WheelBuilder(finder, preparer, wheel_cache) return builder._build_one(ireq, output_dir)
<SYSTEM_TASK:> Get python version string using subprocess from a given path. <END_TASK> <USER_TASK:> Description: def get_python_version(path): # type: (str) -> str """Get python version string using subprocess from a given path."""
version_cmd = [path, "-c", "import sys; print(sys.version.split()[0])"] try: c = vistir.misc.run( version_cmd, block=True, nospin=True, return_object=True, combine_stderr=False, write_to_stdout=False, ) except OSError: raise InvalidPythonVersion("%s is not a valid python path" % path) if not c.out: raise InvalidPythonVersion("%s is not a valid python path" % path) return c.out.strip()
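A stdlib-only equivalent sketch, for readers without vistir available; the error handling here is deliberately thinner than the original's:

import subprocess

def get_python_version_stdlib(path):
    # Same one-liner probe, run via subprocess instead of vistir.misc.run.
    out = subprocess.check_output(
        [path, "-c", "import sys; print(sys.version.split()[0])"]
    )
    return out.decode().strip()

print(get_python_version_stdlib("/usr/bin/python3"))  # e.g. '3.11.4'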
<SYSTEM_TASK:> Returns whether a given path is a known executable from known executable extensions <END_TASK> <USER_TASK:> Description: def path_is_known_executable(path): # type: (vistir.compat.Path) -> bool """ Returns whether a given path is a known executable from known executable extensions or has the executable bit toggled. :param path: The path to the target executable. :type path: :class:`~vistir.compat.Path` :return: True if the path has chmod +x, or is a readable, known executable extension. :rtype: bool """
return ( path_is_executable(path) or os.access(str(path), os.R_OK) and path.suffix in KNOWN_EXTS )
<SYSTEM_TASK:> Determine whether the supplied filename looks like a possible name of python. <END_TASK> <USER_TASK:> Description: def looks_like_python(name): # type: (str) -> bool """ Determine whether the supplied filename looks like a possible name of python. :param str name: The name of the provided file. :return: Whether the provided name looks like python. :rtype: bool """
if not any(name.lower().startswith(py_name) for py_name in PYTHON_IMPLEMENTATIONS): return False match = RE_MATCHER.match(name) if match: return any(fnmatch(name, rule) for rule in MATCH_RULES) return False
<SYSTEM_TASK:> Get the parts of the path that must be os.path.joined with cache_dir <END_TASK> <USER_TASK:> Description: def _get_cache_path_parts(self, link): # type: (Link) -> List[str] """Get the parts of the path that must be os.path.joined with cache_dir """
# We want to generate an url to use as our cache key, we don't want to # just re-use the URL because it might have other items in the fragment # and we don't care about those. key_parts = [link.url_without_fragment] if link.hash_name is not None and link.hash is not None: key_parts.append("=".join([link.hash_name, link.hash])) key_url = "#".join(key_parts) # Encode our key url with sha224, we'll use this because it has similar # security properties to sha256, but with a shorter total output (and # thus less secure). However the differences don't make a lot of # difference for our use case here. hashed = hashlib.sha224(key_url.encode()).hexdigest() # We want to nest the directories some to prevent having a ton of top # level directories where we might run out of sub directories on some # FS. parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]] return parts
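A standalone sketch of just the hashing-and-nesting step (a hypothetical free function; the real method derives key_url from the Link first):

import hashlib

def cache_path_parts(key_url):
    # sha224 of the canonical key URL, split into 2/2/2/50-character segments
    hashed = hashlib.sha224(key_url.encode()).hexdigest()
    return [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]

parts = cache_path_parts("https://example.com/pkg-1.0.tar.gz")
print(len(parts), [len(p) for p in parts])  # -> 4 [2, 2, 2, 50]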
<SYSTEM_TASK:> Return a directory to store cached wheels for link <END_TASK> <USER_TASK:> Description: def get_path_for_link(self, link): # type: (Link) -> str """Return a directory to store cached wheels for link Because there are M wheels for any one sdist, we provide a directory to cache them in, and then consult that directory when looking up cache hits. We only insert things into the cache if they have plausible version numbers, so that we don't contaminate the cache with things that were not unique. E.g. ./package might have dozens of installs done for it and build a version of 0.0...and if we built and cached a wheel, we'd end up using the same wheel even if the source has been edited. :param link: The link of the sdist for which this will cache wheels. """
parts = self._get_cache_path_parts(link) # Store wheels within the root cache_dir return os.path.join(self.cache_dir, "wheels", *parts)
<SYSTEM_TASK:> Retrieves dependencies for the requirement from the dependency cache. <END_TASK> <USER_TASK:> Description: def _get_dependencies_from_cache(ireq): """Retrieves dependencies for the requirement from the dependency cache. """
if os.environ.get("PASSA_IGNORE_LOCAL_CACHE"): return if ireq.editable: return try: deps = DEPENDENCY_CACHE[ireq] pyrq = REQUIRES_PYTHON_CACHE[ireq] except KeyError: return # Preserving sanity: Run through the cache and make sure every entry if # valid. If this fails, something is wrong with the cache. Drop it. try: packaging.specifiers.SpecifierSet(pyrq) ireq_name = packaging.utils.canonicalize_name(ireq.name) if any(_is_cache_broken(line, ireq_name) for line in deps): broken = True else: broken = False except Exception: broken = True if broken: print("dropping broken cache for {0}".format(ireq.name)) del DEPENDENCY_CACHE[ireq] del REQUIRES_PYTHON_CACHE[ireq] return return deps, pyrq
<SYSTEM_TASK:> Retrieves dependencies for the install requirement from the JSON API. <END_TASK> <USER_TASK:> Description: def _get_dependencies_from_json(ireq, sources): """Retrieves dependencies for the install requirement from the JSON API. :param ireq: A single InstallRequirement :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement` :return: A set of dependency lines for generating new InstallRequirements. :rtype: set(str) or None """
if os.environ.get("PASSA_IGNORE_JSON_API"): return # It is technically possible to parse extras out of the JSON API's # requirement format, but it is such a chore let's just use the simple API. if ireq.extras: return try: version = get_pinned_version(ireq) except ValueError: return url_prefixes = [ proc_url[:-7] # Strip "/simple". for proc_url in ( raw_url.rstrip("/") for raw_url in (source.get("url", "") for source in sources) ) if proc_url.endswith("/simple") ] session = requests.session() for prefix in url_prefixes: url = "{prefix}/pypi/{name}/{version}/json".format( prefix=prefix, name=packaging.utils.canonicalize_name(ireq.name), version=version, ) try: dependencies = _get_dependencies_from_json_url(url, session) if dependencies is not None: return dependencies except Exception as e: print("unable to read dependencies via {0} ({1})".format(url, e)) session.close() return
<SYSTEM_TASK:> Read wheel metadata to know what it depends on. <END_TASK> <USER_TASK:> Description: def _read_requirements(metadata, extras): """Read wheel metadata to know what it depends on. The `run_requires` attribute contains a list of dict or str specifying requirements. For dicts, it may contain an "extra" key to specify these requirements are for a specific extra. Unfortunately, not all fields are specified like this (I don't know why); some are specified with markers. So we jump through these terrible hoops to know exactly what we need. The extra extraction is not comprehensive. It assumes the marker is NEVER something like `extra == "foo" and extra == "bar"`. I guess this never makes sense anyway? Markers are just terrible. """
extras = extras or () requirements = [] for entry in metadata.run_requires: if isinstance(entry, six.text_type): entry = {"requires": [entry]} extra = None else: extra = entry.get("extra") if extra is not None and extra not in extras: continue for line in entry.get("requires", []): r = requirementslib.Requirement.from_line(line) if r.markers: contained = get_contained_extras(r.markers) if (contained and not any(e in contained for e in extras)): continue marker = get_without_extra(r.markers) r.markers = str(marker) if marker else None line = r.as_line(include_hashes=False) requirements.append(line) return requirements
<SYSTEM_TASK:> Read wheel metadata to know the value of Requires-Python. <END_TASK> <USER_TASK:> Description: def _read_requires_python(metadata): """Read wheel metadata to know the value of Requires-Python. This is surprisingly poorly supported in Distlib. This function tries several ways to get this information: * Metadata 2.0: metadata.dictionary.get("requires_python") is not None * Metadata 2.1: metadata._legacy.get("Requires-Python") is not None * Metadata 1.2: metadata._legacy.get("Requires-Python") != "UNKNOWN" """
# TODO: Support more metadata formats. value = metadata.dictionary.get("requires_python") if value is not None: return value if metadata._legacy: value = metadata._legacy.get("Requires-Python") if value is not None and value != "UNKNOWN": return value return ""
<SYSTEM_TASK:> Retrieves dependencies for the requirement from pipenv.patched.notpip internals. <END_TASK> <USER_TASK:> Description: def _get_dependencies_from_pip(ireq, sources): """Retrieves dependencies for the requirement from pipenv.patched.notpip internals. The current strategy is to try the following in order, returning the first successful result. 1. Try to build a wheel out of the ireq, and read metadata out of it. 2. Read metadata out of the egg-info directory if it is present. """
extras = ireq.extras or () try: wheel = build_wheel(ireq, sources) except WheelBuildError: # XXX: This depends on a side effect of `build_wheel`. This block is # reached when it fails to build an sdist, where the sdist would have # been downloaded, extracted into `ireq.source_dir`, and partially # built (hopefully containing .egg-info). metadata = read_sdist_metadata(ireq) if not metadata: raise else: metadata = wheel.metadata requirements = _read_requirements(metadata, extras) requires_python = _read_requires_python(metadata) return requirements, requires_python
<SYSTEM_TASK:> Helper function to format and quote a single header parameter. <END_TASK> <USER_TASK:> Description: def format_header_param(name, value): """ Helper function to format and quote a single header parameter. Particularly useful for header parameters which might contain non-ASCII values, like file names. This follows RFC 2231, as suggested by RFC 2388 Section 4.4. :param name: The name of the parameter, a string expected to be ASCII only. :param value: The value of the parameter, provided as a unicode string. """
if not any(ch in value for ch in '"\\\r\n'): result = '%s="%s"' % (name, value) try: result.encode('ascii') except (UnicodeEncodeError, UnicodeDecodeError): pass else: return result if not six.PY3 and isinstance(value, six.text_type): # Python 2: value = value.encode('utf-8') value = email.utils.encode_rfc2231(value, 'utf-8') value = '%s*=%s' % (name, value) return value
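Two illustrative calls on Python 3, assuming this is urllib3.fields.format_header_param: an ASCII value passes through quoted, while a non-ASCII one gets the RFC 2231 treatment:

print(format_header_param('filename', 'report.pdf'))
# -> filename="report.pdf"
print(format_header_param('filename', u'r\u00e9sum\u00e9.pdf'))
# -> filename*=utf-8''r%C3%A9sum%C3%A9.pdf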
<SYSTEM_TASK:> Helper function to format and quote a single header. <END_TASK> <USER_TASK:> Description: def _render_parts(self, header_parts): """ Helper function to format and quote a single header. Useful for single headers that are composed of multiple items. E.g., 'Content-Disposition' fields. :param header_parts: A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format as `k1="v1"; k2="v2"; ...`. """
parts = [] iterable = header_parts if isinstance(header_parts, dict): iterable = header_parts.items() for name, value in iterable: if value is not None: parts.append(self._render_part(name, value)) return '; '.join(parts)
<SYSTEM_TASK:> Renders the headers for this request field. <END_TASK> <USER_TASK:> Description: def render_headers(self): """ Renders the headers for this request field. """
lines = [] sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location'] for sort_key in sort_keys: if self.headers.get(sort_key, False): lines.append('%s: %s' % (sort_key, self.headers[sort_key])) for header_name, header_value in self.headers.items(): if header_name not in sort_keys: if header_value: lines.append('%s: %s' % (header_name, header_value)) lines.append('\r\n') return '\r\n'.join(lines)
<SYSTEM_TASK:> Makes this request field into a multipart request field. <END_TASK> <USER_TASK:> Description: def make_multipart(self, content_disposition=None, content_type=None, content_location=None): """ Makes this request field into a multipart request field. This method overrides "Content-Disposition", "Content-Type" and "Content-Location" headers to the request parameter. :param content_disposition: The 'Content-Disposition' of the request body, defaults to 'form-data'. :param content_type: The 'Content-Type' of the request body. :param content_location: The 'Content-Location' of the request body. """
self.headers['Content-Disposition'] = content_disposition or 'form-data' self.headers['Content-Disposition'] += '; '.join([ '', self._render_parts( (('name', self._name), ('filename', self._filename)) ) ]) self.headers['Content-Type'] = content_type self.headers['Content-Location'] = content_location
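Putting render_headers and make_multipart together, assuming these methods live on urllib3.fields.RequestField:

from urllib3.fields import RequestField

rf = RequestField('upload', b'data', filename='a.txt')
rf.make_multipart(content_type='text/plain')
print(rf.headers['Content-Disposition'])
# -> form-data; name="upload"; filename="a.txt"
print(repr(rf.render_headers()))
# -> 'Content-Disposition: form-data; name="upload"; filename="a.txt"\r\n
#     Content-Type: text/plain\r\n\r\n'  (shown wrapped; it is one string)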