desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def call_func(self, req, *args, **kwargs):
    """Invoke the wrapped function; subclasses override this to change the call."""
    target = self.func
    return target(req, *args, **kwargs)
def clone(self, func=None, **kw):
    """Return a copy of this object with some parameters rebound."""
    params = {}
    if func is not None:
        params['func'] = func
    # Carry RequestClass along only when it was overridden on the instance.
    if self.RequestClass is not self.__class__.RequestClass:
        params['RequestClass'] = self.RequestClass
    if self.args:
        params['args'] = self.args
    if self.kwargs:
        params['kwargs'] = self.kwargs
    params.update(kw)
    return self.__class__(**params)
@classmethod
def middleware(cls, middle_func=None, app=None, **kw):
    """Create middleware from a function ``middle_func(app, req, **config)``.

    Usable as a decorator (``@wsgify.middleware``); inside the function call
    ``req.get_response(app)`` to obtain a response to rewrite, or return
    the app untouched. Dispatch depends on what has been supplied so far:
    no function yet -> placeholder awaiting it; function but no app ->
    factory that binds an app and config later; both -> bound middleware.
    """
    if (middle_func is None):
        return _UnboundMiddleware(cls, app, kw)
    if (app is None):
        return _MiddlewareFactory(cls, middle_func, kw)
    return cls(middle_func, middleware_wraps=app, kwargs=kw)
def _body_file__get(self):
    """Input stream of the request (``wsgi.input``).

    Setting the corresponding property resets content_length and the
    seekable flag (unlike setting req.body_file_raw).
    """
    if (not self.is_body_readable):
        # No readable body: hand back an empty stream instead of wsgi.input.
        return io.BytesIO()
    r = self.body_file_raw
    clen = self.content_length
    if ((not self.is_body_seekable) and (clen is not None)):
        # Wrap the raw stream so reads cannot run past Content-Length.
        # The wrapper is cached in the environ keyed by raw-stream identity,
        # so repeated access reuses the same buffered wrapper.
        env = self.environ
        (wrapped, raw) = env.get('webob._body_file', (0, 0))
        if (raw is not r):
            wrapped = LimitedLengthFile(r, clen)
            wrapped = io.BufferedReader(wrapped)
            env['webob._body_file'] = (wrapped, r)
        r = wrapped
    return r
@property
def body_file_seekable(self):
    """Seekable file-like access to the request body.

    Preferred over .body_file for middleware/routing; accessing it also
    updates CONTENT_LENGTH.
    """
    if self.is_body_seekable:
        return self.body_file_raw
    self.make_body_seekable()
    return self.body_file_raw
def _content_type__get(self):
    """Return the Content-Type value with any parameters stripped off."""
    raw = self._content_type_raw
    return raw.partition(';')[0]
def _headers__get(self):
    """All request headers as a case-insensitive dict-like view (lazily built)."""
    headers = self._headers
    if headers is None:
        headers = self._headers = EnvironHeaders(self.environ)
    return headers
@property
def client_addr(self):
    """Effective client IP: first X-Forwarded-For entry, else REMOTE_ADDR.

    .. warning:: X-Forwarded-For is client-controlled; do not trust it
       unless a trusted proxy sets it.
    """
    forwarded = self.environ.get('HTTP_X_FORWARDED_FOR')
    if forwarded is None:
        return self.environ.get('REMOTE_ADDR')
    return forwarded.split(',')[0].strip()
@property
def host_port(self):
    """Effective server port as a string.

    Taken from HTTP_HOST when present (scheme default when no explicit
    port), otherwise from SERVER_PORT.
    """
    env = self.environ
    host = env.get('HTTP_HOST')
    if host is None:
        return env['SERVER_PORT']
    if ':' in host:
        return host.split(':', 1)[1]
    return '443' if env['wsgi.url_scheme'] == 'https' else '80'
@property
def host_url(self):
    """The URL through the host, with no path component."""
    env = self.environ
    scheme = env.get('wsgi.url_scheme')
    host = env.get('HTTP_HOST')
    if host is None:
        host = env.get('SERVER_NAME')
        port = env.get('SERVER_PORT')
    elif ':' in host:
        host, port = host.split(':', 1)
    else:
        port = None
    # Drop the port when it matches the scheme's default.
    default = {'http': '80', 'https': '443'}.get(scheme)
    if default is not None and port == default:
        port = None
    pieces = [scheme, '://', host]
    if port:
        pieces.append(':%s' % port)
    return ''.join(pieces)
@property
def application_url(self):
    """The URL including SCRIPT_NAME (no PATH_INFO or query string)."""
    quoted_script = url_quote(bytes_(self.script_name, self.url_encoding), PATH_SAFE)
    return self.host_url + quoted_script
@property
def path_url(self):
    """The URL including SCRIPT_NAME and PATH_INFO, but not QUERY_STRING."""
    quoted_path = url_quote(bytes_(self.path_info, self.url_encoding), PATH_SAFE)
    return self.application_url + quoted_path
@property
def path(self):
    """The request path (SCRIPT_NAME + PATH_INFO), without host or query string."""
    enc = self.url_encoding
    quoted = [url_quote(bytes_(piece, enc), PATH_SAFE)
              for piece in (self.script_name, self.path_info)]
    return quoted[0] + quoted[1]
@property
def path_qs(self):
    """The request path plus the query string (no host)."""
    base = self.path
    qs = self.environ.get('QUERY_STRING')
    return '%s?%s' % (base, qs) if qs else base
@property
def url(self):
    """The full request URL, including QUERY_STRING."""
    base = self.path_url
    qs = self.environ.get('QUERY_STRING')
    return '%s?%s' % (base, qs) if qs else base
def relative_url(self, other_url, to_application=False):
    """Resolve *other_url* against the request URL.

    With ``to_application=True``, resolve relative to the URL with only
    SCRIPT_NAME (a trailing slash is ensured).
    """
    if not to_application:
        base = self.path_url
    else:
        base = self.application_url
        if not base.endswith('/'):
            base += '/'
    return urlparse.urljoin(base, other_url)
def path_info_pop(self, pattern=None):
    """Pop the next PATH_INFO segment onto SCRIPT_NAME and return it.

    Returns None when PATH_INFO is exhausted. Empty segments (``//``) are
    skipped, their slashes moving to SCRIPT_NAME. When *pattern* is given
    and the segment does not match, nothing changes and None is returned.
    """
    path = self.path_info
    if not path:
        return None
    stripped = path.lstrip('/')
    slashes = '/' * (len(path) - len(stripped))
    idx = stripped.find('/')
    if idx < 0:
        idx = len(stripped)
    segment = stripped[:idx]
    if pattern is not None and not re.match(pattern, segment):
        return None
    self.script_name += slashes + segment
    self.path_info = stripped[idx:]
    return segment
def path_info_peek(self):
    """Return the next PATH_INFO segment without modifying the environ.

    Returns None when there is no next segment.
    """
    path = self.path_info
    if not path:
        return None
    return path.lstrip('/').split('/', 1)[0]
def _urlvars__get(self):
    """Named URL variables from routing.

    Reads ``paste.urlvars`` or ``wsgiorg.routing_args``; installs an empty
    mapping into the environ when neither exists.
    """
    env = self.environ
    if 'paste.urlvars' in env:
        return env['paste.urlvars']
    if 'wsgiorg.routing_args' in env:
        return env['wsgiorg.routing_args'][1]
    result = {}
    env['wsgiorg.routing_args'] = ((), result)
    return result
def _urlargs__get(self):
    """Positional URL variables from ``wsgiorg.routing_args`` (default empty)."""
    env = self.environ
    if 'wsgiorg.routing_args' not in env:
        return ()
    return env['wsgiorg.routing_args'][0]
@property
def is_xhr(self):
    """True when X-Requested-With equals ``XMLHttpRequest``.

    Only set by JS libraries that choose to send it (jQuery, Prototype);
    absence does not prove the request was not an XHR.
    """
    requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
    return requested_with == 'XMLHttpRequest'
def _host__get(self):
    """Host from HTTP_HOST, falling back to SERVER_NAME:SERVER_PORT."""
    try:
        return self.environ['HTTP_HOST']
    except KeyError:
        return '%(SERVER_NAME)s:%(SERVER_PORT)s' % self.environ
def _body__get(self):
    """Return the request body as bytes.

    When the body is not readable, returns empty *bytes* — the readable
    path reads from the binary body file, so returning str ``''`` (as the
    original did) mixed str into an otherwise bytes-valued attribute.
    """
    if not self.is_body_readable:
        return b''
    self.make_body_seekable()
    r = self.body_file.read(self.content_length)
    # Leave the raw stream rewound so the body can be read again.
    self.body_file_raw.seek(0)
    return r
def _json_body__get(self):
    """Decode the request body as JSON using the request charset."""
    text = self.body.decode(self.charset)
    return json.loads(text)
def _text__get(self):
    """Return the body decoded with the request charset.

    Raises AttributeError when no charset is set.
    """
    if not self.charset:
        raise AttributeError('You cannot access Request.text unless charset is set')
    return self.body.decode(self.charset)
@property
def POST(self):
    """Return a MultiDict of variables from a form request.

    Returns an empty NoVars object for non-form requests. Typically POST
    requests; PUT with a form Content-Type is also supported.
    """
    env = self.environ
    if (self.method not in ('POST', 'PUT')):
        return NoVars('Not a form request')
    if ('webob._parsed_post_vars' in env):
        # A cached parse is valid only while the raw body file is unchanged.
        (vars, body_file) = env['webob._parsed_post_vars']
        if (body_file is self.body_file_raw):
            return vars
    content_type = self.content_type
    if (((self.method == 'PUT') and (not content_type)) or (content_type not in ('', 'application/x-www-form-urlencoded', 'multipart/form-data'))):
        return NoVars(('Not an HTML form submission (Content-Type: %s)' % content_type))
    self._check_charset()
    if self.is_body_seekable:
        self.body_file_raw.seek(0)
    # FieldStorage must not see the query string, and needs a length.
    fs_environ = env.copy()
    fs_environ.setdefault('CONTENT_LENGTH', '0')
    fs_environ['QUERY_STRING'] = ''
    if PY3:
        fs = cgi.FieldStorage(fp=self.body_file, environ=fs_environ, keep_blank_values=True, encoding='utf8')
        vars = MultiDict.from_fieldstorage(fs)
    else:
        fs = cgi.FieldStorage(fp=self.body_file, environ=fs_environ, keep_blank_values=True)
        vars = MultiDict.from_fieldstorage(fs)
    # Replace the consumed body with a regenerable form body, then cache.
    ctype = (self._content_type_raw or 'application/x-www-form-urlencoded')
    f = FakeCGIBody(vars, ctype)
    self.body_file = io.BufferedReader(f)
    env['webob._parsed_post_vars'] = (vars, self.body_file_raw)
    return vars
@property
def GET(self):
    """Return a MultiDict of the QUERY_STRING variables (cached per value)."""
    env = self.environ
    source = env.get('QUERY_STRING', '')
    if 'webob._parsed_query_vars' in env:
        vars, qs = env['webob._parsed_query_vars']
        if qs == source:
            return vars
    data = parse_qsl_text(source) if source else []
    vars = GetDict(data, env)
    env['webob._parsed_query_vars'] = (vars, source)
    return vars
@property
def params(self):
    """Combined dict-like view of query-string and form variables."""
    return NestedMultiDict(self.GET, self.POST)
@property
def cookies(self):
    """Dict-like view of the cookies found in the request."""
    env = self.environ
    return RequestCookies(env)
def copy(self):
    """Shallow-copy the request and environ; wsgi.input is copied for real."""
    self.make_body_seekable()
    env_copy = self.environ.copy()
    duplicate = self.__class__(env_copy)
    duplicate.copy_body()
    return duplicate
def copy_get(self):
    """Copy the request as a GET, discarding any request body."""
    env_copy = self.environ.copy()
    return self.__class__(env_copy, method='GET', content_type=None, body='')
def _is_body_readable__get(self):
    """Whether wsgi.input may be read even without CONTENT_LENGTH.

    Lets FakeCGIBody work and lets servers support chunked request bodies.
    """
    if http_method_probably_has_body.get(self.method):
        return True
    if self.content_length is not None:
        return True
    return self.environ.get('webob.is_body_readable', False)
def make_body_seekable(self):
    """Ensure ``wsgi.input`` is seekable and rewound.

    Copies the content (BytesIO or tempfile, per
    ``request_body_tempfile_limit``) when it is not already seekable;
    afterwards body_file is at position 0 and content_length is set.
    """
    if not self.is_body_seekable:
        self.copy_body()
    else:
        self.body_file_raw.seek(0)
def copy_body(self):
    """Copy the body in place so it is no longer shared with another request.

    Copies into a BytesIO or a temporary file; sets ``self.body`` /
    ``self.body_file_raw`` as a side effect.
    """
    if not self.is_body_readable:
        # Normalize to empty *bytes*: the body is bytes everywhere else,
        # and the original str '' mixed str into a bytes-valued attribute.
        self.body = b''
    elif self.content_length is None:
        # Unknown length: slurp everything, then spill to tempfile if big.
        self.body = self.body_file_raw.read()
        self._copy_body_tempfile()
    else:
        did_copy = self._copy_body_tempfile()
        if not did_copy:
            self.body = self.body_file.read(self.content_length)
def _copy_body_tempfile(self):
    """Copy wsgi.input to a tempfile when it exceeds the in-memory limit.

    Returns True if the copy was made. Requires content_length to be known.
    """
    tempfile_limit = self.request_body_tempfile_limit
    todo = self.content_length
    assert isinstance(todo, integer_types), todo
    if ((not tempfile_limit) or (todo <= tempfile_limit)):
        # Small enough to stay in memory (or spilling disabled entirely).
        return False
    fileobj = self.make_tempfile()
    input = self.body_file
    while (todo > 0):
        # Copy in 64 KiB chunks until Content-Length bytes have been moved.
        data = input.read(min(todo, 65536))
        if (not data):
            # Stream ended early: the client went away mid-body.
            raise DisconnectionError(('Client disconnected (%s more bytes were expected)' % todo))
        fileobj.write(data)
        todo -= len(data)
    fileobj.seek(0)
    self.body_file_raw = fileobj
    self.is_body_seekable = True
    return True
def make_tempfile(self):
    """Create the temporary file used to hold a large request body.

    This API is not stable yet; a 'size' argument might be added.
    """
    return tempfile.TemporaryFile()
def remove_conditional_headers(self, remove_encoding=True, remove_range=True, remove_match=True, remove_modified=True):
    """Strip headers that could make the response conditional (e.g. 304).

    If-Match and similar conflict-detection headers are left alone.
    """
    doomed = []
    if remove_range:
        doomed.extend(('HTTP_IF_RANGE', 'HTTP_RANGE'))
    if remove_match:
        doomed.append('HTTP_IF_NONE_MATCH')
    if remove_modified:
        doomed.append('HTTP_IF_MODIFIED_SINCE')
    if remove_encoding:
        doomed.append('HTTP_ACCEPT_ENCODING')
    for key in doomed:
        self.environ.pop(key, None)
def _cache_control__get(self):
    """Parsed Cache-Control header, cached against the raw header value."""
    env = self.environ
    value = env.get('HTTP_CACHE_CONTROL', '')
    cached_header, cached_obj = env.get('webob._cache_control', (None, None))
    if cached_obj is not None and cached_header == value:
        return cached_obj
    cache_obj = CacheControl.parse(value, updates_to=self._update_cache_control, type='request')
    env['webob._cache_control'] = (value, cache_obj)
    return cache_obj
def as_bytes(self, skip_body=False):
    """Serialize this request to HTTP bytes.

    *skip_body* True excludes the body entirely; an integer larger than
    one skips the body only when it is longer than that many bytes.
    """
    url = self.url
    host = self.host_url
    assert url.startswith(host)
    url = url[len(host):]
    parts = [bytes_('%s %s %s' % (self.method, url, self.http_version))]
    body = None
    if self.method in ('PUT', 'POST'):
        if skip_body > 1:
            if len(self.body) > skip_body:
                body = bytes_('<body skipped (len=%s)>' % len(self.body))
            else:
                skip_body = False
        if not skip_body:
            body = self.body
    for k, v in sorted(self.headers.items()):
        parts.append(bytes_('%s: %s' % (k, v)))
    if body:
        # All parts are bytes, so the blank separator line and the joiner
        # must be bytes too — the original joined bytes with str '\r\n',
        # which raises TypeError on Python 3.
        parts.extend([b'', body])
    return b'\r\n'.join(parts)
@classmethod
def from_bytes(cls, b):
    """Parse a request from raw HTTP bytes, rejecting trailing data."""
    stream = io.BytesIO(b)
    req = cls.from_file(stream)
    if stream.tell() != len(b):
        raise ValueError('The string contains more data than expected')
    return req
@classmethod
def from_file(cls, fp):
    """Read one HTTP request from a file-like object (text or bytes mode).

    Reads up to the end of the request, not of the file — except for a
    POST/PUT without Content-Length, which consumes the rest of the
    stream. May not accept every valid HTTP request.
    """
    start_line = fp.readline()
    is_text = isinstance(start_line, text_type)
    if is_text:
        crlf = '\r\n'
        colon = ':'
    else:
        # Binary streams need bytes delimiters; the original used str in
        # both branches, which breaks rstrip/split on bytes lines.
        crlf = b'\r\n'
        colon = b':'
    try:
        header = start_line.rstrip(crlf)
        (method, resource, http_version) = header.split(None, 2)
        method = native_(method, 'utf-8')
        resource = native_(resource, 'utf-8')
        http_version = native_(http_version, 'utf-8')
    except ValueError:
        raise ValueError('Bad HTTP request line: %r' % start_line)
    r = cls(environ_from_url(resource), http_version=http_version, method=method.upper())
    # environ_from_url fills in a default host; the headers below decide it.
    del r.environ['HTTP_HOST']
    while 1:
        line = fp.readline()
        if not line.strip():
            break  # blank line terminates the header section
        (hname, hval) = line.split(colon, 1)
        hname = native_(hname, 'utf-8')
        hval = native_(hval, 'utf-8').strip()
        if hname in r.headers:
            # Repeated headers fold into a comma-separated value.
            hval = r.headers[hname] + ', ' + hval
        r.headers[hname] = hval
    if r.method in ('PUT', 'POST'):
        clen = r.content_length
        if clen is None:
            body = fp.read()
        else:
            body = fp.read(clen)
        if is_text:
            body = bytes_(body, 'utf-8')
        r.body = body
    return r
def call_application(self, application, catch_exc_info=False):
    """Invoke *application* with this request's environ.

    Returns ``(status_string, headerlist, app_iter)``; remember to call
    ``app_iter.close()`` if present. With catch_exc_info=True a fourth
    exc_info element (possibly None) is returned instead of re-raising.
    """
    if self.is_body_seekable:
        self.body_file_raw.seek(0)
    captured = []
    output = []
    def start_response(status, headers, exc_info=None):
        if ((exc_info is not None) and (not catch_exc_info)):
            reraise(exc_info)
        captured[:] = [status, headers, exc_info]
        # Per WSGI, start_response returns a write() callable.
        return output.append
    app_iter = application(self.environ, start_response)
    if (output or (not captured)):
        # Either the app used the write() callable, or start_response will
        # only happen during iteration — consume app_iter now.
        try:
            output.extend(app_iter)
        finally:
            if hasattr(app_iter, 'close'):
                app_iter.close()
        app_iter = output
    if catch_exc_info:
        return (captured[0], captured[1], app_iter, captured[2])
    else:
        return (captured[0], captured[1], app_iter)
def send(self, application=None, catch_exc_info=False):
    """Send the request to *application* and return a response object.

    Uses ``self.ResponseClass`` for the result; when *application* is
    None the default send app is used.
    """
    if application is None:
        application = self.make_default_send_app()
    if catch_exc_info:
        status, headers, app_iter, exc_info = self.call_application(application, catch_exc_info=True)
        del exc_info
    else:
        status, headers, app_iter = self.call_application(application, catch_exc_info=False)
    return self.ResponseClass(status=status, headerlist=list(headers), app_iter=app_iter)
@classmethod
def blank(cls, path, environ=None, base_url=None, headers=None, POST=None, **kw):
    """Create a blank request environ (and Request) for *path*.

    *path* should be urlencoded; its query string is split off into
    QUERY_STRING. Values in *environ* override the generated ones;
    *base_url* fills in wsgi.url_scheme, HTTP_HOST and SCRIPT_NAME.
    Extra keywords are passed to ``__init__``.
    """
    env = environ_from_url(path)
    if base_url:
        (scheme, netloc, path, query, fragment) = urlparse.urlsplit(base_url)
        if (query or fragment):
            raise ValueError(('base_url (%r) cannot have a query or fragment' % base_url))
        if scheme:
            env['wsgi.url_scheme'] = scheme
        if netloc:
            if (':' not in netloc):
                # Supply the scheme's default port so SERVER_PORT is set.
                if (scheme == 'http'):
                    netloc += ':80'
                elif (scheme == 'https'):
                    netloc += ':443'
                else:
                    raise ValueError(('Unknown scheme: %r' % scheme))
            (host, port) = netloc.split(':', 1)
            env['SERVER_PORT'] = port
            env['SERVER_NAME'] = host
            env['HTTP_HOST'] = netloc
        if path:
            env['SCRIPT_NAME'] = url_unquote(path)
    if environ:
        env.update(environ)
    # A Content-Type in *headers* takes precedence over environ/kw values.
    content_type = kw.get('content_type', env.get('CONTENT_TYPE'))
    if (headers and ('Content-Type' in headers)):
        content_type = headers['Content-Type']
    if (content_type is not None):
        kw['content_type'] = content_type
    environ_add_POST(env, POST, content_type=content_type)
    obj = cls(env, **kw)
    if (headers is not None):
        obj.headers.update(headers)
    return obj
def __init__(self, entries=None):
    """Build a working set from *entries* (defaults to sys.path)."""
    self.entries = []
    self.entry_keys = {}
    self.by_key = {}
    self.callbacks = []
    if entries is None:
        entries = sys.path
    for entry in entries:
        self.add_entry(entry)
def add_entry(self, entry):
    """Append *entry* to ``.entries`` and add any distributions found on it.

    *entry* is always appended, even when already present, so ``.entries``
    can mirror ``sys.path`` (which may repeat values).
    """
    self.entries.append(entry)
    self.entry_keys.setdefault(entry, [])
    for dist in find_distributions(entry, True):
        self.add(dist, entry, False)
def __contains__(self, dist):
    """True when *dist* is the active distribution for its project."""
    active = self.by_key.get(dist.key)
    return active == dist
def find(self, req):
    """Return the active distribution matching requirement *req*, or None.

    Raises VersionConflict when the project's active distribution does
    not satisfy *req*.
    """
    dist = self.by_key.get(req.key)
    if dist is None:
        return None
    if dist not in req:
        raise VersionConflict(dist, req)
    return dist
def iter_entry_points(self, group, name=None):
    """Yield entry points from *group*, optionally restricted to *name*.

    With name=None all of each distribution's entry points in *group* are
    yielded, in distribution order.
    """
    for dist in self:
        entries = dist.get_entry_map(group)
        if name is not None:
            if name in entries:
                yield entries[name]
        else:
            for ep in entries.values():
                yield ep
def run_script(self, requires, script_name):
    """Locate the distribution for *requires* and run its *script_name*.

    The immediate caller's global namespace is reused (and cleared!) so
    the script executes as if it were the caller's module body.
    """
    ns = sys._getframe(1).f_globals  # the immediate caller's globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
    """Yield one distribution per project, in path-entry insertion order."""
    seen = set()
    for entry in self.entries:
        for key in self.entry_keys[entry]:
            if key in seen:
                continue
            seen.add(key)
            yield self.by_key[key]
def add(self, dist, entry=None, insert=True):
    """Add *dist* to the working set, associated with *entry*.

    *entry* defaults to ``dist.location``. The distribution is only added
    when its project has no active distribution yet; callbacks registered
    via subscribe() fire for newly added distributions.
    """
    if insert:
        dist.insert_on(self.entries, entry)
    if entry is None:
        entry = dist.location
    keys = self.entry_keys.setdefault(entry, [])
    keys2 = self.entry_keys.setdefault(dist.location, [])
    if dist.key in self.by_key:
        return  # an active distribution for this project already exists
    self.by_key[dist.key] = dist
    for key_list in (keys, keys2):
        if dist.key not in key_list:
            key_list.append(dist.key)
    self._added_new(dist)
def resolve(self, requirements, env=None, installer=None):
    """List all distributions needed to (recursively) meet *requirements*.

    *requirements* is a sequence of Requirement objects. *env* defaults
    to an Environment built from this set's entries. *installer*, when
    given, is invoked for requirements no installed distribution meets.
    Raises DistributionNotFound or VersionConflict on failure.
    """
    # Treat the list as a stack: seed it reversed so pops come in order.
    requirements = list(requirements)[::(-1)]
    processed = {}
    best = {}
    to_activate = []
    while requirements:
        req = requirements.pop(0)
        if (req in processed):
            # Ignore cyclic or redundant dependencies.
            continue
        dist = best.get(req.key)
        if (dist is None):
            # Find the best distribution: active one first, then environment.
            dist = self.by_key.get(req.key)
            if (dist is None):
                if (env is None):
                    env = Environment(self.entries)
                dist = best[req.key] = env.best_match(req, self, installer)
                if (dist is None):
                    raise DistributionNotFound(req)
            to_activate.append(dist)
        if (dist not in req):
            # The chosen distribution conflicts with this requirement.
            raise VersionConflict(dist, req)
        # Push this distribution's own requirements onto the stack.
        requirements.extend(dist.requires(req.extras)[::(-1)])
        processed[req] = True
    return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True):
    """Find all activatable plugin distributions in *plugin_env*.

    Returns ``(distributions, error_info)``: *distributions* lists the
    loadable plugins plus any distributions needed to resolve their
    dependencies; *error_info* maps unloadable distributions to the
    ResolutionError describing the failure. With *fallback* true, older
    versions of a plugin are tried when the newest cannot be resolved.
    """
    plugin_projects = list(plugin_env)
    plugin_projects.sort()  # scan projects in deterministic order
    error_info = {}
    distributions = {}
    if full_env is None:
        env = Environment(self.entries)
        env += plugin_env
    else:
        env = full_env + plugin_env
    shadow_set = self.__class__([])
    # NOTE: the original used map() for these side-effecting adds; on
    # Python 3 map() is lazy, so they would never execute. Loop instead.
    for dist in self:
        # Put all currently-active projects in the shadow set.
        shadow_set.add(dist)
    for project_name in plugin_projects:
        for dist in plugin_env[project_name]:
            req = [dist.as_requirement()]
            try:
                resolvees = shadow_set.resolve(req, env, installer)
            except ResolutionError as v:
                error_info[dist] = v
                if fallback:
                    continue  # try the next older version of this plugin
                else:
                    break  # give up on this project entirely
            else:
                for resolved in resolvees:
                    shadow_set.add(resolved)
                distributions.update(dict.fromkeys(resolvees))
                break
    distributions = list(distributions)
    distributions.sort()
    return (distributions, error_info)
def require(self, *requirements):
    """Activate distributions satisfying *requirements*.

    Returns the distributions that had to be resolved, including ones
    already active in this working set.
    """
    needed = self.resolve(parse_requirements(requirements))
    for dist in needed:
        self.add(dist)
    return needed
def subscribe(self, callback):
    """Register *callback* and invoke it once per current distribution."""
    if callback in self.callbacks:
        return
    self.callbacks.append(callback)
    for dist in self:
        callback(dist)
def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
    """Snapshot the distributions available on *search_path*.

    *platform*/*python* restrict which distributions are accepted; pass
    None for either to accept all. search_path defaults to sys.path.
    """
    self._distmap = {}
    self._cache = {}
    self.platform = platform
    self.python = python
    self.scan(search_path)
def can_add(self, dist):
    """Whether *dist* matches this environment's python version and platform."""
    py_ok = (self.python is None
             or dist.py_version is None
             or dist.py_version == self.python)
    return py_ok and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
    """Remove *dist* from the environment (ValueError when absent)."""
    self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
    """Add all compatible distributions found on *search_path*.

    Defaults to sys.path; only distributions matching the environment's
    platform/python constraints are kept (via add()).
    """
    if search_path is None:
        search_path = sys.path
    for item in search_path:
        for dist in find_distributions(item):
            self.add(dist)
def __getitem__(self, project_name):
    """Return a newest-to-oldest distribution list for *project_name*."""
    try:
        return self._cache[project_name]
    except KeyError:
        key = project_name.lower()
        if key not in self._distmap:
            return []
        if key not in self._cache:
            # Cache the (shared) list and keep it sorted newest-first.
            dists = self._cache[key] = self._distmap[key]
            _sort_dists(dists)
        return self._cache[key]
def add(self, dist):
    """Add *dist* when acceptable (can_add + has a version) and not present."""
    if not (self.can_add(dist) and dist.has_version()):
        return
    dists = self._distmap.setdefault(dist.key, [])
    if dist not in dists:
        dists.append(dist)
        if dist.key in self._cache:
            _sort_dists(self._cache[dist.key])
def best_match(self, req, working_set, installer=None):
    """Return the distribution best satisfying *req* for *working_set*.

    Prefers the already-active distribution (working_set.find may raise
    VersionConflict), then the newest matching distribution here, and
    finally whatever obtain() can fetch.
    """
    dist = working_set.find(req)
    if dist is not None:
        return dist
    for candidate in self[req.key]:
        if candidate in req:
            return candidate
    return self.obtain(req, installer)  # try to download/install
def obtain(self, requirement, installer=None):
    """Obtain a distribution matching `requirement` (e.g. via download).

    The base class simply delegates to ``installer(requirement)``, or
    returns None when no installer is given.  Subclasses may override
    this hook to try other acquisition strategies first.
    """
    if installer is None:
        return None
    return installer(requirement)
def __iter__(self):
    """Yield each project key that currently has at least one distribution."""
    for project in self._distmap:
        # self[project] filters out keys whose entries were all removed.
        if self[project]:
            yield project
def __iadd__(self, other):
    """In-place add: absorb a single Distribution or a whole Environment."""
    if isinstance(other, Distribution):
        self.add(other)
    elif isinstance(other, Environment):
        # Merge every distribution of every project from the other env.
        for project in other:
            for dist in other[project]:
                self.add(dist)
    else:
        raise TypeError("Can't add %r to environment" % (other,))
    return self
def __add__(self, other):
    """Return a new, unrestricted environment merging `self` and `other`."""
    # platform=None / python=None: the merged env accepts everything.
    merged = self.__class__([], platform=None, python=None)
    merged += self
    merged += other
    return merged
def resource_exists(self, package_or_requirement, resource_name):
    """Return True if the named resource exists in the given package."""
    provider = get_provider(package_or_requirement)
    return provider.has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
    """Return True if the named resource is an existing directory."""
    provider = get_provider(package_or_requirement)
    return provider.resource_isdir(resource_name)
def resource_filename(self, package_or_requirement, resource_name):
    """Return a true filesystem path for the specified resource."""
    provider = get_provider(package_or_requirement)
    return provider.get_resource_filename(self, resource_name)
def resource_stream(self, package_or_requirement, resource_name):
    """Return a readable file-like object for the specified resource."""
    provider = get_provider(package_or_requirement)
    return provider.get_resource_stream(self, resource_name)
def resource_string(self, package_or_requirement, resource_name):
    """Return the contents of the specified resource as a string."""
    provider = get_provider(package_or_requirement)
    return provider.get_resource_string(self, resource_name)
def resource_listdir(self, package_or_requirement, resource_name):
    """List the contents of the named resource directory."""
    provider = get_provider(package_or_requirement)
    return provider.resource_listdir(resource_name)
'Give an error message for problems extracting file(s)'
def extraction_error(self):
# Must be called from inside an `except` block: wraps the in-flight
# exception (sys.exc_info()[1]) in an ExtractionError carrying the
# manager, the cache path, and the original error, then raises it.
# The long user-facing message text is part of the API surface - do
# not edit it casually.
old_exc = sys.exc_info()[1] cache_path = (self.extraction_path or get_default_cache()) err = ExtractionError(("Can't extract file(s) to egg cache\n\nThe following error occurred while trying to extract file(s) to the Python egg\ncache:\n\n %s\n\nThe Python egg cache directory is currently set to:\n\n %s\n\nPerhaps your account does not have write access to this directory? You can\nchange the cache directory by setting the PYTHON_EGG_CACHE environment\nvariable to point to an accessible directory.\n" % (old_exc, cache_path))) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err
def get_cache_path(self, archive_name, names=()):
    """Return the absolute extraction path for `archive_name` + `names`.

    The parent directory of the result is created if needed.
    `archive_name` is the base filename of the enclosing egg (including
    its ".egg" extension); `names` is a sequence of path parts under the
    egg's extraction location.  Every returned path is recorded in
    ``self.cached_files`` for later cleanup, so call this only for names
    you actually intend to extract.
    """
    extract_path = self.extraction_path or get_default_cache()
    target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
    try:
        _bypass_ensure_directory(target_path)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being re-reported as an ExtractionError.
        self.extraction_error()
    self.cached_files[target_path] = 1
    return target_path
def postprocess(self, tempname, filename):
    """Platform-specific postprocessing of a freshly extracted file.

    Call ONLY after successfully extracting a compressed resource, never
    on files already in the filesystem.  `tempname` is the temporary
    name; the caller renames it to `filename` afterwards.  On POSIX this
    adds read+execute bits for everyone; elsewhere it is a no-op.
    """
    if os.name != 'posix':
        return
    # 365 == 0o555 (r-xr-xr-x to OR in), 4095 == 0o7777 (permission mask).
    current_mode = os.stat(tempname).st_mode
    os.chmod(tempname, (current_mode | 365) & 4095)
def set_extraction_path(self, path):
    """Set the base path where resources will be extracted, if needed.

    Defaults to ``get_default_cache()`` when never called.  Must be set
    before any extraction happens; once files have been extracted the
    path is locked until ``cleanup_resources()`` is called.  If you
    point this at a temporary directory, you are responsible for calling
    ``cleanup_resources()`` yourself (and even then removal of every
    extracted file is not guaranteed).
    """
    if self.cached_files:
        raise ValueError("Can't change extraction path, files already extracted")
    self.extraction_path = path
'Create a metadata provider from a zipimporter'
def __init__(self, importer):
# Grabs the zip directory info straight from zipimport's private cache
# (keyed by archive path), remembers the archive prefix used to strip
# zip-internal paths, and computes module_path as archive[/prefix].
# NOTE(review): relies on the undocumented
# zipimport._zip_directory_cache internal - verify against the target
# Python version before upgrading.
self.zipinfo = zipimport._zip_directory_cache[importer.archive] self.zip_pre = (importer.archive + os.sep) self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) else: self.module_path = importer.archive self._setup_prefix()
'Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1,extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional'
def parse(cls, src, dist=None):
# Splits "name = module:attrs [extras]" into its parts.  The [extras]
# clause is validated by round-tripping it through Requirement.parse
# on a dummy project ("x[...]"); a version spec inside the brackets is
# rejected.  attrs must form a dotted-name chain (checked with the
# MODULE regex) and is stored as a list of components.  Any parse
# failure is normalized to a single ValueError with a fixed message.
try: attrs = extras = () (name, value) = src.split('=', 1) if ('[' in value): (value, extras) = value.split('[', 1) req = Requirement.parse(('x[' + extras)) if req.specs: raise ValueError extras = req.extras if (':' in value): (value, attrs) = value.split(':', 1) if (not MODULE(attrs.rstrip())): raise ValueError attrs = attrs.rstrip().split('.') except ValueError: raise ValueError("EntryPoint must be in 'name=module:attrs [extras]' format", src) else: return cls(name.strip(), value.strip(), attrs, extras, dist)
def parse_group(cls, group, lines, dist=None):
    """Parse one entry-point group into a {name: EntryPoint} mapping.

    `group` must be a valid dotted module name; duplicate entry-point
    names within the group raise ValueError.
    """
    if not MODULE(group):
        raise ValueError('Invalid group name', group)
    entries = {}
    for line in yield_lines(lines):
        ep = cls.parse(line, dist)
        if ep.name in entries:
            raise ValueError('Duplicate entry point', group, ep.name)
        entries[ep.name] = ep
    return entries
def parse_map(cls, data, dist=None):
    """Parse a map of entry-point groups.

    `data` may be a dict of {group: lines} or raw text split into
    sections.  Lines outside any [group] header, or duplicate group
    names, raise ValueError.  Returns {group: {name: EntryPoint}}.
    """
    sections = data.items() if isinstance(data, dict) else split_sections(data)
    maps = {}
    for group, lines in sections:
        if group is None:
            # Blank preamble is fine; real content before a header is not.
            if not lines:
                continue
            raise ValueError('Entry points must be listed in groups')
        group = group.strip()
        if group in maps:
            raise ValueError('Duplicate group name', group)
        maps[group] = cls.parse_group(group, lines, dist)
    return maps
def requires(self, extras=()):
    """Return the Requirements this distro needs, plus those of `extras`.

    Raises UnknownExtra when an extra is not declared by the distro.
    """
    dep_map = self._dep_map
    # None keys the unconditional (base) dependencies.
    needed = list(dep_map.get(None, ()))
    for extra in extras:
        try:
            needed.extend(dep_map[safe_extra(extra)])
        except KeyError:
            raise UnknownExtra('%s has no such extra feature %r' % (self, extra))
    return needed
def activate(self, path=None):
    """Ensure this distribution is importable on `path` (default sys.path).

    Inserts the distribution's location on `path`; when activating on
    the real ``sys.path`` it also fixes up and declares any namespace
    packages the distribution provides.
    """
    if path is None:
        path = sys.path
    self.insert_on(path)
    if path is sys.path:
        fixup_namespace_packages(self.location)
        # Explicit loop instead of map(): under Python 3 map() is lazy,
        # so the original map(declare_namespace, ...) never actually
        # declared anything.
        for pkg in self._get_metadata('namespace_packages.txt'):
            declare_namespace(pkg)
def egg_name(self):
    """Return this distribution's standard .egg base filename."""
    name = '%s-%s-py%s' % (
        to_filename(self.project_name),
        to_filename(self.version),
        self.py_version or PY_MAJOR,
    )
    if self.platform:
        name = '%s-%s' % (name, self.platform)
    return name
def __getattr__(self, attr):
    """Delegate unrecognized public attributes to the metadata provider.

    Private/dunder names raise AttributeError so internal lookups are
    never proxied to ``self._provider``.
    """
    if attr.startswith('_'):
        # Fixed from `raise AttributeError, attr` - Python-2-only syntax
        # that is a SyntaxError on Python 3; the call form works on both.
        raise AttributeError(attr)
    return getattr(self._provider, attr)
def as_requirement(self):
    """Return a Requirement matching exactly this project and version."""
    spec = '%s==%s' % (self.project_name, self.version)
    return Requirement.parse(spec)
def load_entry_point(self, group, name):
    """Load and return the `name` entry point of `group`.

    Raises ImportError when the entry point does not exist.
    """
    entry = self.get_entry_info(group, name)
    if entry is None:
        raise ImportError('Entry point %r not found' % ((group, name),))
    return entry.load()
def get_entry_map(self, group=None):
    """Return the entry-point map for `group`, or the full map when None.

    The parsed map is lazily built from entry_points.txt metadata and
    memoized on the instance (EAFP attribute check).
    """
    try:
        ep_map = self._ep_map
    except AttributeError:
        ep_map = self._ep_map = EntryPoint.parse_map(
            self._get_metadata('entry_points.txt'), self)
    if group is None:
        return ep_map
    return ep_map.get(group, {})
def get_entry_info(self, group, name):
    """Return the EntryPoint for `group`+`name`, or None if absent."""
    group_map = self.get_entry_map(group)
    return group_map.get(name)
'Insert self.location in path before its nearest parent directory'
def insert_on(self, path, loc=None):
# Path-surgery: place this distribution's (normalized) location on
# `path` at the right spot.  Walks `path` looking for either the
# location itself (done) or - for EGG_DIST-precedence dists - the
# location's parent dir, in which case the egg is inserted just before
# the parent so it shadows it; otherwise the location is appended.
# When editing the real sys.path a version-conflict check runs first.
# The final while-loop removes any later duplicates of the location
# from both the normalized copy and `path` itself.  The statement
# order here is load-bearing (p/np index bookkeeping); do not reorder.
loc = (loc or self.location) if (not loc): return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath = [((p and _normalize_cached(p)) or p) for p in path] bp = None for (p, item) in enumerate(npath): if (item == nloc): break elif ((item == bdir) and (self.precedence == EGG_DIST)): if (path is sys.path): self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if (path is sys.path): self.check_version_conflict() path.append(loc) return while 1: try: np = npath.index(nloc, (p + 1)) except ValueError: break else: del npath[np], path[np] p = np return
def clone(self, **kw):
    """Copy this distribution, substituting in any changed keyword args.

    Explicit keyword arguments win; anything not supplied is taken from
    the current instance (metadata defaults to the current provider).
    """
    fields = ('project_name', 'version', 'py_version',
              'platform', 'location', 'precedence')
    defaults = {name: getattr(self, name, None) for name in fields}
    defaults['metadata'] = self._provider
    defaults.update(kw)
    return self.__class__(**defaults)
'DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!'
def __init__(self, project_name, specs, extras):
# Normalizes the project name (safe_name, lowercased key), sorts the
# (op, version) specs by parsed version, and precomputes hashCmp - a
# tuple of key, parsed specs, and the extras frozenset - so equality
# and hashing between Requirements are cheap and order-insensitive.
# `state_machine` maps comparison operators to transition codes used
# by the spec-matching logic elsewhere; its semantics are defined
# outside this view.
(self.unsafe_name, project_name) = (project_name, safe_name(project_name)) (self.project_name, self.key) = (project_name, project_name.lower()) index = [(parse_version(v), state_machine[op], op, v) for (op, v) in specs] index.sort() self.specs = [(op, ver) for (parsed, trans, op, ver) in index] (self.index, self.extras) = (index, tuple(map(safe_extra, extras))) self.hashCmp = (self.key, tuple([(op, parsed) for (parsed, trans, op, ver) in index]), frozenset(self.extras)) self.__hash = hash(self.hashCmp)
'Return a suite of all tests cases contained in the given module If the module is a package, load tests from all the modules in it. If the module has an ``additional_tests`` function, call it and add the return value to the tests.'
def loadTestsFromModule(self, module):
# Collects: (1) the module's own tests via the base TestLoader (except
# for setuptools' bundled doctest module, which is skipped), (2) the
# result of an optional additional_tests() hook, and (3) for packages
# (__path__ present) the tests of every submodule - .py files and
# subpackages with an __init__.py - discovered through the
# resource_listdir/resource_exists API so zipped eggs work too.
# A single suite is returned unwrapped; multiple are combined with
# suiteClass.
tests = [] if (module.__name__ != 'setuptools.tests.doctest'): tests.append(TestLoader.loadTestsFromModule(self, module)) if hasattr(module, 'additional_tests'): tests.append(module.additional_tests()) if hasattr(module, '__path__'): for file in resource_listdir(module.__name__, ''): if (file.endswith('.py') and (file != '__init__.py')): submodule = ((module.__name__ + '.') + file[:(-3)]) elif resource_exists(module.__name__, (file + '/__init__.py')): submodule = ((module.__name__ + '.') + file) else: continue tests.append(self.loadTestsFromName(submodule)) if (len(tests) != 1): return self.suiteClass(tests) else: return tests[0]
def run(self):
    """Build modules and packages, copy data files, then byte-compile."""
    if not (self.py_modules or self.packages):
        return
    if self.py_modules:
        self.build_modules()
    if self.packages:
        self.build_packages()
        self.build_package_data()
    # include_bytecode=0: only compile the sources we just produced.
    self.byte_compile(_build_py.get_outputs(self, include_bytecode=0))
def _get_data_files(self):
    """Generate a list of (package, src_dir, build_dir, filenames) tuples."""
    self.analyze_manifest()
    results = []
    for package in self.packages or ():
        src_dir = self.get_package_dir(package)
        build_dir = os.path.join(self.build_lib, *package.split('.'))
        # Strip "src_dir/" so filenames are relative to the package dir.
        strip = len(src_dir) + 1
        filenames = [f[strip:] for f in self.find_data_files(package, src_dir)]
        results.append((package, src_dir, build_dir, filenames))
    return results
def find_data_files(self, package, src_dir):
    """Return filenames for `package`'s data files under `src_dir`.

    Combines manifest-listed files with package_data glob patterns
    (both the global '' key and the package-specific key), then applies
    exclude_data_files().
    """
    patterns = (self.package_data.get('', [])
                + self.package_data.get(package, []))
    files = list(self.manifest_files.get(package, []))
    for pattern in patterns:
        files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
    return self.exclude_data_files(package, src_dir, files)
def build_package_data(self):
    """Copy each package's data files into the build directory.

    Iterates self.data_files (package, src_dir, build_dir, filenames)
    tuples, creating target directories as needed.
    """
    # Dropped the unused `lastdir = None` local - dead leftover from an
    # earlier revision that tracked the previously-created directory.
    for package, src_dir, build_dir, filenames in self.data_files:
        for filename in filenames:
            target = os.path.join(build_dir, filename)
            self.mkpath(os.path.dirname(target))
            self.copy_file(os.path.join(src_dir, filename), target)
'Check namespace packages\' __init__ for declare_namespace'
def check_package(self, package, package_dir):
try: return self.packages_checked[package] except KeyError: pass init_py = _build_py.check_package(self, package, package_dir) self.packages_checked[package] = init_py if ((not init_py) or (not self.distribution.namespace_packages)): return init_py for pkg in self.distribution.namespace_packages: if ((pkg == package) or pkg.startswith((package + '.'))): break else: return init_py f = open(init_py, 'rU') if ('declare_namespace' not in f.read()): from distutils import log log.warn('WARNING: %s is a namespace package, but its __init__.py does\nnot declare_namespace(); setuptools 0.7 will REQUIRE this!\n(See the setuptools manual under "Namespace Packages" for details.)\n', package) f.close() return init_py
def exclude_data_files(self, package, src_dir, files):
    """Filter `files` against exclude_package_data patterns.

    Drops files matching any exclusion glob (global '' key plus the
    package-specific key) and removes duplicates while preserving the
    first-seen order.
    """
    patterns = (self.exclude_package_data.get('', [])
                + self.exclude_package_data.get(package, []))
    excluded = set()
    for pattern in patterns:
        excluded.update(
            fnmatch.filter(files, os.path.join(src_dir, convert_path(pattern))))
    kept = []
    seen = set()
    for f in files:
        if f not in excluded and f not in seen:
            seen.add(f)
            kept.append(f)
    return kept
def write_script(self, script_name, contents, mode='t', *ignored):
    """Write an executable script into the install directory.

    `mode` is appended to 'w' ('t' text / 'b' binary); extra positional
    arguments are accepted for interface compatibility and ignored.
    The target is recorded in self.outfiles even under --dry-run.
    """
    log.info('Installing %s script to %s', script_name, self.install_dir)
    target = os.path.join(self.install_dir, script_name)
    self.outfiles.append(target)
    if not self.dry_run:
        ensure_directory(target)
        # try/finally: the original leaked the file handle if write()
        # raised.  (Plain try/finally keeps very old Python compatible.)
        f = open(target, 'w' + mode)
        try:
            f.write(contents)
        finally:
            f.close()
        # 493 == 0o755 (rwxr-xr-x)
        chmod(target, 493)