'Build extensions in build directory, then copy if --inplace'
def run(self):
    old_inplace, self.inplace = self.inplace, 0
    _build_ext.run(self)
    self.inplace = old_inplace
    if old_inplace:
        self.copy_extensions_to_source()
'Return true if \'ext\' links to a dynamic lib in the same package'
def links_to_dynamic(self, ext):
    libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
    pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
    for libname in ext.libraries:
        if pkg + libname in libnames:
            return True
    return False
'Write `data` to `filename`, or delete it if empty. If `data` is non-empty, this routine is the same as ``write_file()``. If `data` is empty but not ``None``, this is the same as calling ``delete_file(filename)``. If `data` is ``None``, then this is a no-op unless `filename` exists, in which case a warning is issued about the orphaned file (if `force` is false), or the file is deleted (if `force` is true).'
def write_or_delete_file(self, what, filename, data, force=False):
    if data:
        self.write_file(what, filename, data)
    elif os.path.exists(filename):
        if data is None and not force:
            log.warn('%s not set in setup(), but %s exists', what, filename)
            return
        else:
            self.delete_file(filename)
'Write `data` to `filename` (if not a dry run) after announcing it `what` is used in a log message to identify what is being written to the file.'
def write_file(self, what, filename, data):
    log.info('writing %s to %s', what, filename)
    if not self.dry_run:
        f = open(filename, 'wb')
        f.write(data)
        f.close()
'Delete `filename` (if not a dry run) after announcing it'
def delete_file(self, filename):
    log.info('deleting %s', filename)
    if not self.dry_run:
        os.unlink(filename)
'Generate SOURCES.txt manifest file'
def find_sources(self):
    manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt')
    mm = manifest_maker(self.distribution)
    mm.manifest = manifest_filename
    mm.run()
    self.filelist = mm.filelist
'Write the file list in \'self.filelist\' (presumably as filled in by \'add_defaults()\' and \'read_template()\') to the manifest file named by \'self.manifest\'.'
def write_manifest(self):
    files = self.filelist.files
    if os.sep != '/':
        files = [f.replace(os.sep, '/') for f in files]
    self.execute(write_file, (self.manifest, files),
                 "writing manifest file '%s'" % self.manifest)
'Return a pseudo-tempname base in the install directory. This code is intentionally naive; if a malicious party can write to the target directory you\'re already in deep doodoo.'
def pseudo_tempname(self):
    try:
        pid = os.getpid()
    except:
        pid = random.randint(0, sys.maxint)
    return os.path.join(self.install_dir, 'test-easy-install-%s' % pid)
'Verify that self.install_dir is .pth-capable dir, if needed'
def check_site_dir(self):
    instdir = normalize_path(self.install_dir)
    pth_file = os.path.join(instdir, 'easy-install.pth')
    is_site_dir = instdir in self.all_site_dirs
    if not is_site_dir and not self.multi_version:
        is_site_dir = self.check_pth_processing()
    else:
        testfile = self.pseudo_tempname() + '.write-test'
        test_exists = os.path.exists(testfile)
        try:
            if test_exists:
                os.unlink(testfile)
            open(testfile, 'w').close()
            os.unlink(testfile)
        except (OSError, IOError):
            self.cant_write_to_target()
    if not is_site_dir and not self.multi_version:
        raise DistutilsError(self.no_default_version_msg())
    if is_site_dir:
        if self.pth_file is None:
            self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
    else:
        self.pth_file = None
    PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
    if instdir not in map(normalize_path, filter(None, PYTHONPATH)):
        self.sitepy_installed = True
    elif self.multi_version and not os.path.exists(pth_file):
        self.sitepy_installed = True
        self.pth_file = None
    self.install_dir = instdir
'Empirically verify whether .pth files are supported in inst. dir'
def check_pth_processing(self):
    instdir = self.install_dir
    log.info('Checking .pth file support in %s', instdir)
    pth_file = self.pseudo_tempname() + '.pth'
    ok_file = pth_file + '.ok'
    ok_exists = os.path.exists(ok_file)
    try:
        if ok_exists:
            os.unlink(ok_file)
        f = open(pth_file, 'w')
    except (OSError, IOError):
        self.cant_write_to_target()
    else:
        try:
            f.write("import os;open(%r,'w').write('OK')\n" % (ok_file,))
            f.close()
            f = None
            executable = sys.executable
            if os.name == 'nt':
                dirname, basename = os.path.split(executable)
                alt = os.path.join(dirname, 'pythonw.exe')
                if basename.lower() == 'python.exe' and os.path.exists(alt):
                    executable = alt
            from distutils.spawn import spawn
            spawn([executable, '-E', '-c', 'pass'], 0)
            if os.path.exists(ok_file):
                log.info('TEST PASSED: %s appears to support .pth files', instdir)
                return True
        finally:
            if f:
                f.close()
            if os.path.exists(ok_file):
                os.unlink(ok_file)
            if os.path.exists(pth_file):
                os.unlink(pth_file)
    if not self.multi_version:
        log.warn('TEST FAILED: %s does NOT support .pth files', instdir)
    return False
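# Hedged illustration of the probe above: a .pth line that begins with
# "import" is executed by site.py at interpreter startup, so writing a marker
# file from such a line proves the directory is processed for .pth files.
# Standalone sketch with a hypothetical helper name, not the setuptools code.
import os
import subprocess
import sys

def pth_files_supported(install_dir):
    probe = os.path.join(install_dir, 'pth-probe-%d.pth' % os.getpid())
    marker = probe + '.ok'
    f = open(probe, 'w')
    f.write("import os; open(%r, 'w').write('OK')\n" % marker)
    f.close()
    try:
        # -E ignores PYTHONPATH and friends, so only real site dirs count
        subprocess.call([sys.executable, '-E', '-c', 'pass'])
        return os.path.exists(marker)
    finally:
        for path in (probe, marker):
            if os.path.exists(path):
                os.unlink(path)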
'Write all the scripts for `dist`, unless scripts are excluded'
def install_egg_scripts(self, dist):
    if not self.exclude_scripts and dist.metadata_isdir('scripts'):
        for script_name in dist.metadata_listdir('scripts'):
            self.install_script(
                dist, script_name,
                dist.get_metadata('scripts/' + script_name)
            )
    self.install_wrapper_scripts(dist)
'Generate a legacy script wrapper and install it'
def install_script(self, dist, script_name, script_text, dev_path=None):
    spec = str(dist.as_requirement())
    is_script = is_python_script(script_text, script_name)
    if is_script and dev_path:
        script_text = get_script_header(script_text) + (
            '# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r\n'
            '__requires__ = %(spec)r\n'
            'from pkg_resources import require; require(%(spec)r)\n'
            'del require\n'
            '__file__ = %(dev_path)r\n'
            'execfile(__file__)\n'
        ) % locals()
    elif is_script:
        script_text = get_script_header(script_text) + (
            '# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r\n'
            '__requires__ = %(spec)r\n'
            'import pkg_resources\n'
            'pkg_resources.run_script(%(spec)r, %(script_name)r)\n'
        ) % locals()
    self.write_script(script_name, script_text, 'b')
'Write an executable file to the scripts directory'
def write_script(self, script_name, contents, mode='t', blockers=()):
    self.delete_blockers([os.path.join(self.script_dir, x) for x in blockers])
    log.info('Installing %s script to %s', script_name, self.script_dir)
    target = os.path.join(self.script_dir, script_name)
    self.add_output(target)
    if not self.dry_run:
        ensure_directory(target)
        f = open(target, 'w' + mode)
        f.write(contents)
        f.close()
        chmod(target, 493)  # 493 == 0755
'Extract a bdist_wininst to the directories an egg would use'
def exe_to_egg(self, dist_filename, egg_tmp):
    prefixes = get_exe_prefixes(dist_filename)
    to_compile = []
    native_libs = []
    top_level = {}

    def process(src, dst):
        s = src.lower()
        for old, new in prefixes:
            if s.startswith(old):
                src = new + src[len(old):]
                parts = src.split('/')
                dst = os.path.join(egg_tmp, *parts)
                dl = dst.lower()
                if dl.endswith('.pyd') or dl.endswith('.dll'):
                    parts[-1] = bdist_egg.strip_module(parts[-1])
                    top_level[os.path.splitext(parts[0])[0]] = 1
                    native_libs.append(src)
                elif dl.endswith('.py') and old != 'SCRIPTS/':
                    top_level[os.path.splitext(parts[0])[0]] = 1
                    to_compile.append(dst)
                return dst
        if not src.endswith('.pth'):
            log.warn("WARNING: can't process %s", src)
        return None

    unpack_archive(dist_filename, egg_tmp, process)
    stubs = []
    for res in native_libs:
        if res.lower().endswith('.pyd'):
            parts = res.split('/')
            resource = parts[-1]
            parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
            pyfile = os.path.join(egg_tmp, *parts)
            to_compile.append(pyfile)
            stubs.append(pyfile)
            bdist_egg.write_stub(resource, pyfile)
    self.byte_compile(to_compile)
    bdist_egg.write_safety_flag(
        os.path.join(egg_tmp, 'EGG-INFO'),
        bdist_egg.analyze_egg(egg_tmp, stubs)
    )
    for name in ('top_level', 'native_libs'):
        if locals()[name]:
            txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
            if not os.path.exists(txt):
                open(txt, 'w').write('\n'.join(locals()[name]) + '\n')
'Verify that there are no conflicting "old-style" packages'
def check_conflicts(self, dist):
    return dist  # conflict checking is disabled; everything below is unreachable
    from imp import find_module, get_suffixes
    from glob import glob
    blockers = []
    names = dict.fromkeys(dist._get_metadata('top_level.txt'))
    exts = {'.pyc': 1, '.pyo': 1}
    for ext, mode, typ in get_suffixes():
        exts[ext] = 1
    for path, files in expand_paths([self.install_dir] + self.all_site_dirs):
        for filename in files:
            base, ext = os.path.splitext(filename)
            if base in names:
                if not ext:
                    try:
                        f, filename, descr = find_module(base, [path])
                    except ImportError:
                        continue
                    else:
                        if f:
                            f.close()
                        if filename not in blockers:
                            blockers.append(filename)
                elif ext in exts and base != 'site':
                    blockers.append(os.path.join(path, filename))
    if blockers:
        self.found_conflicts(dist, blockers)
    return dist
'Helpful installation message for display to package users'
def installation_report(self, req, dist, what='Installed'):
    msg = '\n%(what)s %(eggloc)s%(extras)s'
    if self.multi_version and not self.no_report:
        msg += """

Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:

    pkg_resources.require("%(name)s")  # latest installed version
    pkg_resources.require("%(name)s==%(version)s")  # this exact version
    pkg_resources.require("%(name)s>=%(version)s")  # this version or higher
"""
        if self.install_dir not in map(normalize_path, sys.path):
            msg += """

Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
    eggloc = dist.location
    name = dist.project_name
    version = dist.version
    extras = ''
    return msg % locals()
'Make sure there\'s a site.py in the target dir, if needed'
def install_site_py(self):
    if self.sitepy_installed:
        return
    sitepy = os.path.join(self.install_dir, 'site.py')
    source = resource_string(Requirement.parse('setuptools'), 'site.py')
    current = ''
    if os.path.exists(sitepy):
        log.debug('Checking existing site.py in %s', self.install_dir)
        current = open(sitepy, 'rb').read()
        if not current.startswith('def __boot():'):
            raise DistutilsError(
                '%s is not a setuptools-generated site.py; please remove it.'
                % sitepy
            )
    if current != source:
        log.info('Creating %s', sitepy)
        if not self.dry_run:
            ensure_directory(sitepy)
            f = open(sitepy, 'wb')
            f.write(source)
            f.close()
        self.byte_compile([sitepy])
    self.sitepy_installed = True
'Write changed .pth file back to disk'
def save(self):
    if not self.dirty:
        return
    data = '\n'.join(map(self.make_relative, self.paths))
    if data:
        log.debug('Saving %s', self.filename)
        data = (
            "import sys; sys.__plen = len(sys.path)\n"
            "%s\n"
            "import sys; new=sys.path[sys.__plen:];"
            " del sys.path[sys.__plen:];"
            " p=getattr(sys,'__egginsert',0);"
            " sys.path[p:p]=new;"
            " sys.__egginsert = p+len(new)\n"
        ) % data
        if os.path.islink(self.filename):
            os.unlink(self.filename)
        f = open(self.filename, 'wb')
        f.write(data)
        f.close()
    elif os.path.exists(self.filename):
        log.debug('Deleting empty %s', self.filename)
        os.unlink(self.filename)
    self.dirty = False
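# For reference, a .pth file written by the template above looks roughly like
# this (the egg paths are hypothetical); the first and last lines record where
# the entries were spliced into sys.path so later installs reuse the position:
#
#   import sys; sys.__plen = len(sys.path)
#   ./example_package-1.0-py2.5.egg
#   ./another_package-0.3-py2.5.egg
#   import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)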
'Add `dist` to the distribution map'
def add(self, dist):
    if dist.location not in self.paths and dist.location not in self.sitedirs:
        self.paths.append(dist.location)
        self.dirty = True
    Environment.add(self, dist)
'Remove `dist` from the distribution map'
def remove(self, dist):
    while dist.location in self.paths:
        self.paths.remove(dist.location)
        self.dirty = True
    Environment.remove(self, dist)
'Invoke reinitialized command `cmdname` with keyword args'
def call_command(self, cmdname, **kw):
    for dirname in INSTALL_DIRECTORY_ATTRS:
        kw.setdefault(dirname, self.bdist_dir)
    kw.setdefault('skip_build', self.skip_build)
    kw.setdefault('dry_run', self.dry_run)
    cmd = self.reinitialize_command(cmdname, **kw)
    self.run_command(cmdname)
    return cmd
'Create missing package __init__ files'
def make_init_files(self):
    init_files = []
    for base, dirs, files in walk_egg(self.bdist_dir):
        if base == self.bdist_dir:
            continue  # don't put an __init__.py in the root
        for name in files:
            if name.endswith('.py'):
                if '__init__.py' not in files:
                    pkg = base[len(self.bdist_dir) + 1:].replace(os.sep, '.')
                    if self.distribution.has_contents_for(pkg):
                        log.warn('Creating missing __init__.py for %s', pkg)
                        filename = os.path.join(base, '__init__.py')
                        if not self.dry_run:
                            f = open(filename, 'w')
                            f.write(NS_PKG_STUB)
                            f.close()
                        init_files.append(filename)
                break
        else:
            # no .py files at all: not a package, don't traverse into it
            dirs[:] = []
    return init_files
'Get a list of relative paths to C extensions in the output distro'
def get_ext_outputs(self):
    all_outputs = []
    ext_outputs = []
    paths = {self.bdist_dir: ''}
    for base, dirs, files in os.walk(self.bdist_dir):
        for filename in files:
            if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
                all_outputs.append(paths[base] + filename)
        for filename in dirs:
            paths[os.path.join(base, filename)] = paths[base] + filename + '/'
    if self.distribution.has_ext_modules():
        build_cmd = self.get_finalized_command('build_ext')
        for ext in build_cmd.extensions:
            if isinstance(ext, Library):
                continue
            fullname = build_cmd.get_ext_fullname(ext.name)
            filename = build_cmd.get_ext_filename(fullname)
            if not os.path.basename(filename).startswith('dl-'):
                if os.path.exists(os.path.join(self.bdist_dir, filename)):
                    ext_outputs.append(filename)
    return all_outputs, ext_outputs
'Create a new DocTest containing the given examples. The DocTest\'s globals are initialized with a copy of `globs`.'
def __init__(self, examples, globs, name, filename, lineno, docstring):
    assert not isinstance(examples, basestring), \
        'DocTest no longer accepts str; use DocTestParser instead'
    self.examples = examples
    self.docstring = docstring
    self.globs = globs.copy()
    self.name = name
    self.filename = filename
    self.lineno = lineno
'Divide the given string into examples and intervening text, and return them as a list of alternating Examples and strings. Line numbers for the Examples are 0-based. The optional argument `name` is a name identifying this string, and is only used for error messages.'
def parse(self, string, name='<string>'):
    string = string.expandtabs()
    # If all lines begin with the same indentation, strip it.
    min_indent = self._min_indent(string)
    if min_indent > 0:
        string = '\n'.join([l[min_indent:] for l in string.split('\n')])
    output = []
    charno, lineno = 0, 0
    # Find all doctest examples in the string.
    for m in self._EXAMPLE_RE.finditer(string):
        # Add the pre-example text to `output`.
        output.append(string[charno:m.start()])
        lineno += string.count('\n', charno, m.start())
        source, options, want, exc_msg = self._parse_example(m, name, lineno)
        if not self._IS_BLANK_OR_COMMENT(source):
            output.append(Example(source, want, exc_msg,
                                  lineno=lineno,
                                  indent=min_indent + len(m.group('indent')),
                                  options=options))
        lineno += string.count('\n', m.start(), m.end())
        charno = m.end()
    # Add any remaining post-example text to `output`.
    output.append(string[charno:])
    return output
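# Hedged usage sketch: the stdlib doctest module exposes the same parser API
# as the vendored class above, so it illustrates what parse() returns
# (alternating plain-text strings and Example objects).
import doctest

_text = """Intro text.

>>> 1 + 1
2

Trailing text.
"""
for _piece in doctest.DocTestParser().parse(_text, '<demo>'):
    if isinstance(_piece, doctest.Example):
        print('example at line %d: %r -> %r'
              % (_piece.lineno, _piece.source, _piece.want))
    else:
        print('text: %r' % (_piece,))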
'Extract all doctest examples from the given string, and collect them into a `DocTest` object. `globs`, `name`, `filename`, and `lineno` are attributes for the new `DocTest` object. See the documentation for `DocTest` for more information.'
def get_doctest(self, string, globs, name, filename, lineno):
    return DocTest(self.get_examples(string, name), globs,
                   name, filename, lineno, string)
'Extract all doctest examples from the given string, and return them as a list of `Example` objects. Line numbers are 0-based, because it\'s most common in doctests that nothing interesting appears on the same line as opening triple-quote, and so the first interesting line is called "line 1" then. The optional argument `name` is a name identifying this string, and is only used for error messages.'
def get_examples(self, string, name='<string>'):
    return [x for x in self.parse(string, name)
            if isinstance(x, Example)]
'Given a regular expression match from `_EXAMPLE_RE` (`m`), return a pair `(source, want)`, where `source` is the matched example\'s source code (with prompts and indentation stripped); and `want` is the example\'s expected output (with indentation stripped). `name` is the string\'s name, and `lineno` is the line number where the example starts; both are used for error messages.'
def _parse_example(self, m, name, lineno):
    # Get the example's indentation level, then strip prompts and
    # indentation from the source lines.
    indent = len(m.group('indent'))
    source_lines = m.group('source').split('\n')
    self._check_prompt_blank(source_lines, indent, name, lineno)
    self._check_prefix(source_lines[1:], ' ' * indent + '.', name, lineno)
    source = '\n'.join([sl[indent + 4:] for sl in source_lines])
    # Divide want into lines and strip the indentation.
    want = m.group('want')
    want_lines = want.split('\n')
    if len(want_lines) > 1 and re.match(' *$', want_lines[-1]):
        del want_lines[-1]  # forget final newline & spaces after it
    self._check_prefix(want_lines, ' ' * indent, name,
                       lineno + len(source_lines))
    want = '\n'.join([wl[indent:] for wl in want_lines])
    # If `want` contains a traceback message, extract it.
    m = self._EXCEPTION_RE.match(want)
    if m:
        exc_msg = m.group('msg')
    else:
        exc_msg = None
    options = self._find_options(source, name, lineno)
    return source, options, want, exc_msg
'Return a dictionary containing option overrides extracted from option directives in the given source string. `name` is the string\'s name, and `lineno` is the line number where the example starts; both are used for error messages.'
def _find_options(self, source, name, lineno):
    options = {}
    for m in self._OPTION_DIRECTIVE_RE.finditer(source):
        option_strings = m.group(1).replace(',', ' ').split()
        for option in option_strings:
            if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                raise ValueError('line %r of the doctest for %s '
                                 'has an invalid option: %r' %
                                 (lineno + 1, name, option))
            flag = OPTIONFLAGS_BY_NAME[option[1:]]
            options[flag] = (option[0] == '+')
    if options and self._IS_BLANK_OR_COMMENT(source):
        raise ValueError('line %r of the doctest for %s has an option '
                         'directive on a line with no example: %r' %
                         (lineno, name, source))
    return options
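# For illustration, a directive is a trailing "# doctest:" comment on an
# example's source line; the stdlib parser (same directive syntax as above)
# maps each "+" or "-" prefixed flag name to True or False in Example.options.
import doctest

_examples = doctest.DocTestParser().get_examples(
    '>>> print(list(range(20)))   # doctest: +ELLIPSIS\n[0, 1, ..., 19]\n')
print(_examples[0].options)   # {ELLIPSIS flag: True}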
'Return the minimum indentation of any non-blank line in `s`'
def _min_indent(self, s):
    indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
    if len(indents) > 0:
        return min(indents)
    else:
        return 0
'Given the lines of a source string (including prompts and leading indentation), check to make sure that every prompt is followed by a space character. If any line is not followed by a space character, then raise ValueError.'
def _check_prompt_blank(self, lines, indent, name, lineno):
    for i, line in enumerate(lines):
        if len(line) >= indent + 4 and line[indent + 3] != ' ':
            raise ValueError('line %r of the docstring for %s '
                             'lacks blank after %s: %r' %
                             (lineno + i + 1, name,
                              line[indent:indent + 3], line))
'Check that every line in the given list starts with the given prefix; if any line does not, then raise a ValueError.'
def _check_prefix(self, lines, prefix, name, lineno):
    for i, line in enumerate(lines):
        if line and not line.startswith(prefix):
            raise ValueError('line %r of the docstring for %s has '
                             'inconsistent leading whitespace: %r' %
                             (lineno + i + 1, name, line))
'Create a new doctest finder. The optional argument `parser` specifies a class or function that should be used to create new DocTest objects (or objects that implement the same interface as DocTest). The signature for this factory function should match the signature of the DocTest constructor. If the optional argument `recurse` is false, then `find` will only examine the given object, and not any contained objects. If the optional argument `exclude_empty` is false, then `find` will include tests for objects with empty docstrings.'
def __init__(self, verbose=False, parser=DocTestParser(), recurse=True, _namefilter=None, exclude_empty=True):
    self._parser = parser
    self._verbose = verbose
    self._recurse = recurse
    self._exclude_empty = exclude_empty
    self._namefilter = _namefilter
'Return a list of the DocTests that are defined by the given object\'s docstring, or by any of its contained objects\' docstrings. The optional parameter `module` is the module that contains the given object. If the module is not specified or is None, then the test finder will attempt to automatically determine the correct module. The object\'s module is used: - As a default namespace, if `globs` is not specified. - To prevent the DocTestFinder from extracting DocTests from objects that are imported from other modules. - To find the name of the file containing the object. - To help find the line number of the object within its file. Contained objects whose module does not match `module` are ignored. If `module` is False, no attempt to find the module will be made. This is obscure, of use mostly in tests: if `module` is False, or is None but cannot be found automatically, then all objects are considered to belong to the (non-existent) module, so all contained objects will (recursively) be searched for doctests. The globals for each DocTest is formed by combining `globs` and `extraglobs` (bindings in `extraglobs` override bindings in `globs`). A new copy of the globals dictionary is created for each DocTest. If `globs` is not specified, then it defaults to the module\'s `__dict__`, if specified, or {} otherwise. If `extraglobs` is not specified, then it defaults to {}.'
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
    # If name was not specified, then extract it from the object.
    if name is None:
        name = getattr(obj, '__name__', None)
        if name is None:
            raise ValueError("DocTestFinder.find: name must be given "
                             "when obj.__name__ doesn't exist: %r" %
                             (type(obj),))
    # Find the module that contains the given object (if obj is a module,
    # then module=obj).
    if module is False:
        module = None
    elif module is None:
        module = inspect.getmodule(obj)
    # Read the module's source code, for use by _find_lineno().
    try:
        file = inspect.getsourcefile(obj) or inspect.getfile(obj)
        source_lines = linecache.getlines(file)
        if not source_lines:
            source_lines = None
    except TypeError:
        source_lines = None
    # Initialize globals, and merge in extraglobs.
    if globs is None:
        if module is None:
            globs = {}
        else:
            globs = module.__dict__.copy()
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    # Recursively explore `obj`, extracting DocTests.
    tests = []
    self._find(tests, obj, name, module, source_lines, globs, {})
    return tests
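# Hedged usage sketch via the stdlib doctest.DocTestFinder, which exposes the
# same find() signature as the vendored finder above.
import doctest

def _add(a, b):
    """Return the sum of two numbers.

    >>> _add(2, 3)
    5
    """
    return a + b

for _t in doctest.DocTestFinder().find(_add):   # name defaults to __name__
    print('%s: %d example(s)' % (_t.name, len(_t.examples)))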
'Return true if the given object should not be examined.'
def _filter(self, obj, prefix, base):
    return (self._namefilter is not None and
            self._namefilter(prefix, base))
'Return true if the given object is defined in the given module.'
def _from_module(self, module, object):
    if module is None:
        return True
    elif inspect.isfunction(object):
        return module.__dict__ is object.func_globals
    elif inspect.isclass(object):
        return module.__name__ == object.__module__
    elif inspect.getmodule(object) is not None:
        return module is inspect.getmodule(object)
    elif hasattr(object, '__module__'):
        return module.__name__ == object.__module__
    elif isinstance(object, property):
        return True
    else:
        raise ValueError('object must be a class or function')
'Find tests for the given object and any contained objects, and add them to `tests`.'
def _find(self, tests, obj, name, module, source_lines, globs, seen):
if self._verbose: print ('Finding tests in %s' % name) if (id(obj) in seen): return seen[id(obj)] = 1 test = self._get_test(obj, name, module, globs, source_lines) if (test is not None): tests.append(test) if (inspect.ismodule(obj) and self._recurse): for (valname, val) in obj.__dict__.items(): if self._filter(val, name, valname): continue valname = ('%s.%s' % (name, valname)) if ((inspect.isfunction(val) or inspect.isclass(val)) and self._from_module(module, val)): self._find(tests, val, valname, module, source_lines, globs, seen) if (inspect.ismodule(obj) and self._recurse): for (valname, val) in getattr(obj, '__test__', {}).items(): if (not isinstance(valname, basestring)): raise ValueError(('DocTestFinder.find: __test__ keys must be strings: %r' % (type(valname),))) if (not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, basestring))): raise ValueError(('DocTestFinder.find: __test__ values must be strings, functions, methods, classes, or modules: %r' % (type(val),))) valname = ('%s.__test__.%s' % (name, valname)) self._find(tests, val, valname, module, source_lines, globs, seen) if (inspect.isclass(obj) and self._recurse): for (valname, val) in obj.__dict__.items(): if self._filter(val, name, valname): continue if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).im_func if ((inspect.isfunction(val) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)): valname = ('%s.%s' % (name, valname)) self._find(tests, val, valname, module, source_lines, globs, seen)
'Return a DocTest for the given object, if it defines a docstring; otherwise, return None.'
def _get_test(self, obj, name, module, globs, source_lines):
    # Extract the object's docstring.
    if isinstance(obj, basestring):
        docstring = obj
    else:
        try:
            if obj.__doc__ is None:
                docstring = ''
            else:
                docstring = obj.__doc__
                if not isinstance(docstring, basestring):
                    docstring = str(docstring)
        except (TypeError, AttributeError):
            docstring = ''
    # Find the docstring's location in the file.
    lineno = self._find_lineno(obj, source_lines)
    # Don't bother if the docstring is empty.
    if self._exclude_empty and not docstring:
        return None
    # Return a DocTest for this object.
    if module is None:
        filename = None
    else:
        filename = getattr(module, '__file__', module.__name__)
        if filename[-4:] in ('.pyc', '.pyo'):
            filename = filename[:-1]
    return self._parser.get_doctest(docstring, globs, name,
                                    filename, lineno)
'Return a line number of the given object\'s docstring. Note: this method assumes that the object has a docstring.'
def _find_lineno(self, obj, source_lines):
    lineno = None
    # Find the line number for modules.
    if inspect.ismodule(obj):
        lineno = 0
    # Find the line number for classes by scanning the source.
    if inspect.isclass(obj):
        if source_lines is None:
            return None
        pat = re.compile(r'^\s*class\s*%s\b' %
                         getattr(obj, '__name__', '-'))
        for i, line in enumerate(source_lines):
            if pat.match(line):
                lineno = i
                break
    # Find the line number for functions & methods.
    if inspect.ismethod(obj):
        obj = obj.im_func
    if inspect.isfunction(obj):
        obj = obj.func_code
    if inspect.istraceback(obj):
        obj = obj.tb_frame
    if inspect.isframe(obj):
        obj = obj.f_code
    if inspect.iscode(obj):
        lineno = getattr(obj, 'co_firstlineno', None) - 1
    # Find the line where the docstring starts: assume it's the first
    # line that begins with a quote mark.
    if lineno is not None:
        if source_lines is None:
            return lineno + 1
        pat = re.compile(r'(^|.*:)\s*\w*("|\')')
        for lineno in range(lineno, len(source_lines)):
            if pat.match(source_lines[lineno]):
                return lineno
    # We couldn't find the line number.
    return None
'Create a new test runner. Optional keyword arg `checker` is the `OutputChecker` that should be used to compare the expected outputs and actual outputs of doctest examples. Optional keyword arg \'verbose\' prints lots of stuff if true, only failures if false; by default, it\'s true iff \'-v\' is in sys.argv. Optional argument `optionflags` can be used to control how the test runner compares expected output to actual output, and how it displays failures. See the documentation for `testmod` for more information.'
def __init__(self, checker=None, verbose=None, optionflags=0):
    self._checker = checker or OutputChecker()
    if verbose is None:
        verbose = '-v' in sys.argv
    self._verbose = verbose
    self.optionflags = optionflags
    self.original_optionflags = optionflags
    self.tries = 0
    self.failures = 0
    self._name2ft = {}
    self._fakeout = _SpoofOut()
'Report that the test runner is about to process the given example. (Only displays a message if verbose=True)'
def report_start(self, out, test, example):
    if self._verbose:
        if example.want:
            out('Trying:\n' + _indent(example.source) +
                'Expecting:\n' + _indent(example.want))
        else:
            out('Trying:\n' + _indent(example.source) +
                'Expecting nothing\n')
'Report that the given example ran successfully. (Only displays a message if verbose=True)'
def report_success(self, out, test, example, got):
    if self._verbose:
        out('ok\n')
'Report that the given example failed.'
def report_failure(self, out, test, example, got):
    out(self._failure_header(test, example) +
        self._checker.output_difference(example, got, self.optionflags))
'Report that the given example raised an unexpected exception.'
def report_unexpected_exception(self, out, test, example, exc_info):
    out(self._failure_header(test, example) + 'Exception raised:\n' +
        _indent(_exception_traceback(exc_info)))
'Run the examples in `test`. Write the outcome of each example with one of the `DocTestRunner.report_*` methods, using the writer function `out`. `compileflags` is the set of compiler flags that should be used to execute examples. Return a tuple `(f, t)`, where `t` is the number of examples tried, and `f` is the number of examples that failed. The examples are run in the namespace `test.globs`.'
def __run(self, test, compileflags, out):
failures = tries = 0 original_optionflags = self.optionflags (SUCCESS, FAILURE, BOOM) = range(3) check = self._checker.check_output for (examplenum, example) in enumerate(test.examples): quiet = ((self.optionflags & REPORT_ONLY_FIRST_FAILURE) and (failures > 0)) self.optionflags = original_optionflags if example.options: for (optionflag, val) in example.options.items(): if val: self.optionflags |= optionflag else: self.optionflags &= (~ optionflag) tries += 1 if (not quiet): self.report_start(out, test, example) filename = ('<doctest %s[%d]>' % (test.name, examplenum)) try: exec compile(example.source, filename, 'single', compileflags, 1) in test.globs self.debugger.set_continue() exception = None except KeyboardInterrupt: raise except: exception = sys.exc_info() self.debugger.set_continue() got = self._fakeout.getvalue() self._fakeout.truncate(0) outcome = FAILURE if (exception is None): if check(example.want, got, self.optionflags): outcome = SUCCESS else: exc_info = sys.exc_info() exc_msg = traceback.format_exception_only(*exc_info[:2])[(-1)] if (not quiet): got += _exception_traceback(exc_info) if (example.exc_msg is None): outcome = BOOM elif check(example.exc_msg, exc_msg, self.optionflags): outcome = SUCCESS elif (self.optionflags & IGNORE_EXCEPTION_DETAIL): m1 = re.match('[^:]*:', example.exc_msg) m2 = re.match('[^:]*:', exc_msg) if (m1 and m2 and check(m1.group(0), m2.group(0), self.optionflags)): outcome = SUCCESS if (outcome is SUCCESS): if (not quiet): self.report_success(out, test, example, got) elif (outcome is FAILURE): if (not quiet): self.report_failure(out, test, example, got) failures += 1 elif (outcome is BOOM): if (not quiet): self.report_unexpected_exception(out, test, example, exc_info) failures += 1 else: assert False, ('unknown outcome', outcome) self.optionflags = original_optionflags self.__record_outcome(test, failures, tries) return (failures, tries)
'Record the fact that the given DocTest (`test`) generated `f` failures out of `t` tried examples.'
def __record_outcome(self, test, f, t):
    f2, t2 = self._name2ft.get(test.name, (0, 0))
    self._name2ft[test.name] = (f + f2, t + t2)
    self.failures += f
    self.tries += t
'Run the examples in `test`, and display the results using the writer function `out`. The examples are run in the namespace `test.globs`. If `clear_globs` is true (the default), then this namespace will be cleared after the test runs, to help with garbage collection. If you would like to examine the namespace after the test completes, then use `clear_globs=False`. `compileflags` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to `globs`. The output of each example is checked using `DocTestRunner.check_output`, and the results are formatted by the `DocTestRunner.report_*` methods.'
def run(self, test, compileflags=None, out=None, clear_globs=True):
    self.test = test
    if compileflags is None:
        compileflags = _extract_future_flags(test.globs)
    save_stdout = sys.stdout
    if out is None:
        out = save_stdout.write
    sys.stdout = self._fakeout
    # Patch pdb.set_trace to restore sys.stdout during interactive debugging.
    save_set_trace = pdb.set_trace
    self.debugger = _OutputRedirectingPdb(save_stdout)
    self.debugger.reset()
    pdb.set_trace = self.debugger.set_trace
    # Patch linecache.getlines so the example's source is visible in the debugger.
    self.save_linecache_getlines = linecache.getlines
    linecache.getlines = self.__patched_linecache_getlines
    try:
        return self.__run(test, compileflags, out)
    finally:
        sys.stdout = save_stdout
        pdb.set_trace = save_set_trace
        linecache.getlines = self.save_linecache_getlines
        if clear_globs:
            test.globs.clear()
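# Hedged usage sketch: building a DocTest by hand and running it with the
# stdlib doctest.DocTestRunner, whose run() matches the method above.
import doctest

_test = doctest.DocTestParser().get_doctest(
    '>>> 2 + 2\n4\n', {}, 'demo', '<demo>', 0)
_failed, _tried = doctest.DocTestRunner(verbose=False).run(_test)
print('%d of %d examples failed' % (_failed, _tried))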
'Print a summary of all the test cases that have been run by this DocTestRunner, and return a tuple `(f, t)`, where `f` is the total number of failed examples, and `t` is the total number of tried examples. The optional `verbose` argument controls how detailed the summary is. If the verbosity is not specified, then the DocTestRunner\'s verbosity is used.'
def summarize(self, verbose=None):
if (verbose is None): verbose = self._verbose notests = [] passed = [] failed = [] totalt = totalf = 0 for x in self._name2ft.items(): (name, (f, t)) = x assert (f <= t) totalt += t totalf += f if (t == 0): notests.append(name) elif (f == 0): passed.append((name, t)) else: failed.append(x) if verbose: if notests: print len(notests), 'items had no tests:' notests.sort() for thing in notests: print ' ', thing if passed: print len(passed), 'items passed all tests:' passed.sort() for (thing, count) in passed: print (' %3d tests in %s' % (count, thing)) if failed: print self.DIVIDER print len(failed), 'items had failures:' failed.sort() for (thing, (f, t)) in failed: print (' %3d of %3d in %s' % (f, t, thing)) if verbose: print totalt, 'tests in', len(self._name2ft), 'items.' print (totalt - totalf), 'passed and', totalf, 'failed.' if totalf: print '***Test Failed***', totalf, 'failures.' elif verbose: print 'Test passed.' return (totalf, totalt)
'Return True iff the actual output from an example (`got`) matches the expected output (`want`). These strings are always considered to match if they are identical; but depending on what option flags the test runner is using, several non-exact match types are also possible. See the documentation for `TestRunner` for more information about option flags.'
def check_output(self, want, got, optionflags):
    # Handle the common case first: identical strings always match.
    if got == want:
        return True
    # The values True and False replaced 1 and 0 as the return
    # value for boolean comparisons in Python 2.3.
    if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
        if (got, want) == ('True\n', '1\n'):
            return True
        if (got, want) == ('False\n', '0\n'):
            return True
    # <BLANKLINE> can be used as a special sequence to signify a blank line,
    # unless the DONT_ACCEPT_BLANKLINE flag is used.
    if not (optionflags & DONT_ACCEPT_BLANKLINE):
        want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                      '', want)
        got = re.sub(r'(?m)^\s*?$', '', got)
        if got == want:
            return True
    # This flag causes doctest to ignore differences in whitespace.
    if optionflags & NORMALIZE_WHITESPACE:
        got = ' '.join(got.split())
        want = ' '.join(want.split())
        if got == want:
            return True
    # The ELLIPSIS flag lets "..." in `want` match any substring in `got`.
    if optionflags & ELLIPSIS:
        if _ellipsis_match(want, got):
            return True
    # We didn't find any match; return false.
    return False
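# Hedged illustration of the matching rules above, using the stdlib
# doctest.OutputChecker (same check_output(want, got, optionflags) signature).
import doctest

_checker = doctest.OutputChecker()
print(_checker.check_output('1\n', 'True\n', 0))            # True-for-1 accepted
print(_checker.check_output('a  b\n', 'a b\n',
                            doctest.NORMALIZE_WHITESPACE))   # whitespace folded
print(_checker.check_output('a ... z\n', 'a b c z\n',
                            doctest.ELLIPSIS))               # "..." wildcard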
'Return a string describing the differences between the expected output for a given example (`example`) and the actual output (`got`). `optionflags` is the set of option flags used to compare `want` and `got`.'
def output_difference(self, example, got, optionflags):
want = example.want if (not (optionflags & DONT_ACCEPT_BLANKLINE)): got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) if self._do_a_fancy_diff(want, got, optionflags): want_lines = want.splitlines(True) got_lines = got.splitlines(True) if (optionflags & REPORT_UDIFF): diff = difflib.unified_diff(want_lines, got_lines, n=2) diff = list(diff)[2:] kind = 'unified diff with -expected +actual' elif (optionflags & REPORT_CDIFF): diff = difflib.context_diff(want_lines, got_lines, n=2) diff = list(diff)[2:] kind = 'context diff with expected followed by actual' elif (optionflags & REPORT_NDIFF): engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) diff = list(engine.compare(want_lines, got_lines)) kind = 'ndiff with -expected +actual' else: assert 0, 'Bad diff option' diff = [(line.rstrip() + '\n') for line in diff] return (('Differences (%s):\n' % kind) + _indent(''.join(diff))) if (want and got): return ('Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))) elif want: return ('Expected:\n%sGot nothing\n' % _indent(want)) elif got: return ('Expected nothing\nGot:\n%s' % _indent(got)) else: return 'Expected nothing\nGot nothing\n'
'Run the test case without results and without catching exceptions The unit test framework includes a debug method on test cases and test suites to support post-mortem debugging. The test code is run in such a way that errors are not caught. This way a caller can catch the errors and initiate post-mortem debugging. The DocTestCase provides a debug method that raises UnexpectedException errors if there is an unexepcted exception: >>> test = DocTestParser().get_doctest(\'>>> raise KeyError\n42\', ... {}, \'foo\', \'foo.py\', 0) >>> case = DocTestCase(test) >>> try: ... case.debug() ... except UnexpectedException, failure: ... pass The UnexpectedException contains the test, the example, and the original exception: >>> failure.test is test True >>> failure.example.want \'42\n\' >>> exc_info = failure.exc_info >>> raise exc_info[0], exc_info[1], exc_info[2] Traceback (most recent call last): KeyError If the output doesn\'t match, then a DocTestFailure is raised: >>> test = DocTestParser().get_doctest(\'\'\' ... >>> x = 1 ... >>> x ... 2 ... \'\'\', {}, \'foo\', \'foo.py\', 0) >>> case = DocTestCase(test) >>> try: ... case.debug() ... except DocTestFailure, failure: ... pass DocTestFailure objects provide access to the test: >>> failure.test is test True As well as to the example: >>> failure.example.want \'2\n\' and the actual output: >>> failure.got \'1\n\''
def debug(self):
    self.setUp()
    runner = DebugRunner(optionflags=self._dt_optionflags,
                         checker=self._dt_checker, verbose=False)
    runner.run(self._dt_test)
    self.tearDown()
'val -> _TestClass object with associated value val. >>> t = _TestClass(123) >>> print t.get() 123'
def __init__(self, val):
    self.val = val
'square() -> square TestClass\'s associated value >>> _TestClass(13).square().get() 169'
def square(self):
    self.val = self.val ** 2
    return self
'get() -> return TestClass\'s associated value. >>> x = _TestClass(-42) >>> print x.get() -42'
def get(self):
    return self.val
'Return full package/distribution name, w/version'
def full_name(self):
    if self.requested_version is not None:
        return '%s-%s' % (self.name, self.requested_version)
    return self.name
'Is \'version\' sufficiently up-to-date?'
def version_ok(self, version):
    return (self.attribute is None or self.format is None or
            (str(version) != 'unknown' and
             version >= self.requested_version))
'Get version number of installed module, \'None\', or \'default\' Search \'paths\' for module. If not found, return \'None\'. If found, return the extracted version attribute, or \'default\' if no version attribute was specified, or the value cannot be determined without importing the module. The version is formatted according to the requirement\'s version format (if any), unless it is \'None\' or the supplied \'default\'.'
def get_version(self, paths=None, default='unknown'):
    if self.attribute is None:
        try:
            f, p, i = find_module(self.module, paths)
            if f:
                f.close()
            return default
        except ImportError:
            return None
    v = get_module_constant(self.module, self.attribute, default, paths)
    if v is not None and v is not default and self.format is not None:
        return self.format(v)
    return v
'Return true if dependency is present on \'paths\''
def is_present(self, paths=None):
    return self.get_version(paths) is not None
'Return true if dependency is present and up-to-date on \'paths\''
def is_current(self, paths=None):
    version = self.get_version(paths)
    if version is None:
        return False
    return self.version_ok(version)
'Process features after parsing command line options'
def parse_command_line(self):
    result = _Distribution.parse_command_line(self)
    if self.features:
        self._finalize_features()
    return result
'Convert feature name to corresponding option attribute name'
def _feature_attrname(self, name):
    return 'with_' + name.replace('-', '_')
'Resolve pre-setup requirements'
def fetch_build_eggs(self, requires):
    from pkg_resources import working_set, parse_requirements
    for dist in working_set.resolve(parse_requirements(requires),
                                    installer=self.fetch_build_egg):
        working_set.add(dist)
'Fetch an egg needed for building'
def fetch_build_egg(self, req):
    try:
        cmd = self._egg_fetcher
    except AttributeError:
        from setuptools.command.easy_install import easy_install
        dist = self.__class__({'script_args': ['easy_install']})
        dist.parse_config_files()
        opts = dist.get_option_dict('easy_install')
        keep = ('find_links', 'site_dirs', 'index_url', 'optimize',
                'site_dirs', 'allow_hosts')
        for key in opts.keys():
            if key not in keep:
                del opts[key]
        if self.dependency_links:
            links = self.dependency_links[:]
            if 'find_links' in opts:
                links = opts['find_links'][1].split() + links
            opts['find_links'] = ('setup', links)
        cmd = easy_install(
            dist, args=['x'], install_dir=os.curdir, exclude_scripts=True,
            always_copy=False, build_directory=None, editable=False,
            upgrade=False, multi_version=True, no_report=True
        )
        cmd.ensure_finalized()
        self._egg_fetcher = cmd
    return cmd.easy_install(req)
'Add --with-X/--without-X options based on optional features'
def _set_global_opts_from_features(self):
    go = []
    no = self.negative_opt.copy()
    for name, feature in self.features.items():
        self._set_feature(name, None)
        feature.validate(self)
        if feature.optional:
            descr = feature.description
            incdef = ' (default)'
            excdef = ''
            if not feature.include_by_default():
                excdef, incdef = incdef, excdef
            go.append(('with-' + name, None, 'include ' + descr + incdef))
            go.append(('without-' + name, None, 'exclude ' + descr + excdef))
            no['without-' + name] = 'with-' + name
    self.global_options = self.feature_options = go + self.global_options
    self.negative_opt = self.feature_negopt = no
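# Hedged sketch of how a setup script declares an optional feature; with a
# setup() call like this, the loop above adds --with-speedups/--without-speedups
# to the global options (the names below are illustrative, not from this excerpt).
from setuptools import setup, Extension
from setuptools.dist import Feature

setup(
    name='example',
    version='0.1',
    features={
        'speedups': Feature(
            'optional C speedups',
            standard=True,
            ext_modules=[Extension('example._speedups',
                                   ['example/_speedups.c'])],
        ),
    },
)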
'Add/remove features and resolve dependencies between them'
def _finalize_features(self):
    for name, feature in self.features.items():
        enabled = self.feature_is_included(name)
        if enabled or (enabled is None and feature.include_by_default()):
            feature.include_in(self)
            self._set_feature(name, 1)
    for name, feature in self.features.items():
        if not self.feature_is_included(name):
            feature.exclude_from(self)
            self._set_feature(name, 0)
'Pluggable version of get_command_class()'
def get_command_class(self, command):
    if command in self.cmdclass:
        return self.cmdclass[command]
    for ep in pkg_resources.iter_entry_points('distutils.commands', command):
        ep.require(installer=self.fetch_build_egg)
        self.cmdclass[command] = cmdclass = ep.load()
        return cmdclass
    else:
        return _Distribution.get_command_class(self, command)
'Set feature\'s inclusion status'
def _set_feature(self, name, status):
    setattr(self, self._feature_attrname(name), status)
'Return 1 if feature is included, 0 if excluded, \'None\' if unknown'
def feature_is_included(self, name):
    return getattr(self, self._feature_attrname(name))
'Request inclusion of feature named \'name\''
def include_feature(self, name):
    if self.feature_is_included(name) == 0:
        descr = self.features[name].description
        raise DistutilsOptionError(
            descr + ' is required, but was excluded or is not available'
        )
    self.features[name].include_in(self)
    self._set_feature(name, 1)
'Add items to distribution that are named in keyword arguments. For example, \'dist.include(py_modules=["x"])\' would add \'x\' to the distribution\'s \'py_modules\' attribute, if it was not already there. Currently, this method only supports inclusion for attributes that are lists or tuples. If you need to add support for adding to other attributes in this or a subclass, you can add an \'_include_X\' method, where \'X\' is the name of the attribute. The method will be called with the value passed to \'include()\'. So, \'dist.include(foo={"bar":"baz"})\' will try to call \'dist._include_foo({"bar":"baz"})\', which can then handle whatever special inclusion logic is needed.'
def include(self, **attrs):
    for k, v in attrs.items():
        include = getattr(self, '_include_' + k, None)
        if include:
            include(v)
        else:
            self._include_misc(k, v)
'Remove packages, modules, and extensions in named package'
def exclude_package(self, package):
    pfx = package + '.'
    if self.packages:
        self.packages = [
            p for p in self.packages
            if p != package and not p.startswith(pfx)
        ]
    if self.py_modules:
        self.py_modules = [
            p for p in self.py_modules
            if p != package and not p.startswith(pfx)
        ]
    if self.ext_modules:
        self.ext_modules = [
            p for p in self.ext_modules
            if p.name != package and not p.name.startswith(pfx)
        ]
'Return true if \'exclude_package(package)\' would do something'
def has_contents_for(self, package):
    pfx = package + '.'
    for p in self.iter_distribution_names():
        if p == package or p.startswith(pfx):
            return True
'Handle \'exclude()\' for list/tuple attrs without a special handler'
def _exclude_misc(self, name, value):
    if not isinstance(value, sequence):
        raise DistutilsSetupError(
            '%s: setting must be a list or tuple (%r)' % (name, value)
        )
    try:
        old = getattr(self, name)
    except AttributeError:
        raise DistutilsSetupError('%s: No such distribution setting' % name)
    if old is not None and not isinstance(old, sequence):
        raise DistutilsSetupError(
            name + ': this setting cannot be changed via include/exclude'
        )
    elif old:
        setattr(self, name, [item for item in old if item not in value])
'Handle \'include()\' for list/tuple attrs without a special handler'
def _include_misc(self, name, value):
    if not isinstance(value, sequence):
        raise DistutilsSetupError(
            '%s: setting must be a list (%r)' % (name, value)
        )
    try:
        old = getattr(self, name)
    except AttributeError:
        raise DistutilsSetupError('%s: No such distribution setting' % name)
    if old is None:
        setattr(self, name, value)
    elif not isinstance(old, sequence):
        raise DistutilsSetupError(
            name + ': this setting cannot be changed via include/exclude'
        )
    else:
        setattr(self, name, old + [item for item in value if item not in old])
'Remove items from distribution that are named in keyword arguments For example, \'dist.exclude(py_modules=["x"])\' would remove \'x\' from the distribution\'s \'py_modules\' attribute. Excluding packages uses the \'exclude_package()\' method, so all of the package\'s contained packages, modules, and extensions are also excluded. Currently, this method only supports exclusion from attributes that are lists or tuples. If you need to add support for excluding from other attributes in this or a subclass, you can add an \'_exclude_X\' method, where \'X\' is the name of the attribute. The method will be called with the value passed to \'exclude()\'. So, \'dist.exclude(foo={"bar":"baz"})\' will try to call \'dist._exclude_foo({"bar":"baz"})\', which can then handle whatever special exclusion logic is needed.'
def exclude(self, **attrs):
    for k, v in attrs.items():
        exclude = getattr(self, '_exclude_' + k, None)
        if exclude:
            exclude(v)
        else:
            self._exclude_misc(k, v)
'Return a \'{cmd: {opt:val}}\' map of all command-line options Option names are all long, but do not include the leading \'--\', and contain dashes rather than underscores. If the option doesn\'t take an argument (e.g. \'--quiet\'), the \'val\' is \'None\'. Note that options provided by config files are intentionally excluded.'
def get_cmdline_options(self):
    d = {}
    for cmd, opts in self.command_options.items():
        for opt, (src, val) in opts.items():
            if src != 'command line':
                continue
            opt = opt.replace('_', '-')
            if val == 0:
                cmdobj = self.get_command_obj(cmd)
                neg_opt = self.negative_opt.copy()
                neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
                for neg, pos in neg_opt.items():
                    if pos == opt:
                        opt = neg
                        val = None
                        break
                else:
                    raise AssertionError("Shouldn't be able to get here")
            elif val == 1:
                val = None
            d.setdefault(cmd, {})[opt] = val
    return d
'Yield all packages, modules, and extension names in distribution'
def iter_distribution_names(self):
    for pkg in self.packages or ():
        yield pkg
    for module in self.py_modules or ():
        yield module
    for ext in self.ext_modules or ():
        if isinstance(ext, tuple):
            name, buildinfo = ext
        else:
            name = ext.name
        if name.endswith('module'):
            name = name[:-6]
        yield name
'Should this feature be included by default?'
def include_by_default(self):
    return self.available and self.standard
'Ensure feature and its requirements are included in distribution You may override this in a subclass to perform additional operations on the distribution. Note that this method may be called more than once per feature, and so should be idempotent.'
def include_in(self, dist):
    if not self.available:
        raise DistutilsPlatformError(
            self.description + ' is required,but is not available on this platform'
        )
    dist.include(**self.extras)
    for f in self.require_features:
        dist.include_feature(f)
'Ensure feature is excluded from distribution You may override this in a subclass to perform additional operations on the distribution. This method will be called at most once per feature, and only after all included features have been asked to include themselves.'
def exclude_from(self, dist):
    dist.exclude(**self.extras)
    if self.remove:
        for item in self.remove:
            dist.exclude_package(item)
'Verify that feature makes sense in context of distribution This method is called by the distribution just before it parses its command line. It checks to ensure that the \'remove\' attribute, if any, contains only valid package/module names that are present in the base distribution when \'setup()\' is called. You may override it in a subclass to perform any other required validation of the feature against a target distribution.'
def validate(self, dist):
    for item in self.remove:
        if not dist.has_contents_for(item):
            raise DistutilsSetupError(
                "%s wants to be able to remove %s, but the distribution "
                "doesn't contain any packages or modules under %s"
                % (self.description, item, item)
            )
'Run \'func\' under os sandboxing'
def run(self, func):
    try:
        self._copy(self)
        __builtin__.file = self._file
        __builtin__.open = self._open
        self._active = True
        return func()
    finally:
        self._active = False
        # restore the real builtins
        __builtin__.open = _open
        __builtin__.file = _file
        self._copy(_os)
'Called to remap or validate any path, whether input or output'
def _validate_path(self, path):
    return path
'Called for path inputs'
def _remap_input(self, operation, path, *args, **kw):
    return self._validate_path(path)
'Called for path outputs'
def _remap_output(self, operation, path):
    return self._validate_path(path)
'Called for path pairs like rename, link, and symlink operations'
def _remap_pair(self, operation, src, dst, *args, **kw):
    return (
        self._remap_input(operation + '-from', src, *args, **kw),
        self._remap_input(operation + '-to', dst, *args, **kw),
    )
'Called for path inputs'
def _remap_input(self, operation, path, *args, **kw):
    if operation in self.write_ops and not self._ok(path):
        self._violation(operation, os.path.realpath(path), *args, **kw)
    return path
'Called for path pairs like rename, link, and symlink operations'
def _remap_pair(self, operation, src, dst, *args, **kw):
    if not self._ok(src) or not self._ok(dst):
        self._violation(operation, src, dst, *args, **kw)
    return (src, dst)
'Called for low-level os.open()'
def open(self, file, flags, mode=511):
    if (flags & WRITE_FLAGS) and not self._ok(file):
        self._violation('os.open', file, flags, mode)
    return _os.open(file, flags, mode)
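# WRITE_FLAGS is not defined in this excerpt; a plausible construction (an
# assumption, not verbatim setuptools source) is a bitwise OR of the os.open()
# flag constants that imply writing:
import operator
import os as _os
from functools import reduce

WRITE_FLAGS = reduce(operator.or_, [
    getattr(_os, a, 0)
    for a in 'O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY'.split()
])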
'Evaluate a URL as a possible download, and maybe retrieve it'
def process_url(self, url, retrieve=False):
if ((url in self.scanned_urls) and (not retrieve)): return self.scanned_urls[url] = True if (not URL_SCHEME(url)): self.process_filename(url) return else: dists = list(distros_for_url(url)) if dists: if (not self.url_ok(url)): return self.debug('Found link: %s', url) if (dists or (not retrieve) or (url in self.fetched_urls)): map(self.add, dists) return if (not self.url_ok(url)): self.fetched_urls[url] = True return self.info('Reading %s', url) self.fetched_urls[url] = True f = self.open_url(url, 'Download error: %s -- Some packages may not be found!') if (f is None): return self.fetched_urls[f.url] = True if ('html' not in f.headers.get('content-type', '').lower()): f.close() return base = f.url page = f.read() f.close() if (url.startswith(self.index_url) and (getattr(f, 'code', None) != 404)): page = self.process_index(url, page) for match in HREF.finditer(page): link = urlparse.urljoin(base, htmldecode(match.group(1))) self.process_url(link)
'Process the contents of a PyPI page'
def process_index(self, url, page):
    def scan(link):
        # Process a URL to see if it's for a package page.
        if link.startswith(self.index_url):
            parts = map(urllib2.unquote, link[len(self.index_url):].split('/'))
            if len(parts) == 2 and '#' not in parts[1]:
                # it's a package page, sanitize and index it
                pkg = safe_name(parts[0])
                ver = safe_version(parts[1])
                self.package_pages.setdefault(pkg.lower(), {})[link] = True
                return to_filename(pkg), to_filename(ver)
        return None, None

    # Process an index page into the package-page index.
    for match in HREF.finditer(page):
        scan(urlparse.urljoin(url, htmldecode(match.group(1))))

    pkg, ver = scan(url)  # ensure this page is in the page index
    if pkg:
        # Process an individual package page.
        for new_url in find_external_links(url, page):
            base, frag = egg_info_for_url(new_url)
            if base.endswith('.py') and not frag:
                if ver:
                    new_url += '#egg=%s-%s' % (pkg, ver)
                else:
                    self.need_version_info(url)
            self.scan_url(new_url)
        return PYPI_MD5.sub(
            lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
        )
    else:
        return ''
'Add `urls` to the list that will be prescanned for searches'
def add_find_links(self, urls):
    for url in urls:
        if (self.to_scan is None or not URL_SCHEME(url) or
                url.startswith('file:') or list(distros_for_url(url))):
            self.scan_url(url)
        else:
            self.to_scan.append(url)
'Scan urls scheduled for prescanning (e.g. --find-links)'
def prescan(self):
    if self.to_scan:
        map(self.scan_url, self.to_scan)
    self.to_scan = None
'Locate and/or download `spec` to `tmpdir`, returning a local path `spec` may be a ``Requirement`` object, or a string containing a URL, an existing local filename, or a project/version requirement spec (i.e. the string form of a ``Requirement`` object). If it is the URL of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is automatically created alongside the downloaded file. If `spec` is a ``Requirement`` object or a string containing a project/version requirement spec, this method returns the location of a matching distribution (possibly after downloading it to `tmpdir`). If `spec` is a locally existing file or directory name, it is simply returned unchanged. If `spec` is a URL, it is downloaded to a subpath of `tmpdir`, and the local filename is returned. Various errors may be raised if a problem occurs during downloading.'
def download(self, spec, tmpdir):
    if not isinstance(spec, Requirement):
        scheme = URL_SCHEME(spec)
        if scheme:
            # It's a URL; download it to tmpdir.
            found = self._download_url(scheme.group(1), spec, tmpdir)
            base, fragment = egg_info_for_url(spec)
            if base.endswith('.py'):
                found = self.gen_setup(found, fragment, tmpdir)
            return found
        elif os.path.exists(spec):
            # Existing file or directory; just return it.
            return spec
        else:
            try:
                spec = Requirement.parse(spec)
            except ValueError:
                raise DistutilsError(
                    'Not a URL, existing file, or requirement spec: %r'
                    % (spec,)
                )
    return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
'Obtain a distribution suitable for fulfilling `requirement` `requirement` must be a ``pkg_resources.Requirement`` instance. If necessary, or if the `force_scan` flag is set, the requirement is searched for in the (online) package index as well as the locally installed packages. If a distribution matching `requirement` is found, the returned distribution\'s ``location`` is the value you would have gotten from calling the ``download()`` method with the matching distribution\'s URL or filename. If no matching distribution is found, ``None`` is returned. If the `source` flag is set, only source distributions and source checkout links will be considered. Unless the `develop_ok` flag is set, development and system eggs (i.e., those using the ``.egg-info`` format) will be ignored.'
def fetch_distribution(self, requirement, tmpdir, force_scan=False, source=False, develop_ok=False, local_index=None):
self.info('Searching for %s', requirement) skipped = {} dist = None def find(env, req): for dist in env[req.key]: if ((dist.precedence == DEVELOP_DIST) and (not develop_ok)): if (dist not in skipped): self.warn('Skipping development or system egg: %s', dist) skipped[dist] = 1 continue if ((dist in req) and ((dist.precedence <= SOURCE_DIST) or (not source))): return dist if force_scan: self.prescan() self.find_packages(requirement) dist = find(self, requirement) if (local_index is not None): dist = (dist or find(local_index, requirement)) if ((dist is None) and (self.to_scan is not None)): self.prescan() dist = find(self, requirement) if ((dist is None) and (not force_scan)): self.find_packages(requirement) dist = find(self, requirement) if (dist is None): self.warn('No local packages or download links found for %s%s', ((source and 'a source distribution of ') or ''), requirement) self.info('Best match: %s', dist) return dist.clone(location=self.download(dist.location, tmpdir))
'Obtain a file suitable for fulfilling `requirement` DEPRECATED; use the ``fetch_distribution()`` method now instead. For backward compatibility, this routine is identical but returns the ``location`` of the downloaded distribution instead of a distribution object.'
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
    dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
    if dist is not None:
        return dist.location
    return None
'Constructor. Args: host: The hostname the connection was made to. cert: The SSL certificate (as a dictionary) the host returned. reason: user readable error reason.'
def __init__(self, host, cert, reason):
    httplib.HTTPException.__init__(self)
    self.host = host
    self.cert = cert
    self.reason = reason
'Return (scheme, user, password, host/port) given a URL or an authority. If a URL is supplied, it must have an authority (host:port) component. According to RFC 3986, having an authority component means the URL must have two slashes after the scheme: >>> _parse_proxy(\'file:/ftp.example.com/\') Traceback (most recent call last): ValueError: proxy URL with no authority: \'file:/ftp.example.com/\' The first three items of the returned tuple may be None. Examples of authority parsing: >>> _parse_proxy(\'proxy.example.com\') (None, None, None, \'proxy.example.com\') >>> _parse_proxy(\'proxy.example.com:3128\') (None, None, None, \'proxy.example.com:3128\') The authority component may optionally include userinfo (assumed to be username:password): >>> _parse_proxy(\'joe:[email protected]\') (None, \'joe\', \'password\', \'proxy.example.com\') >>> _parse_proxy(\'joe:[email protected]:3128\') (None, \'joe\', \'password\', \'proxy.example.com:3128\') Same examples, but with URLs instead: >>> _parse_proxy(\'http://proxy.example.com/\') (\'http\', None, None, \'proxy.example.com\') >>> _parse_proxy(\'http://proxy.example.com:3128/\') (\'http\', None, None, \'proxy.example.com:3128\') >>> _parse_proxy(\'http://joe:[email protected]/\') (\'http\', \'joe\', \'password\', \'proxy.example.com\') >>> _parse_proxy(\'http://joe:[email protected]:3128\') (\'http\', \'joe\', \'password\', \'proxy.example.com:3128\') Everything after the authority is ignored: >>> _parse_proxy(\'ftp://joe:[email protected]/rubbish:3128\') (\'ftp\', \'joe\', \'password\', \'proxy.example.com\') Test for no trailing \'/\' case: >>> _parse_proxy(\'http://joe:[email protected]\') (\'http\', \'joe\', \'password\', \'proxy.example.com\')'
def _parse_proxy(self, proxy):
    scheme, r_scheme = splittype(proxy)
    if not r_scheme.startswith('/'):
        scheme = None
        authority = proxy
    else:
        if not r_scheme.startswith('//'):
            raise ValueError('proxy URL with no authority: %r' % proxy)
        end = r_scheme.find('/', 2)
        if end == -1:
            end = None
        authority = r_scheme[2:end]
    userinfo, hostport = splituser(authority)
    if userinfo is not None:
        user, password = splitpasswd(userinfo)
    else:
        user = password = None
    return scheme, user, password, hostport
'Processes HTTP responses. Args: request: An HTTP request object. response: An HTTP response object. Returns: The HTTP response object.'
def http_response(self, request, response):
    return response
'Starts an AppServer instance on this machine. Args: project_id: A string specifying a project ID.'
def post(self, project_id):
    try:
        config = json_decode(self.request.body)
    except ValueError:
        raise HTTPError(HTTPCodes.BAD_REQUEST, 'Payload must be valid JSON')
    if start_app(project_id, config) == BAD_PID:
        raise HTTPError(HTTPCodes.INTERNAL_ERROR, 'Unable to start application')