desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def parse_signature(self, sig, signode):
    """Transform a Python signature into RST nodes.

    Returns (fully qualified name of the thing, classname if any).

    If inside a class, the current class name is handled intelligently:
    * it is stripped from the displayed name if present
    * it is added to the full name (return value) if not present
    """
    m = py_sig_re.match(sig)
    if m is None:
        raise ValueError
    classname, name, arglist, retann = m.groups()

    if self.env.currclass:
        add_module = False
        if classname and classname.startswith(self.env.currclass):
            fullname = classname + name
            # class name is given again in the signature: strip the
            # current class prefix from the displayed name
            classname = classname[len(self.env.currclass):].lstrip('.')
        elif classname:
            # class name is given in the signature, but different from
            # the current class -- prepend the current class
            fullname = self.env.currclass + '.' + classname + name
        else:
            # no class name given; qualify with the current class
            fullname = self.env.currclass + '.' + name
    else:
        add_module = True
        fullname = (classname and classname + name) or name

    prefix = self.get_signature_prefix(sig)
    if prefix:
        signode += addnodes.desc_annotation(prefix, prefix)

    if classname:
        signode += addnodes.desc_addname(classname, classname)
    # the 'exceptions' module is special-cased: its members are not
    # prefixed with the module name
    elif add_module and self.env.config.add_module_names:
        modname = self.options.get('module', self.env.currmodule)
        if modname and modname != 'exceptions':
            nodetext = modname + '.'
            signode += addnodes.desc_addname(nodetext, nodetext)

    signode += addnodes.desc_name(name, name)
    if not arglist:
        if self.needs_arglist():
            # for callables, add an empty parameter list
            signode += addnodes.desc_parameterlist()
        if retann:
            signode += addnodes.desc_returns(retann, retann)
        return fullname, classname
    signode += addnodes.desc_parameterlist()

    # parse the argument list, tracking '[' ... ']' optional groups
    stack = [signode[-1]]
    for token in py_paramlist_re.split(arglist):
        if token == '[':
            opt = addnodes.desc_optional()
            stack[-1] += opt
            stack.append(opt)
        elif token == ']':
            try:
                stack.pop()
            except IndexError:
                raise ValueError
        elif not token or token == ',' or token.isspace():
            pass
        else:
            token = token.strip()
            stack[-1] += addnodes.desc_parameter(token, token)
    if len(stack) != 1:
        # unbalanced brackets
        raise ValueError
    if retann:
        signode += addnodes.desc_returns(retann, retann)
    return fullname, classname
def get_index_text(self, modname, name):
    """Return the text for the index entry of the object."""
    # abstract: each object type provides its own index text
    raise NotImplementedError('must be implemented in subclasses')
def parse_signature(self, sig, signode):
    """Transform a C (or C++) signature into RST nodes."""
    m = c_funcptr_sig_re.match(sig)
    if m is None:
        m = c_sig_re.match(sig)
    if m is None:
        raise ValueError('no match')
    rettype, name, arglist, const = m.groups()

    signode += addnodes.desc_type('', '')
    self._parse_type(signode[-1], rettype)
    try:
        # a 'Class::method' name is split into addname + name parts
        classname, funcname = name.split('::', 1)
        classname += '::'
        signode += addnodes.desc_addname(classname, classname)
        signode += addnodes.desc_name(funcname, funcname)
        # name (the full name) is still both parts
    except ValueError:
        signode += addnodes.desc_name(name, name)
    # for function pointers, the displayed name is the inner identifier
    m = c_funcptr_name_re.match(name)
    if m:
        name = m.group(1)

    if not arglist:
        if self.desctype == 'cfunction':
            # for functions, add an empty parameter list
            signode += addnodes.desc_parameterlist()
        if const:
            signode += addnodes.desc_addname(const, const)
        return name

    paramlist = addnodes.desc_parameterlist()
    # remove markup; NOTE(review): splitting on ',' mangles function
    # pointer types in the argument list -- apparently accepted here
    arglist = arglist.replace('`', '').replace('\\ ', '')
    args = arglist.split(',')
    for arg in args:
        arg = arg.strip()
        param = addnodes.desc_parameter('', '', noemph=True)
        try:
            ctype, argname = arg.rsplit(' ', 1)
        except ValueError:
            # no argument name given, only the type
            self._parse_type(param, arg)
        else:
            self._parse_type(param, ctype)
            param += nodes.emphasis(' ' + argname, ' ' + argname)
        paramlist += param
    signode += paramlist
    if const:
        signode += addnodes.desc_addname(const, const)
    return name
def parse_signature(self, sig, signode):
    """Transform an option description into RST nodes."""
    count = 0
    firstname = ''
    for m in option_desc_re.finditer(sig):
        optname, args = m.groups()
        if count:
            # separate multiple option aliases with commas
            signode += addnodes.desc_addname(', ', ', ')
        signode += addnodes.desc_name(optname, optname)
        signode += addnodes.desc_addname(args, args)
        if not count:
            firstname = optname
        count += 1
    if not firstname:
        # nothing in *sig* looked like an option
        raise ValueError
    return firstname
@staticmethod
def get_attr(obj, name, *defargs):
    """getattr() override for types such as Zope interfaces."""
    # use a registered special attribute getter if one matches the
    # object's type, otherwise fall back to safe_getattr()
    for typ, func in AutoDirective._special_attrgetters.iteritems():
        if isinstance(obj, typ):
            return func(obj, name, *defargs)
    return safe_getattr(obj, name, *defargs)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
    """Called to see if a member can be documented by this documenter."""
    # abstract: subclasses decide which member kinds they handle
    raise NotImplementedError('must be implemented in subclasses')
def add_line(self, line, source, *lineno):
    """Append one line of generated reST to the output."""
    # prepend the current indentation so nested content lines up
    self.directive.result.append(self.indent + line, source, *lineno)
def resolve_name(self, modname, parents, path, base):
    """Resolve the module and name of the object to document given by the
    arguments and the current module/class.

    Must return a pair of the module name and a chain of attributes; for
    example, it would return ``('zipfile', ['ZipFile', 'open'])`` for the
    ``zipfile.ZipFile.open`` method.
    """
    raise NotImplementedError('must be implemented in subclasses')
def parse_name(self):
    """Determine what module to import and what attribute to document.

    Returns True and sets *self.modname*, *self.objpath*, *self.fullname*,
    *self.args* and *self.retann* if parsing and resolving was successful.
    """
    # parse the definition in *self.name*; auto directives can contain
    # an explicit signature which then replaces the autogenerated one
    try:
        explicit_modname, path, base, args, retann = \
            py_ext_sig_re.match(self.name).groups()
    except AttributeError:
        # the regex did not match at all
        self.directive.warn('invalid signature for auto%s (%r)' %
                            (self.objtype, self.name))
        return False

    # an explicit "module::" prefix overrides name resolution
    if explicit_modname is not None:
        modname = explicit_modname[:-2]
        parents = (path and path.rstrip('.').split('.')) or []
    else:
        modname = None
        parents = []

    self.modname, self.objpath = \
        self.resolve_name(modname, parents, path, base)
    if not self.modname:
        return False

    self.args = args
    self.retann = retann
    self.fullname = ((self.modname or '') +
                     ((self.objpath and '.' + '.'.join(self.objpath)) or ''))
    return True
def import_object(self):
    """Import the object given by *self.modname* and *self.objpath* and
    set it as *self.object*.

    Returns True if successful, False if an error occurred.
    """
    try:
        __import__(self.modname)
        obj = self.module = sys.modules[self.modname]
        # walk the attribute chain down to the object itself
        for part in self.objpath:
            obj = self.get_attr(obj, part)
        self.object = obj
        return True
    except (SyntaxError, ImportError, AttributeError) as err:
        self.directive.warn(
            'autodoc can\'t import/find %s %r, it reported error: '
            '"%s", please check your spelling and sys.path' %
            (self.objtype, str(self.fullname), err))
        return False
def get_real_modname(self):
    """Get the real module name of an object to document.

    (It can differ from the name of the module through which the object
    was imported.)
    """
    # fall back to the import module when __module__ is absent or empty
    return self.get_attr(self.object, '__module__', None) or self.modname
def check_module(self):
    """Check if *self.object* is really defined in the module given by
    *self.modname*.
    """
    modname = self.get_attr(self.object, '__module__', None)
    # objects without a __module__ are accepted as belonging here
    if modname and modname != self.modname:
        return False
    return True
def format_args(self):
    """Format the argument signature of *self.object*.

    Should return None if the object does not have a signature.
    """
    # base implementation: no signature available
    return None
def format_signature(self):
    """Format the signature (arguments and return annotation) of the
    object.

    Let the user process it via the ``autodoc-process-signature`` event.
    """
    if self.args is not None:
        # signature given explicitly in the directive
        args = '(%s)' % self.args
    else:
        args = self.format_args()
    if args is None:
        return ''
    retann = self.retann

    # let extensions rewrite the signature
    result = self.env.app.emit_firstresult(
        'autodoc-process-signature', self.objtype, self.fullname,
        self.object, self.options, args, retann)
    if result:
        args, retann = result

    if args is not None:
        return args + ((retann and ' -> %s' % retann) or '')
    else:
        return ''
def add_directive_header(self, sig):
    """Add the directive header and options to the generated content."""
    directive = getattr(self, 'directivetype', self.objtype)
    # module-level objects are named by the module itself
    name_in_directive = '.'.join(self.objpath) or self.modname
    self.add_line(u'.. %s:: %s%s' % (directive, name_in_directive, sig),
                  '<autodoc>')
    if self.options.noindex:
        self.add_line(u' :noindex:', '<autodoc>')
    if self.objpath:
        # be explicit about the module, since e.g. .. class:: doesn't
        # support a prepended module name
        self.add_line(u' :module: %s' % self.modname, '<autodoc>')
def get_doc(self, encoding=None):
    """Decode and return lines of the docstring(s) for the object."""
    docstring = self.get_attr(self.object, '__doc__', None)
    if docstring:
        # make sure we have Unicode docstrings, then split into lines
        return [prepare_docstring(force_decode(docstring, encoding))]
    return []
def process_doc(self, docstrings):
    """Let the user process the docstrings before adding them."""
    for docstringlines in docstrings:
        if self.env.app:
            # let extensions preprocess docstrings in place
            self.env.app.emit('autodoc-process-docstring',
                              self.objtype, self.fullname, self.object,
                              self.options, docstringlines)
        for line in docstringlines:
            yield line
def add_content(self, more_content, no_docstring=False):
    """Add content from docstrings, attribute documentation and user."""
    # set sourcename and add content from attribute documentation
    if self.analyzer:
        # Py2 'unicode' builtin -- this file predates Python 3
        filename = unicode(self.analyzer.srcname,
                           sys.getfilesystemencoding(), 'replace')
        sourcename = u'%s:docstring of %s' % (filename, self.fullname)

        attr_docs = self.analyzer.find_attr_docs()
        if self.objpath:
            key = ('.'.join(self.objpath[:-1]), self.objpath[-1])
            if key in attr_docs:
                # a documented attribute: use the comment doc instead of
                # any docstring
                no_docstring = True
                docstrings = [attr_docs[key]]
                for i, line in enumerate(self.process_doc(docstrings)):
                    self.add_line(line, sourcename, i)
    else:
        sourcename = u'docstring of %s' % self.fullname

    # add content from docstrings
    if not no_docstring:
        encoding = self.analyzer and self.analyzer.encoding
        docstrings = self.get_doc(encoding)
        for i, line in enumerate(self.process_doc(docstrings)):
            self.add_line(line, sourcename, i)

    # add additional content (e.g. from document), if present
    if more_content:
        for line, src in zip(more_content.data, more_content.items):
            self.add_line(line, src[0], src[1])
def get_object_members(self, want_all):
    """Return `(members_check_module, members)` where `members` is a list
    of `(membername, member)` pairs of the members of *self.object*.

    If *want_all* is True, return all members.  Else, only return those
    members given by *self.options.members* (which may also be none).
    """
    if not want_all:
        if not self.options.members:
            return False, []
        # specific members requested by name
        ret = []
        for mname in self.options.members:
            try:
                ret.append((mname, self.get_attr(self.object, mname)))
            except AttributeError:
                self.directive.warn('missing attribute %s in object %s' %
                                    (mname, self.fullname))
        return False, ret
    elif self.options.inherited_members:
        # safe_getmembers() uses dir(), which also returns members of
        # all base classes
        return False, safe_getmembers(self.object)
    else:
        # only own members, taken from __dict__
        return False, sorted(
            [(mname, self.get_attr(self.object, mname, None))
             for mname in self.get_attr(self.object, '__dict__').keys()])
def filter_members(self, members, want_all):
    """Filter the given member list: members are skipped if

    - they are private (except if given explicitly)
    - they are undocumented (except if undoc-members is given)

    The user can override the skipping decision by connecting to the
    ``autodoc-skip-member`` event.
    """
    ret = []

    # search for members in source code too (empty namespace for modules)
    namespace = '.'.join(self.objpath)

    if self.analyzer:
        attr_docs = self.analyzer.find_attr_docs()
    else:
        attr_docs = {}

    for membername, member in members:
        # isattr is True if the member is documented as an attribute
        isattr = False
        if want_all and membername.startswith('_'):
            # ignore private members by default
            skip = True
        elif (namespace, membername) in attr_docs:
            # keep documented attributes
            skip = False
            isattr = True
        else:
            # ignore undocumented members unless :undoc-members: is given
            doc = self.get_attr(member, '__doc__', None)
            skip = not self.options.undoc_members and not doc

        # let the user override the skipping decision
        if self.env.app:
            skip_user = self.env.app.emit_firstresult(
                'autodoc-skip-member', self.objtype, membername,
                member, skip, self.options)
            if skip_user is not None:
                skip = skip_user
        if skip:
            continue
        ret.append((membername, member, isattr))
    return ret
def document_members(self, all_members=False):
    """Generate reST for member documentation.

    If *all_members* is True, do all members, else those given by
    *self.options.members*.
    """
    # set current namespace for finding members
    self.env.autodoc_current_module = self.modname
    if self.objpath:
        self.env.autodoc_current_class = self.objpath[0]

    want_all = (all_members or self.options.inherited_members or
                self.options.members is ALL)
    # find out which members are documentable
    members_check_module, members = self.get_object_members(want_all)

    # remove members given by exclude-members
    if self.options.exclude_members:
        members = [(membername, member)
                   for (membername, member) in members
                   if membername not in self.options.exclude_members]

    # collect a suitable documenter for each non-skipped member
    memberdocumenters = []
    for mname, member, isattr in self.filter_members(members, want_all):
        classes = [cls for cls in AutoDirective._registry.itervalues()
                   if cls.can_document_member(member, mname, isattr, self)]
        if not classes:
            # don't know how to document this member
            continue
        # prefer the documenter with the highest priority
        classes.sort(key=lambda cls: cls.priority)
        # give explicitly separated module name, so that members of
        # inner classes can be documented
        full_mname = self.modname + '::' + '.'.join(self.objpath + [mname])
        memberdocumenters.append(
            classes[-1](self.directive, full_mname, self.indent))

    if (self.options.member_order or
            self.env.config.autodoc_member_order) == 'groupwise':
        # sort by group; the stable sort keeps items within each group
        # in alphabetical order
        memberdocumenters.sort(key=lambda d: d.member_order)

    for documenter in memberdocumenters:
        documenter.generate(all_members=True,
                            real_modname=self.real_modname,
                            check_module=members_check_module)

    # reset current objects
    self.env.autodoc_current_module = None
    self.env.autodoc_current_class = None
def generate(self, more_content=None, real_modname=None,
             check_module=False, all_members=False):
    """Generate reST for the object given by *self.name*, and possibly
    members.

    If *more_content* is given, include that content.  If *real_modname*
    is given, use that module name to find attribute docs.  If
    *check_module* is True, only generate if the object is defined in the
    module name it is imported from.  If *all_members* is True, document
    all members.
    """
    if not self.parse_name():
        # need a module to import
        self.directive.warn(
            'don\'t know which module to import for autodocumenting '
            '%r (try placing a "module" or "currentmodule" directive '
            'in the document, or giving an explicit module name)'
            % self.name)
        return

    # now, import the module and get the object to document
    if not self.import_object():
        return

    # if no real module was given, figure out which one to use: the real
    # module is where the module analyzer looks up attribute docs
    self.real_modname = real_modname or self.get_real_modname()

    # try to also get a source code analyzer for attribute docs
    try:
        self.analyzer = ModuleAnalyzer.for_module(self.real_modname)
        # parse right now, to get PycodeErrors on parsing (results will
        # be cached anyway)
        self.analyzer.find_attr_docs()
    except PycodeError as err:
        # no source file -- e.g. for builtin and C modules
        self.analyzer = None
        # at least add the module's __file__ as a dependency
        if hasattr(self.module, '__file__') and self.module.__file__:
            self.directive.filename_set.add(self.module.__file__)
    else:
        self.directive.filename_set.add(self.analyzer.srcname)

    # check __module__ of object (for members not given explicitly)
    if check_module:
        if not self.check_module():
            return

    # make sure that the result starts with an empty line
    self.add_line(u'', '')

    # format the object's signature, if any
    try:
        sig = self.format_signature()
    except Exception as err:
        self.directive.warn('error while formatting signature for '
                            '%s: %s' % (self.fullname, err))
        sig = ''

    # generate the directive header and options, if applicable
    self.add_directive_header(sig)
    self.add_line(u'', '<autodoc>')

    # e.g. the module directive doesn't have content
    self.indent += self.content_indent

    # add all content (from docstrings, attribute docs etc.)
    self.add_content(more_content)

    # document members, if possible
    self.document_members(all_members)
def __init__(self, class_names, currmodule, show_builtins=False):
    """*class_names* is a list of child classes to show bases from.

    If *show_builtins* is True, then Python builtins will be shown in the
    graph.
    """
    self.class_names = class_names
    self.classes = self._import_classes(class_names, currmodule)
    self.all_classes = self._all_classes(self.classes)
    if len(self.all_classes) == 0:
        raise InheritanceException('No classes found for '
                                   'inheritance diagram')
    self.show_builtins = show_builtins
def _import_class_or_module(self, name, currmodule):
    """Import a class using its fully-qualified *name*."""
    try:
        path, base = class_sig_re.match(name).groups()
    except ValueError:
        raise InheritanceException('Invalid class or module %r specified '
                                   'for inheritance diagram' % name)

    fullname = (path or '') + base
    path = (path and path.rstrip('.')) or ''

    # two possibilities: the name is the qualified name of a module, or
    # of a class inside a module
    try:
        module = __import__(fullname)
        # must import the fully qualified name, since 'import a' does
        # not necessarily make 'a.b' available
        todoc = sys.modules[fullname]
    except ImportError:
        if not path:
            if currmodule:
                # try the current module
                path = currmodule
            else:
                raise InheritanceException(
                    'Could not import class %r specified for '
                    'inheritance diagram' % base)
        try:
            module = __import__(path)
            todoc = getattr(sys.modules[path], base)
        except (ImportError, AttributeError):
            raise InheritanceException(
                'Could not import class or module %r specified for '
                'inheritance diagram' % (path + '.' + base))

    if inspect.isclass(todoc):
        return [todoc]
    elif inspect.ismodule(todoc):
        # for a module, collect all classes defined in that module
        classes = []
        for cls in todoc.__dict__.values():
            if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
                classes.append(cls)
        return classes
    raise InheritanceException('%r specified for inheritance diagram is '
                               'not a class or module' % name)
def _import_classes(self, class_names, currmodule):
    """Import a list of classes."""
    classes = []
    # each entry may expand to several classes (e.g. a module name)
    for cname in class_names:
        classes.extend(self._import_class_or_module(cname, currmodule))
    return classes
def _all_classes(self, classes):
    """Return a list of all classes that are ancestors of *classes*."""
    # use a dict as an ordered-ish set of visited classes
    visited = {}

    def walk(cls):
        visited[cls] = None
        for base in cls.__bases__:
            if base not in visited:
                walk(base)

    for cls in classes:
        walk(cls)
    return visited.keys()
def class_name(self, cls, parts=0):
    """Given a class object, return a fully-qualified name.

    This works for things I've tested in matplotlib so far, but may not
    be completely general.
    """
    module = cls.__module__
    if module == '__builtin__':
        # Py2 builtins are shown without a module prefix
        fullname = cls.__name__
    else:
        fullname = '%s.%s' % (module, cls.__name__)
    if parts == 0:
        return fullname
    # keep only the last *parts* dotted components
    name_parts = fullname.split('.')
    return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
    """Get all of the class names involved in the graph."""
    return [self.class_name(cls) for cls in self.all_classes]
def generate_dot(self, name, parts=0, urls=None, env=None,
                 graph_attrs=None, node_attrs=None, edge_attrs=None):
    """Generate a graphviz dot graph from the classes that were passed
    in to __init__.

    *name* is the name of the graph.

    *urls* is a dictionary mapping class names to HTTP URLs.

    *graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
    key/value pairs to pass on as graphviz properties.
    """
    # use None sentinels instead of mutable {} defaults (the previous
    # defaults could be shared and mutated across calls); behavior for
    # callers passing dicts is unchanged
    if urls is None:
        urls = {}
    g_attrs = self.default_graph_attrs.copy()
    n_attrs = self.default_node_attrs.copy()
    e_attrs = self.default_edge_attrs.copy()
    if graph_attrs:
        g_attrs.update(graph_attrs)
    if node_attrs:
        n_attrs.update(node_attrs)
    if edge_attrs:
        e_attrs.update(edge_attrs)
    if env:
        g_attrs.update(env.config.inheritance_graph_attrs)
        n_attrs.update(env.config.inheritance_node_attrs)
        e_attrs.update(env.config.inheritance_edge_attrs)

    res = []
    res.append('digraph %s {\n' % name)
    res.append(self._format_graph_attrs(g_attrs))

    for cls in self.all_classes:
        # NOTE(review): __builtins__ is a dict when this module is
        # imported, but the module object in __main__ -- assumed imported
        if not self.show_builtins and cls in __builtins__.values():
            continue
        # use a separate variable for the node name; the original code
        # shadowed the graph *name* parameter here
        nodename = self.class_name(cls, parts)

        # write the node
        this_node_attrs = n_attrs.copy()
        url = urls.get(self.class_name(cls))
        if url is not None:
            this_node_attrs['URL'] = '"%s"' % url
        res.append(' "%s" [%s];\n' %
                   (nodename, self._format_node_attrs(this_node_attrs)))

        # write the edges to the base classes
        for base in cls.__bases__:
            if not self.show_builtins and base in __builtins__.values():
                continue
            base_name = self.class_name(base, parts)
            res.append(' "%s" -> "%s" [%s];\n' %
                       (base_name, nodename,
                        self._format_node_attrs(e_attrs)))
    res.append('}\n')
    return ''.join(res)
def setup_extension(self, extension):
    """Import and setup a Sphinx extension module.

    No-op if called twice.
    """
    if extension in self._extensions:
        return
    try:
        mod = __import__(extension, None, None, ['setup'])
    except ImportError as err:
        raise ExtensionError('Could not import extension %s' % extension,
                             err)
    if not hasattr(mod, 'setup'):
        # not fatal: warn, but still register the module
        self.warn('extension %r has no setup() function; is it really '
                  'a Sphinx extension module?' % extension)
    else:
        mod.setup(self)
    self._extensions[extension] = mod
def import_object(self, objname, source=None):
    """Import an object from a 'module.name' string."""
    try:
        module, name = objname.rsplit('.', 1)
    except ValueError as err:
        raise ExtensionError('Invalid full object name %s' % objname +
                             ((source and ' (needed for %s)' % source)
                              or ''), err)
    try:
        return getattr(__import__(module, None, None, [name]), name)
    except ImportError as err:
        raise ExtensionError('Could not import %s' % module +
                             ((source and ' (needed for %s)' % source)
                              or ''), err)
    except AttributeError as err:
        raise ExtensionError('Could not find %s' % objname +
                             ((source and ' (needed for %s)' % source)
                              or ''), err)
def init(self, builder, theme=None, dirs=None):
    """Called by the builder to initialize the template system.

    *builder* is the builder object; you'll probably want to look at the
    value of ``builder.config.templates_path``.

    *theme* is a :class:`sphinx.theming.Theme` object or None; in the
    latter case, *dirs* can be list of fixed directories to look for
    templates.
    """
    raise NotImplementedError('must be implemented in subclasses')
def newest_template_mtime(self):
    """Called by the builder to determine if output files are outdated
    because of template changes.

    Return the mtime of the newest template file that was changed.  The
    default implementation returns ``0``.
    """
    # default: templates are never considered outdated
    return 0
def render(self, template, context):
    """Called by the builder to render a template given as a filename
    with a specified context (a Python dictionary).
    """
    raise NotImplementedError('must be implemented in subclasses')
def render_string(self, template, context):
    """Called by the builder to render a template given as a string with
    a specified context (a Python dictionary).
    """
    raise NotImplementedError('must be implemented in subclasses')
def is_inline(self, node):
    """Check whether a node represents an inline element."""
    # a node is inline when its parent is a docutils TextElement
    return isinstance(node.parent, nodes.TextElement)
def visit_line_block(self, node):
    """line-block:

    * whitespace (including linebreaks) is significant
    * inline markup is supported.
    * serif typeface
    """
    self.body.append('{\\raggedright{}')
    # make the whitespace handling literal until the block is closed
    self.literal_whitespace = 1
def visit_option_argument(self, node):
    """The delimiter between an option and its argument."""
    # default to a single space when the node carries no delimiter
    self.body.append(node.get('delimiter', ' '))
def clear_doc(self, docname):
    """Remove all traces of a source file in the inventory."""
    if docname in self.all_docs:
        self.all_docs.pop(docname, None)
        self.metadata.pop(docname, None)
        self.dependencies.pop(docname, None)
        self.titles.pop(docname, None)
        self.longtitles.pop(docname, None)
        self.tocs.pop(docname, None)
        self.toc_secnumbers.pop(docname, None)
        self.toc_num_entries.pop(docname, None)
        self.toctree_includes.pop(docname, None)
        self.filemodules.pop(docname, None)
        self.indexentries.pop(docname, None)
        self.glob_toctrees.discard(docname)
        self.numbered_toctrees.discard(docname)
        self.images.purge_doc(docname)
        self.dlfiles.purge_doc(docname)

        # iterate over snapshots (list(...)) because entries are deleted
        # during iteration; the original relied on Py2's items() returning
        # a list, which breaks on live dict views
        for subfn, fnset in list(self.files_to_rebuild.items()):
            fnset.discard(docname)
            if not fnset:
                del self.files_to_rebuild[subfn]
        for fullname, (fn, _) in list(self.descrefs.items()):
            if fn == docname:
                del self.descrefs[fullname]
        for modname, (fn, _, _, _) in list(self.modules.items()):
            if fn == docname:
                del self.modules[modname]
        for labelname, (fn, _, _) in list(self.labels.items()):
            if fn == docname:
                del self.labels[labelname]
        for key, (fn, _) in list(self.reftargets.items()):
            if fn == docname:
                del self.reftargets[key]
        for key, (fn, _) in list(self.progoptions.items()):
            if fn == docname:
                del self.progoptions[key]
        # version changes are filtered in place, no deletion of keys
        for version, changes in self.versionchanges.items():
            new = [change for change in changes if change[1] != docname]
            changes[:] = new
def doc2path(self, docname, base=True, suffix=None):
    """Return the filename for the document name.

    If *base* is True, return absolute path under self.srcdir.
    If *base* is None, return relative path to self.srcdir.
    If *base* is a path string, return absolute path under that.
    If *suffix* is not None, add it instead of config.source_suffix.
    """
    suffix = suffix or self.config.source_suffix
    # docnames use SEP internally; translate to the OS separator
    native = docname.replace(SEP, path.sep)
    if base is True:
        return path.join(self.srcdir, native) + suffix
    elif base is None:
        return native + suffix
    else:
        return path.join(base, native) + suffix
def find_files(self, config):
    """Find all source files in the source dir and put them in
    self.found_docs.
    """
    # exclusion patterns are given with SEP; convert to OS separators
    exclude_dirs = [d.replace(SEP, path.sep) for d in config.exclude_dirs]
    exclude_trees = [d.replace(SEP, path.sep)
                     for d in config.exclude_trees]
    self.found_docs = set(get_matching_docs(
        self.srcdir, config.source_suffix,
        exclude_docs=set(config.unused_docs),
        exclude_dirs=exclude_dirs,
        exclude_trees=exclude_trees,
        exclude_dirnames=['_sources'] + config.exclude_dirnames))
def get_outdated_files(self, config_changed):
    """Return (added, changed, removed) sets."""
    # files no longer present are removed
    removed = set(self.all_docs) - self.found_docs

    added = set()
    changed = set()
    if config_changed:
        # config values affect e.g. substitutions: reread everything
        added = self.found_docs
    else:
        for docname in self.found_docs:
            if docname not in self.all_docs:
                added.add(docname)
                continue
            # if the doctree file is missing, rebuild
            if not path.isfile(self.doc2path(docname, self.doctreedir,
                                             '.doctree')):
                changed.add(docname)
                continue
            # check the mtime of the document itself
            mtime = self.all_docs[docname]
            newmtime = path.getmtime(self.doc2path(docname))
            if newmtime > mtime:
                changed.add(docname)
                continue
            # finally, check the mtimes of the dependencies
            for dep in self.dependencies.get(docname, ()):
                try:
                    # join does the right thing when dep is absolute too
                    deppath = path.join(self.srcdir, dep)
                    if not path.isfile(deppath):
                        changed.add(docname)
                        break
                    depmtime = path.getmtime(deppath)
                    if depmtime > mtime:
                        changed.add(docname)
                        break
                except EnvironmentError:
                    # a dependency we cannot stat counts as changed
                    changed.add(docname)
                    break
    return added, changed, removed
def update(self, config, srcdir, doctreedir, app=None):
    """(Re-)read all files new or changed since last update.

    Returns a summary, the total count of documents to reread and an
    iterator that yields docnames as it processes them.  Store all
    environment docnames in the canonical format (ie using SEP as a
    separator in place of os.path.sep).
    """
    config_changed = False
    if self.config is None:
        msg = '[new config] '
        config_changed = True
    else:
        # check if a config value was changed that affects how doctrees
        # are read ('env' rebuild condition)
        for key, descr in config.values.iteritems():
            if descr[1] != 'env':
                continue
            if self.config[key] != config[key]:
                msg = '[config changed] '
                config_changed = True
                break
        else:
            msg = ''
        # 'extensions' is handled separately by the config class
        if self.config.extensions != config.extensions:
            msg = '[extensions changed] '
            config_changed = True

    # the source and doctree directories may have been relocated
    self.srcdir = srcdir
    self.doctreedir = doctreedir
    self.find_files(config)
    self.config = config

    added, changed, removed = self.get_outdated_files(config_changed)

    # if files were added or removed, all documents with globbed
    # toctrees must be reread
    if added or removed:
        changed.update(self.glob_toctrees)

    msg += '%s added, %s changed, %s removed' % (len(added), len(changed),
                                                 len(removed))

    def update_generator():
        self.app = app

        # clear all files no longer present
        for docname in removed:
            if app:
                app.emit('env-purge-doc', self, docname)
            self.clear_doc(docname)

        # read all new and changed files
        to_read = added | changed
        for docname in sorted(to_read):
            yield docname
            self.read_doc(docname, app=app)

        if config.master_doc not in self.all_docs:
            self.warn(None, 'master file %s not found' %
                      self.doc2path(config.master_doc))

        self.app = None

        if app:
            app.emit('env-updated', self)

    return msg, len(added | changed), update_generator()
def warn_and_replace(self, error):
    """Custom decoding error handler that warns and replaces."""
    # find the enclosing line of the offending bytes
    linestart = error.object.rfind('\n', 0, error.start)
    lineend = error.object.find('\n', error.start)
    if lineend == -1:
        lineend = len(error.object)
    lineno = error.object.count('\n', 0, error.start) + 1
    # show the bad span marked with >>> <<< inside its line
    self.warn(self.docname, 'undecodable source characters, '
              'replacing with "?": %r' %
              (error.object[linestart + 1:error.start] + '>>>' +
               error.object[error.start:error.end] + '<<<' +
               error.object[error.end:lineend]),
              lineno)
    return (u'?', error.end)
def read_doc(self, docname, src_path=None, save_parsed=True, app=None):
    """Parse a file and add/update inventory entries for the doctree.

    If *src_path* is given, read from a different source file.
    """
    # remove all inventory entries for that file
    if app:
        app.emit('env-purge-doc', self, docname)
    self.clear_doc(docname)

    if src_path is None:
        src_path = self.doc2path(docname)

    if self.config.default_role:
        role_fn, messages = roles.role(self.config.default_role, english,
                                       0, dummy_reporter)
        if role_fn:
            roles._roles[''] = role_fn
        else:
            self.warn(docname, 'default role %s not found' %
                      self.config.default_role)

    self.docname = docname
    self.settings['input_encoding'] = self.config.source_encoding
    self.settings['trim_footnote_reference_space'] = \
        self.config.trim_footnote_reference_space

    # register the 'sphinx' error handler once; the original registered
    # the identical handler twice in a row (redundant duplicate call)
    codecs.register_error('sphinx', self.warn_and_replace)

    class SphinxSourceClass(FileInput):
        def decode(self_, data):
            return data.decode(self_.encoding, 'sphinx')

        def read(self_):
            data = FileInput.read(self_)
            if app:
                # let extensions modify the data just read
                arg = [data]
                app.emit('source-read', docname, arg)
                data = arg[0]
            if self.config.rst_epilog:
                return data + '\n' + self.config.rst_epilog + '\n'
            else:
                return data

    # publish manually
    pub = Publisher(reader=SphinxStandaloneReader(),
                    writer=SphinxDummyWriter(),
                    source_class=SphinxSourceClass,
                    destination_class=NullOutput)
    pub.set_components(None, 'restructuredtext', None)
    pub.process_programmatic_settings(None, self.settings, None)
    pub.set_source(None, src_path)
    pub.set_destination(None, None)
    try:
        pub.publish()
        doctree = pub.document
    except UnicodeError as err:
        raise SphinxError(str(err))
    self.filter_messages(doctree)
    self.process_dependencies(docname, doctree)
    self.process_images(docname, doctree)
    self.process_downloads(docname, doctree)
    self.process_metadata(docname, doctree)
    self.create_title_from(docname, doctree)
    self.note_labels_from(docname, doctree)
    self.note_indexentries_from(docname, doctree)
    self.note_citations_from(docname, doctree)
    self.build_toc_from(docname, doctree)

    # store the reading time, used to find outdated files later
    self.all_docs[docname] = time.time()

    if app:
        app.emit('doctree-read', doctree)

    # make the doctree picklable: drop unpicklable references
    doctree.reporter = None
    doctree.transformer = None
    doctree.settings.warning_stream = None
    doctree.settings.env = None
    doctree.settings.record_dependencies = None
    for metanode in doctree.traverse(MetaBody.meta):
        # docutils' meta nodes aren't picklable because the class is
        # nested, so rewrite them to our own class
        metanode.__class__ = addnodes.meta

    # cleanup of per-document state
    self.docname = None
    self.currmodule = None
    self.currclass = None
    self.gloss_entries = set()

    if save_parsed:
        # save the parsed doctree
        doctree_filename = self.doc2path(docname, self.doctreedir,
                                         '.doctree')
        dirname = path.dirname(doctree_filename)
        if not path.isdir(dirname):
            os.makedirs(dirname)
        f = open(doctree_filename, 'wb')
        try:
            pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
        finally:
            f.close()
    else:
        return doctree
def filter_messages(self, doctree):
    """Filter system messages from a doctree."""
    # keep_warnings keeps warnings and above (level >= 2), otherwise
    # everything below severe (level 5) is dropped
    filterlevel = (self.config.keep_warnings and 2) or 5
    for node in doctree.traverse(nodes.system_message):
        if node['level'] < filterlevel:
            node.parent.remove(node)
def process_dependencies(self, docname, doctree):
    """Process docutils-generated dependency info."""
    cwd = os.getcwd()
    # the dummy component makes relative_path treat srcdir as a file's
    # directory
    frompath = path.join(path.normpath(self.srcdir), 'dummy')
    deps = doctree.settings.record_dependencies
    if not deps:
        return
    for dep in deps.list:
        relpath = relative_path(frompath,
                                path.normpath(path.join(cwd, dep)))
        self.dependencies.setdefault(docname, set()).add(relpath)
def process_downloads(self, docname, doctree):
    """Process downloadable file paths."""
    docdir = path.dirname(self.doc2path(docname, base=None))
    for node in doctree.traverse(addnodes.download_reference):
        targetname = node['reftarget']
        if targetname.startswith('/') or targetname.startswith(os.sep):
            # absolute path, relative to the source dir
            filepath = targetname[1:]
        else:
            filepath = path.normpath(path.join(docdir, node['reftarget']))
        self.dependencies.setdefault(docname, set()).add(filepath)
        if not os.access(path.join(self.srcdir, filepath), os.R_OK):
            self.warn(docname, 'download file not readable: %s' % filepath,
                      getattr(node, 'line', None))
            continue
        uniquename = self.dlfiles.add_file(docname, filepath)
        node['filename'] = uniquename
def process_images(self, docname, doctree):
    """Process and rewrite image URIs."""
    docdir = path.dirname(self.doc2path(docname, base=None))
    for node in doctree.traverse(nodes.image):
        # candidates maps mimetypes to image paths; the writer picks the
        # best one.  '*' means a single unconditional candidate, '?' a
        # nonlocal URI.
        node['candidates'] = candidates = {}
        imguri = node['uri']
        if imguri.find('://') != -1:
            self.warn(docname, 'nonlocal image URI found: %s' % imguri,
                      node.line)
            candidates['?'] = imguri
            continue
        # absolute or relative path to the source dir
        if imguri.startswith('/') or imguri.startswith(os.sep):
            imgpath = path.normpath(imguri[1:])
        else:
            imgpath = path.normpath(path.join(docdir, imguri))
        node['uri'] = imgpath
        if imgpath.endswith(os.extsep + '*'):
            # wildcard: collect one candidate per matching file
            for filename in glob(path.join(self.srcdir, imgpath)):
                new_imgpath = relative_path(self.srcdir, filename)
                if filename.lower().endswith('.pdf'):
                    candidates['application/pdf'] = new_imgpath
                elif filename.lower().endswith('.svg'):
                    candidates['image/svg+xml'] = new_imgpath
                else:
                    try:
                        f = open(filename, 'rb')
                        try:
                            imgtype = imghdr.what(f)
                        finally:
                            f.close()
                    except (OSError, IOError):
                        self.warn(docname,
                                  'image file %s not readable' % filename)
                    else:
                        # bugfix: only use imgtype when the file was
                        # actually readable -- previously it could be
                        # unbound (NameError) or stale from a previous
                        # loop iteration when open() failed
                        if imgtype:
                            candidates['image/' + imgtype] = new_imgpath
        else:
            candidates['*'] = imgpath
        # map image paths to unique image names (so that they can be put
        # into a single directory)
        for imgpath in candidates.itervalues():
            self.dependencies.setdefault(docname, set()).add(imgpath)
            if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
                self.warn(docname, 'image file not readable: %s' % imgpath,
                          node.line)
                continue
            self.images.add_file(docname, imgpath)
def process_metadata(self, docname, doctree):
    """Process the docinfo part of the doctree as metadata."""
    self.metadata[docname] = md = {}
    try:
        docinfo = doctree[0]
    except IndexError:
        # probably an empty document
        return
    if docinfo.__class__ is not nodes.docinfo:
        # nothing to see here
        return
    for node in docinfo:
        if node.__class__ is nodes.author:
            # handled specially by docutils
            md['author'] = node.astext()
        elif node.__class__ is nodes.field:
            name, body = node
            md[name.astext()] = body.astext()
    # docinfo has been consumed; remove it from the doctree
    del doctree[0]
def create_title_from(self, docname, document):
    """Add a title node to the document (just copy the first section
    title), and store that title in the environment.

    The "long" title (``self.longtitles``) keeps an explicit ``title``
    metadata value if present; the short title is always the first section
    heading (filtered through SphinxContentsFilter).
    """
    titlenode = nodes.title()
    longtitlenode = titlenode
    # NOTE: has_key() checks the node's *attributes*; ``in`` on a docutils
    # Element would test children instead, so this must stay has_key()
    if document.has_key('title'):
        longtitlenode = nodes.title()
        longtitlenode += nodes.Text(document['title'])
    for node in document.traverse(nodes.section):
        # use only the first section's title
        visitor = SphinxContentsFilter(document)
        node[0].walkabout(visitor)
        titlenode += visitor.get_entry_text()
        break
    else:
        # document has no section at all
        titlenode += nodes.Text('<no title>')
    self.titles[docname] = titlenode
    self.longtitles[docname] = longtitlenode
def note_toctree(self, docname, toctreenode):
    """Note a TOC tree directive in a document and gather information
    about file relations from it."""
    if toctreenode['glob']:
        self.glob_toctrees.add(docname)
    if toctreenode.get('numbered'):
        self.numbered_toctrees.add(docname)
    # every included file can trigger a rebuild of the including document;
    # also remember the include order for global toctree resolution
    included = toctreenode['includefiles']
    for subdoc in included:
        self.files_to_rebuild.setdefault(subdoc, set()).add(docname)
    self.toctree_includes.setdefault(docname, []).extend(included)
def build_toc_from(self, docname, document):
    """Build a TOC from the doctree and store it in the inventory.

    Also registers any toctree directives found along the way (via
    note_toctree) and records the number of TOC entries.
    """
    # one-element list so the nested function can mutate the counter
    numentries = [0]
    try:
        # a custom tocdepth metadata value overrides the default (0 = all)
        maxdepth = int(self.metadata[docname].get('tocdepth', 0))
    except ValueError:
        maxdepth = 0
    def traverse_in_section(node, cls):
        'Like traverse(), but stay within the same section.'
        result = []
        if isinstance(node, cls):
            result.append(node)
        for child in node.children:
            if isinstance(child, nodes.section):
                # do not descend into subsections
                continue
            result.extend(traverse_in_section(child, cls))
        return result
    def build_toc(node, depth=1):
        # recursively build a bullet list of section entries
        entries = []
        for sectionnode in node:
            if (not isinstance(sectionnode, nodes.section)):
                # a toctree directive between sections: copy it into the TOC
                for toctreenode in traverse_in_section(sectionnode, addnodes.toctree):
                    item = toctreenode.copy()
                    entries.append(item)
                    self.note_toctree(docname, toctreenode)
                continue
            title = sectionnode[0]
            # copy the contents of the section title, but without references
            # and unnecessary stuff
            visitor = SphinxContentsFilter(document)
            title.walkabout(visitor)
            nodetext = visitor.get_entry_text()
            if (not numentries[0]):
                # the very first entry links to the page itself, not an anchor
                anchorname = ''
            else:
                anchorname = ('#' + sectionnode['ids'][0])
            numentries[0] += 1
            reference = nodes.reference('', '', refuri=docname, anchorname=anchorname, *nodetext)
            para = addnodes.compact_paragraph('', '', reference)
            item = nodes.list_item('', para)
            if ((maxdepth == 0) or (depth < maxdepth)):
                item += build_toc(sectionnode, (depth + 1))
            entries.append(item)
        if entries:
            return nodes.bullet_list('', *entries)
        return []
    toc = build_toc(document)
    if toc:
        self.tocs[docname] = toc
    else:
        # keep an empty list so lookups never fail
        self.tocs[docname] = nodes.bullet_list('')
    self.toc_num_entries[docname] = numentries[0]
def get_toc_for(self, docname):
    """Return a TOC nodetree -- for use on the same page only!"""
    toc = self.tocs[docname].deepcopy()
    # since the TOC is shown on the page it refers to, only the anchor
    # part of each reference is needed
    for refnode in toc.traverse(nodes.reference):
        refnode['refuri'] = refnode['anchorname']
    return toc
def get_toctree_for(self, docname, builder, collapse):
    """Return the global TOC nodetree."""
    master = self.get_doctree(self.config.master_doc)
    for toctreenode in master.traverse(addnodes.toctree):
        resolved = self.resolve_toctree(docname, builder, toctreenode,
                                        prune=True, collapse=collapse)
        if resolved is not None:
            # use the first toctree in the master document that resolves
            return resolved
def get_doctree(self, docname):
    """Read the doctree for a file from the pickle and return it."""
    filename = self.doc2path(docname, self.doctreedir, '.doctree')
    picklefile = open(filename, 'rb')
    try:
        doctree = pickle.load(picklefile)
    finally:
        picklefile.close()
    # the environment and the reporter are not pickled -- re-attach them
    doctree.settings.env = self
    doctree.reporter = Reporter(self.doc2path(docname), 2, 4,
                                stream=WarningStream(self._warnfunc))
    return doctree
def get_and_resolve_doctree(self, docname, builder, doctree=None,
                            prune_toctrees=True):
    """Read the doctree from the pickle, resolve cross-references and
    toctrees and return it."""
    if doctree is None:
        doctree = self.get_doctree(docname)
    # resolve all pending cross-references first
    self.resolve_references(doctree, docname, builder)
    # then replace every toctree node by its resolved form (or nothing)
    for toctreenode in doctree.traverse(addnodes.toctree):
        result = self.resolve_toctree(docname, builder, toctreenode,
                                      prune=prune_toctrees)
        if result is not None:
            toctreenode.replace_self(result)
        else:
            toctreenode.replace_self([])
    return doctree
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0, titles_only=False, collapse=False):
    """Resolve a *toctree* node into individual bullet lists with titles
    as items, returning None (if no containing titles are found) or a new
    node.

    If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
    to the value of the *maxdepth* option on the *toctree* node.
    If *titles_only* is True, only toplevel document titles will be in the
    resulting tree.
    If *collapse* is True, all branches not containing docname will
    be collapsed.
    """
    if toctree.get('hidden', False):
        # hidden toctrees produce no output at all
        return None
    def _walk_depth(node, depth, maxdepth):
        'Utility: Cut a TOC at a specified depth.'
        # iterate over a copy since children may be removed while walking
        for subnode in node.children[:]:
            if isinstance(subnode, (addnodes.compact_paragraph, nodes.list_item)):
                subnode['classes'].append(('toctree-l%d' % (depth - 1)))
                _walk_depth(subnode, depth, maxdepth)
            elif isinstance(subnode, nodes.bullet_list):
                if ((maxdepth > 0) and (depth > maxdepth)):
                    subnode.parent.replace(subnode, [])
                else:
                    _walk_depth(subnode, (depth + 1), maxdepth)
                    # collapse branches that are not "current"
                    if (collapse and (depth > 1) and ('current' not in subnode.parent['classes'])):
                        subnode.parent.remove(subnode)
            elif isinstance(subnode, nodes.reference):
                # mark the whole branch leading to the current page
                if ((subnode['refuri'] == docname) and (not subnode['anchorname'])):
                    p = subnode
                    while p:
                        p['classes'].append('current')
                        p = p.parent
    def _entries_from_toctree(toctreenode, separate=False, subtree=False):
        'Return TOC entries for a toctree node.'
        refs = [(e[0], str(e[1])) for e in toctreenode['entries']]
        entries = []
        for (title, ref) in refs:
            try:
                if url_re.match(ref):
                    # external URL: make a single-entry list with a link
                    reference = nodes.reference('', '', refuri=ref, anchorname='', *[nodes.Text(title)])
                    para = addnodes.compact_paragraph('', '', reference)
                    item = nodes.list_item('', para)
                    toc = nodes.bullet_list('', item)
                elif (ref == 'self'):
                    # 'self' refers to the document from which this toctree
                    # originates
                    ref = toctreenode['parent']
                    if (not title):
                        title = self.titles[ref].astext()
                    reference = nodes.reference('', '', refuri=ref, anchorname='', *[nodes.Text(title)])
                    para = addnodes.compact_paragraph('', '', reference)
                    item = nodes.list_item('', para)
                    toc = nodes.bullet_list('', item)
                else:
                    toc = self.tocs[ref].deepcopy()
                    if (title and toc.children and (len(toc.children) == 1)):
                        # an explicit entry title overrides the document title
                        child = toc.children[0]
                        for refnode in child.traverse(nodes.reference):
                            if ((refnode['refuri'] == ref) and (not refnode['anchorname'])):
                                refnode.children = [nodes.Text(title)]
                    if (not toc.children):
                        # empty toc means: no titles will show up in the toctree
                        self.warn(docname, ("toctree contains reference to document %r that doesn't have a title: no link will be generated" % ref))
            except KeyError:
                # this is raised if the included file does not exist
                self.warn(docname, ('toctree contains reference to nonexisting document %r' % ref))
            else:
                if titles_only:
                    # keep only the toplevel entries (and any sub-toctrees)
                    for toplevel in toc:
                        if (len(toplevel) > 1):
                            subtrees = toplevel.traverse(addnodes.toctree)
                            toplevel[1][:] = subtrees
                # resolve all sub-toctrees recursively, in place
                for toctreenode in toc.traverse(addnodes.toctree):
                    i = (toctreenode.parent.index(toctreenode) + 1)
                    for item in _entries_from_toctree(toctreenode, subtree=True):
                        toctreenode.parent.insert(i, item)
                        i += 1
                    toctreenode.parent.remove(toctreenode)
                if separate:
                    entries.append(toc)
                else:
                    entries.extend(toc.children)
        if ((not subtree) and (not separate)):
            ret = nodes.bullet_list()
            ret += entries
            return [ret]
        return entries
    maxdepth = (maxdepth or toctree.get('maxdepth', (-1)))
    tocentries = _entries_from_toctree(toctree, separate=False)
    if (not tocentries):
        return None
    newnode = addnodes.compact_paragraph('', '', *tocentries)
    newnode['toctree'] = True
    # prune the tree to maxdepth and add the "current" classes
    _walk_depth(newnode, 1, ((prune and maxdepth) or 0))
    # set the target paths in the toctrees (they are not known at TOC
    # generation time)
    for refnode in newnode.traverse(nodes.reference):
        if (not url_re.match(refnode['refuri'])):
            refnode['refuri'] = (builder.get_relative_uri(docname, refnode['refuri']) + refnode['anchorname'])
    return newnode
def assign_section_numbers(self):
    """Assign a section number to each heading under a numbered toctree.

    Returns the list of docnames whose section numbers changed since the
    last build (and therefore need rewriting).
    """
    rewrite_needed = []
    old_secnumbers = self.toc_secnumbers
    self.toc_secnumbers = {}
    # NOTE: both helpers close over ``numstack``, which is (re)bound in the
    # loop at the bottom of this method before they are ever called
    def _walk_toc(node, secnums, titlenode=None):
        # titlenode is the title of the document, it will get assigned a
        # secnumber too, so that it shows up in next/prev/parent rellinks
        for subnode in node.children:
            if isinstance(subnode, nodes.bullet_list):
                numstack.append(0)
                _walk_toc(subnode, secnums, titlenode)
                numstack.pop()
                titlenode = None
            elif isinstance(subnode, nodes.list_item):
                _walk_toc(subnode, secnums, titlenode)
                titlenode = None
            elif isinstance(subnode, addnodes.compact_paragraph):
                numstack[(-1)] += 1
                secnums[subnode[0]['anchorname']] = subnode[0]['secnumber'] = tuple(numstack)
                if titlenode:
                    titlenode['secnumber'] = tuple(numstack)
                    titlenode = None
            elif isinstance(subnode, addnodes.toctree):
                _walk_toctree(subnode)
    def _walk_toctree(toctreenode):
        for (title, ref) in toctreenode['entries']:
            if (url_re.match(ref) or (ref == 'self')):
                # don't mess with those
                continue
            if (ref in self.tocs):
                secnums = self.toc_secnumbers[ref] = {}
                _walk_toc(self.tocs[ref], secnums, self.titles.get(ref))
                if (secnums != old_secnumbers.get(ref)):
                    rewrite_needed.append(ref)
    for docname in self.numbered_toctrees:
        doctree = self.get_doctree(docname)
        for toctreenode in doctree.traverse(addnodes.toctree):
            if toctreenode.get('numbered'):
                # every numbered toctree gets a fresh counter stack
                numstack = [0]
                _walk_toctree(toctreenode)
    return rewrite_needed
def create_index(self, builder, _fixre=re.compile('(.*) ([(][^()]*[)])')):
    """Create the real index from the collected index entries.

    Returns a list of (letter, entries) pairs where entries is a list of
    (word, [links, subitems]) pairs, grouped by first letter.
    """
    new = {}
    def add_entry(word, subword, dic=new):
        # each entry is a two-element list: [list of links, dict of subentries]
        entry = dic.get(word)
        if (not entry):
            dic[word] = entry = [[], {}]
        if subword:
            add_entry(subword, '', dic=entry[1])
        else:
            try:
                entry[0].append(((builder.get_relative_uri('genindex', fn) + '#') + tid))
            except NoUri:
                # builder cannot produce a URI for this target -- skip the link
                pass
    for (fn, entries) in self.indexentries.iteritems():
        for (type, value, tid, alias) in entries:
            if (type == 'single'):
                # "entry; subentry" makes subentry a child of entry
                try:
                    (entry, subentry) = value.split(';', 1)
                except ValueError:
                    (entry, subentry) = (value, '')
                if (not entry):
                    self.warn(fn, ('invalid index entry %r' % value))
                    continue
                add_entry(entry.strip(), subentry.strip())
            elif (type == 'pair'):
                try:
                    (first, second) = map((lambda x: x.strip()), value.split(';', 1))
                    if ((not first) or (not second)):
                        raise ValueError
                except ValueError:
                    self.warn(fn, ('invalid pair index entry %r' % value))
                    continue
                # a pair is indexed under both orderings
                add_entry(first, second)
                add_entry(second, first)
            elif (type == 'triple'):
                try:
                    (first, second, third) = map((lambda x: x.strip()), value.split(';', 2))
                    if ((not first) or (not second) or (not third)):
                        raise ValueError
                except ValueError:
                    self.warn(fn, ('invalid triple index entry %r' % value))
                    continue
                # a triple is indexed under all three rotations
                add_entry(first, ((second + ' ') + third))
                add_entry(second, ((third + ', ') + first))
                add_entry(third, ((first + ' ') + second))
            else:
                self.warn(fn, ('unknown index entry type %r' % type))
    # sort the index entries; non-letter entries sort after the letters
    # (chr(127) sorts after all ASCII letters)
    def keyfunc(entry, lcletters=(string.ascii_lowercase + '_')):
        lckey = entry[0].lower()
        if (lckey[0:1] in lcletters):
            return (chr(127) + lckey)
        return lckey
    newlist = new.items()
    newlist.sort(key=keyfunc)
    # fixup entries: transform
    #   func() (in module foo)
    #   func() (in module bar)
    # into
    #   func()
    #     (in module foo)
    #     (in module bar)
    oldkey = ''
    oldsubitems = None
    i = 0
    while (i < len(newlist)):
        (key, (targets, subitems)) = newlist[i]
        if (not subitems):
            m = _fixre.match(key)
            if m:
                if (oldkey == m.group(1)):
                    # prefixes match: fold this entry into the previous one
                    # as a subitem, and drop it from the list
                    oldsubitems.setdefault(m.group(2), [[], {}])[0].extend(targets)
                    del newlist[i]
                    continue
                oldkey = m.group(1)
            else:
                oldkey = key
        oldsubitems = subitems
        i += 1
    # group the entries by first letter (Python 2 tuple-parameter syntax)
    def keyfunc((k, v), letters=(string.ascii_uppercase + '_')):
        # hack: mutate the subitems dicts to a sorted list while grouping
        v[1] = sorted(((si, se) for (si, (se, void)) in v[1].iteritems()))
        letter = k[0].upper()
        if (letter in letters):
            return letter
        else:
            return 'Symbols'
    return [(key, list(group)) for (key, group) in groupby(newlist, keyfunc)]
def check_consistency(self):
    """Do consistency checks."""
    for docname in sorted(self.all_docs):
        if docname in self.files_to_rebuild:
            continue
        if docname == self.config.master_doc:
            # the master file is not included anywhere by design
            continue
        self.warn(docname, "document isn't included in any toctree")
def find_desc(self, modname, classname, name, type, searchorder=0):
    """Find a description node matching "name", perhaps using the given
    module and/or classname.

    Returns a ``(fullname, (docname, desctype))`` pair, or ``(None, None)``
    if nothing matches.  With ``searchorder=1`` the most-qualified
    candidate (module.class.name) is preferred; otherwise the bare name
    wins first.
    """
    # strip call parentheses from e.g. "func()"
    if name[-2:] == '()':
        name = name[:-2]
    if not name:
        return None, None

    # C items are looked up verbatim (modulo pointer stars / spaces)
    if type[0] == 'c' and type not in ('class', 'const'):
        name = name.rstrip(' *')
        if name in self.descrefs and self.descrefs[name][1][0] == 'c':
            return name, self.descrefs[name]
        return None, None

    newname = None
    if searchorder == 1:
        # fully qualified names take precedence
        if modname and classname and \
               modname + '.' + classname + '.' + name in self.descrefs:
            newname = modname + '.' + classname + '.' + name
        elif modname and modname + '.' + name in self.descrefs:
            newname = modname + '.' + name
        elif name in self.descrefs:
            newname = name
    elif name in self.descrefs:
        newname = name
    elif modname and modname + '.' + name in self.descrefs:
        newname = modname + '.' + name
    elif modname and classname and \
             modname + '.' + classname + '.' + name in self.descrefs:
        newname = modname + '.' + classname + '.' + name
    elif type == 'exc' and '.' not in name and \
             'exceptions.' + name in self.descrefs:
        # "exceptions" is implicit for built-in exceptions
        newname = 'exceptions.' + name
    elif type in ('func', 'meth') and '.' not in name and \
             'object.' + name in self.descrefs:
        # special methods are documented on "object"
        newname = 'object.' + name
    if newname is None:
        return None, None
    return newname, self.descrefs[newname]
def find_keyword(self, keyword, avoid_fuzzy=False, cutoff=0.6, n=20):
    """Find keyword matches for a keyword. If there's an exact match,
    just return it, else return a list of fuzzy matches if avoid_fuzzy
    isn't True.

    Keywords searched are: first modules, then descrefs.

    Returns: None if nothing found
             (type, docname, anchorname) if exact match found
             list of (quality, type, docname, anchorname, description)
             if fuzzy
    """
    # exact match in modules?
    if (keyword in self.modules):
        (docname, title, system, deprecated) = self.modules[keyword]
        return ('module', docname, ('module-' + keyword))
    # exact match in description targets?
    if (keyword in self.descrefs):
        (docname, ref_type) = self.descrefs[keyword]
        return (ref_type, docname, keyword)
    if ('.' not in keyword):
        # unqualified names may be built-in exceptions or special methods
        if (('exceptions.' + keyword) in self.descrefs):
            (docname, ref_type) = self.descrefs[('exceptions.' + keyword)]
            return (ref_type, docname, ('exceptions.' + keyword))
        if (('object.' + keyword) in self.descrefs):
            (docname, ref_type) = self.descrefs[('object.' + keyword)]
            return (ref_type, docname, ('object.' + keyword))
    if avoid_fuzzy:
        # caller wants exact matches only
        return
    # fuzzy matching via difflib ratios
    s = difflib.SequenceMatcher()
    s.set_seq2(keyword.lower())
    def possibilities():
        # yield (type, docname, anchorname, description) for everything
        # that can be matched against
        for (title, (fn, desc, _, _)) in self.modules.iteritems():
            (yield ('module', fn, ('module-' + title), desc))
        for (title, (fn, desctype)) in self.descrefs.iteritems():
            (yield (desctype, fn, title, ''))
    def dotsearch(string):
        # yield all dotted suffixes, e.g. a.b.c -> a.b.c, b.c, c
        parts = string.lower().split('.')
        for idx in xrange(0, len(parts)):
            (yield '.'.join(parts[idx:]))
    result = []
    for (type, docname, title, desc) in possibilities():
        best_res = 0
        for part in dotsearch(title):
            s.set_seq1(part)
            # real_quick_ratio and quick_ratio are cheap upper bounds,
            # checked first to avoid the expensive ratio() where possible
            if ((s.real_quick_ratio() >= cutoff) and (s.quick_ratio() >= cutoff) and (s.ratio() >= cutoff) and (s.ratio() > best_res)):
                best_res = s.ratio()
        if best_res:
            result.append((best_res, type, docname, title, desc))
    # the n best matches, best quality first
    return heapq.nlargest(n, result)
def init(self):
    """Load necessary templates and perform initialization.

    The default implementation does nothing; builder subclasses override
    this hook as needed.
    """
    pass
def create_template_bridge(self):
    """Return the template bridge configured."""
    setting = self.config.template_bridge
    if not setting:
        # default: the built-in Jinja2-based loader
        from sphinx.jinja2glue import BuiltinTemplateLoader
        self.templates = BuiltinTemplateLoader()
        return
    bridge_class = self.app.import_object(setting, 'template_bridge setting')
    self.templates = bridge_class()
def get_target_uri(self, docname, typ=None):
    """Return the target URI for a document name (*typ* can be used to
    qualify the link characteristic for individual builders).

    Abstract: concrete builders must override this.
    """
    raise NotImplementedError
def get_relative_uri(self, from_, to, typ=None):
    """Return a relative URI between two source filenames.

    May raise environment.NoUri if there's no way to return a sensible
    URI.
    """
    base = self.get_target_uri(from_)
    return relative_uri(base, self.get_target_uri(to, typ))
def get_outdated_docs(self):
    """Return an iterable of output files that are outdated, or a string
    describing what an update build will build.

    If the builder does not output individual files corresponding to
    source files, return a string here.  If it does, return an iterable
    of those files that need to be written.

    Abstract: concrete builders must override this.
    """
    raise NotImplementedError
def post_process_images(self, doctree):
    """Pick the best candidate for all image URIs.

    For each image node, choose the first candidate matching this
    builder's supported image types (or the wildcard candidate) and
    register the chosen file for copying.
    """
    for node in doctree.traverse(nodes.image):
        if ('?' in node['candidates']):
            # nonlocal URI: leave it untouched
            continue
        if ('*' not in node['candidates']):
            for imgtype in self.supported_image_types:
                candidate = node['candidates'].get(imgtype, None)
                if candidate:
                    break
            else:
                # no candidate with a supported type
                self.warn(('no matching candidate for image URI %r' % node['uri']), ('%s:%s' % (node.source, getattr(node, 'line', ''))))
                continue
            node['uri'] = candidate
        else:
            # single concrete file, no type dispatch needed
            candidate = node['uri']
        if (candidate not in self.env.images):
            continue
        self.images[candidate] = self.env.images[candidate][1]
def load_i18n(self):
    """Load translated strings from the configured localedirs if enabled
    in the configuration.

    Catalogs found in later locale dirs are merged into the first one;
    falls back to NullTranslations if no catalog is available.
    """
    self.translator = None
    if self.config.language is not None:
        self.info(bold('loading translations [%s]... ' % self.config.language),
                  nonl=True)
        # None = system locale dirs; then the packaged and the
        # project-configured ones
        locale_dirs = ([None, path.join(package_dir, 'locale')] +
                       [path.join(self.srcdir, x)
                        for x in self.config.locale_dirs])
        for dir_ in locale_dirs:
            try:
                trans = gettext.translation('sphinx', localedir=dir_,
                                            languages=[self.config.language])
                if self.translator is None:
                    self.translator = trans
                else:
                    # BUG FIX: the gettext catalog attribute is
                    # ``_catalog``; the previous ``trans.catalog`` raised
                    # AttributeError (swallowed below), so catalogs from
                    # additional locale dirs were silently never merged
                    self.translator._catalog.update(trans._catalog)
            except Exception:
                # language not available in this dir -- try the next one
                pass
        if self.translator is not None:
            self.info('done')
        else:
            self.info('locale not available')
    if self.translator is None:
        self.translator = gettext.NullTranslations()
    self.translator.install(unicode=True)
    locale.init()
def load_env(self):
    """Set up the build environment."""
    if self.env:
        return
    if self.freshenv:
        # a fresh environment was explicitly requested
        self.env = BuildEnvironment(self.srcdir, self.doctreedir,
                                    self.config)
        self.env.find_files(self.config)
    else:
        try:
            self.info(bold('loading pickled environment... '), nonl=True)
            self.env = BuildEnvironment.frompickle(
                self.config,
                path.join(self.doctreedir, ENV_PICKLE_FILENAME))
            self.info('done')
        except Exception as err:
            # errno 2 = ENOENT: no pickle yet, which is normal on a
            # first build
            if type(err) is IOError and err.errno == 2:
                self.info('not found')
            else:
                self.info('failed: %s' % err)
            self.env = BuildEnvironment(self.srcdir, self.doctreedir,
                                        self.config)
            self.env.find_files(self.config)
    self.env.set_warnfunc(self.warn)
def build_all(self):
    """Build all source files.

    Passing None as docnames makes :meth:`build` write every document.
    """
    self.build(None, summary='all source files', method='all')
def build_specific(self, filenames):
    """Only rebuild as much as needed for changes in the *filenames*.

    Filenames are given as seen on the command line; they are normalized
    to source-relative, suffix-less, slash-separated docnames.
    """
    # bring the filenames to the canonical format, that is:
    # relative to srcdir, without source_suffix
    dirlen = (len(self.srcdir) + 1)
    to_write = []
    suffix = self.config.source_suffix
    for filename in filenames:
        filename = path.normpath(path.abspath(filename))
        if (not filename.startswith(self.srcdir)):
            self.warn(('file %r given on command line is not under the source directory, ignoring' % filename))
            continue
        if (not (path.isfile(filename) or path.isfile((filename + suffix)))):
            self.warn(('file %r given on command line does not exist, ignoring' % filename))
            continue
        # strip the srcdir prefix and the source suffix
        filename = filename[dirlen:]
        if filename.endswith(suffix):
            filename = filename[:(- len(suffix))]
        # docnames always use '/' as the separator
        filename = filename.replace(path.sep, SEP)
        to_write.append(filename)
    self.build(to_write, method='specific', summary=('%d source files given on command line' % len(to_write)))
def build_update(self):
    """Only rebuild what was changed or added since last build."""
    outdated = self.get_outdated_docs()
    if isinstance(outdated, str):
        # the builder returned a description string: rebuild everything
        self.build(['__all__'], outdated)
        return
    docs = list(outdated)
    self.build(docs,
               summary=('targets for %d source files that are out of date'
                        % len(docs)))
def build(self, docnames, summary=None, method='update'):
    """Main build method.

    First updates the environment, and then calls :meth:`write`.
    """
    if summary:
        self.info(bold(('building [%s]: ' % self.name)), nonl=1)
        self.info(summary)
    updated_docnames = set()
    # collect warnings while reading so they appear after the progress bar
    warnings = []
    self.env.set_warnfunc((lambda *args: warnings.append(args)))
    self.info(bold('updating environment: '), nonl=1)
    (msg, length, iterator) = self.env.update(self.config, self.srcdir, self.doctreedir, self.app)
    self.info(msg)
    for docname in self.status_iterator(iterator, 'reading sources... ', purple, length):
        updated_docnames.add(docname)
    # emit the collected warnings and restore the normal warning function
    for warning in warnings:
        self.warn(*warning)
    self.env.set_warnfunc(self.warn)
    doccount = len(updated_docnames)
    self.info(bold('looking for now-outdated files... '), nonl=1)
    # documents depending on updated ones are outdated too
    for docname in self.env.check_dependents(updated_docnames):
        updated_docnames.add(docname)
    outdated = (len(updated_docnames) - doccount)
    if outdated:
        self.info(('%d found' % outdated))
    else:
        self.info('none found')
    if updated_docnames:
        # save the environment so incremental builds can reuse it
        self.info(bold('pickling environment... '), nonl=True)
        self.env.topickle(path.join(self.doctreedir, ENV_PICKLE_FILENAME))
        self.info('done')
        # global actions
        self.info(bold('checking consistency... '), nonl=True)
        self.env.check_consistency()
        self.info('done')
    elif ((method == 'update') and (not docnames)):
        self.info(bold('no targets are out of date.'))
        return
    # another indirection to support builders that don't build
    # files individually
    self.write(docnames, list(updated_docnames), method)
    self.finish()
    status = (((self.app.statuscode == 0) and 'succeeded') or 'finished with problems')
    if self.app._warncount:
        self.info(bold(('build %s, %s warning%s.' % (status, self.app._warncount, (((self.app._warncount != 1) and 's') or '')))))
    else:
        self.info(bold(('build %s.' % status)))
def finish(self):
    """Finish the building process.

    The default implementation does nothing; builder subclasses override
    this hook to write indices, copy static files, etc.
    """
    pass
def render_partial(self, node):
    """Utility: Render a lone doctree node as HTML.

    Wraps the node in a minimal document and runs it through the HTML
    writer; returns the publish_parts dict (e.g. ['fragment'], ['title']).
    """
    doc = new_document('<partial node>')
    doc.append(node)
    return publish_parts(doc, source_class=DocTreeInput, reader=DoctreeReader(), writer=HTMLWriter(self), settings_overrides={'output_encoding': 'unicode'})
def get_doc_context(self, docname, body, metatags):
    """Collect items for the template context of a page.

    Builds prev/next/parents relations, relation links, titles, metadata
    and the local TOC for *docname* and returns them as a dict.
    """
    # find out relations
    prev = next = None
    parents = []
    rellinks = self.globalcontext['rellinks'][:]
    related = self.relations.get(docname)
    titles = self.env.titles
    if (related and related[2]):
        try:
            next = {'link': self.get_relative_uri(docname, related[2]), 'title': self.render_partial(titles[related[2]])['title']}
            rellinks.append((related[2], next['title'], 'N', _('next')))
        except KeyError:
            # the relation points to a document without a title
            next = None
    if (related and related[1]):
        try:
            prev = {'link': self.get_relative_uri(docname, related[1]), 'title': self.render_partial(titles[related[1]])['title']}
            rellinks.append((related[1], prev['title'], 'P', _('previous')))
        except KeyError:
            # the relation is (somehow) not in the TOC tree, handle
            # that gracefully
            prev = None
    while (related and related[0]):
        try:
            parents.append({'link': self.get_relative_uri(docname, related[0]), 'title': self.render_partial(titles[related[0]])['title']})
        except KeyError:
            pass
        related = self.relations.get(related[0])
    if parents:
        # remove link to the master file; we have a generic
        # "back to index" link already
        parents.pop()
    parents.reverse()
    # title rendered as HTML
    title = self.env.longtitles.get(docname)
    title = ((title and self.render_partial(title)['title']) or '')
    # the name for the copied source
    sourcename = ((self.config.html_copy_source and (docname + '.txt')) or '')
    # metadata for the document
    meta = self.env.metadata.get(docname)
    # local TOC and global TOC tree
    toc = self.render_partial(self.env.get_toc_for(docname))['fragment']
    return dict(parents=parents, prev=prev, next=next, title=title, meta=meta, body=body, metatags=metatags, rellinks=rellinks, sourcename=sourcename, toc=toc, display_toc=(self.env.toc_num_entries[docname] > 1))
def post_process_images(self, doctree):
    """Pick the best candidate for an image and link down-scaled images to
    their high res version.

    Extends the base implementation by wrapping scaled images in a
    reference to the original file.
    """
    Builder.post_process_images(self, doctree)
    for node in doctree.traverse(nodes.image):
        # only scaled images that are not already inside a link get wrapped
        if ((not node.has_key('scale')) or isinstance(node.parent, nodes.reference)):
            continue
        uri = node['uri']
        reference = nodes.reference()
        if (uri in self.images):
            # link to the copied image file in the output dir
            reference['refuri'] = posixpath.join(self.imgpath, self.images[uri])
        else:
            reference['refuri'] = uri
        node.replace_self(reference)
        reference.append(node)
@classmethod
def init_themes(cls, builder):
    """Search all theme paths for available themes.

    Populates ``cls.themes`` with name -> (path, zipfile-or-None) pairs;
    themes may be directories or zip files containing a theme.conf.
    """
    cls.themepath = list(builder.config.html_theme_path)
    cls.themepath.append(path.join(package_dir, 'themes'))
    # reversed so that earlier path entries override later ones
    for themedir in cls.themepath[::(-1)]:
        themedir = path.join(builder.confdir, themedir)
        if (not path.isdir(themedir)):
            continue
        for theme in os.listdir(themedir):
            if theme.lower().endswith('.zip'):
                try:
                    zfile = zipfile.ZipFile(path.join(themedir, theme))
                    if (THEMECONF not in zfile.namelist()):
                        continue
                    # strip the '.zip' extension for the theme name
                    tname = theme[:(-4)]
                    tinfo = zfile
                except Exception:
                    builder.warn(('file %r on theme path is not a valid zipfile or contains no theme' % theme))
                    continue
            else:
                # a directory theme must contain a theme.conf
                if (not path.isfile(path.join(themedir, theme, THEMECONF))):
                    continue
                tname = theme
                tinfo = None
            cls.themes[tname] = (path.join(themedir, theme), tinfo)
def get_confstr(self, section, name, default=NODEFAULT):
    """Return the value for a theme configuration setting, searching the
    base theme chain."""
    try:
        return self.themeconf.get(section, name)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        pass
    # not defined in this theme: defer to the base theme, if any
    if self.base is not None:
        return self.base.get_confstr(section, name, default)
    if default is NODEFAULT:
        raise ThemeError(('setting %s.%s occurs in none of the '
                          'searched theme configs') % (section, name))
    return default
def get_options(self, overrides):
    """Return a dictionary of theme options and their values.

    Options from base themes are overridden by derived themes; *overrides*
    must only contain options declared somewhere in the theme chain.
    """
    # build the theme chain from this theme down to the root base theme
    chain = [self.themeconf]
    base = self.base
    while (base is not None):
        chain.append(base.themeconf)
        base = base.base
    options = {}
    # apply base themes first so derived themes take precedence
    for conf in reversed(chain):
        try:
            options.update(conf.items('options'))
        except ConfigParser.NoSectionError:
            pass
    for (option, value) in overrides.iteritems():
        if (option not in options):
            raise ThemeError(('unsupported theme option %r given' % option))
        options[option] = value
    return options
def get_dirchain(self):
    """Return a list of theme directories, beginning with this theme's,
    then the base theme's, then that one's base theme's, etc."""
    dirs = []
    theme = self
    while theme is not None:
        dirs.append(theme.themedir)
        theme = theme.base
    return dirs
def cleanup(self):
    """Remove temporary directories."""
    if self.themedir_created:
        # the theme was unpacked into a temporary directory;
        # removal is best-effort only
        try:
            shutil.rmtree(self.themedir)
        except Exception:
            pass
    base = self.base
    if base:
        base.cleanup()
def __init__(self, grammar, convert=None):
    """Constructor.

    The grammar argument is a grammar.Grammar instance; see the grammar
    module for more information.

    The parser is not ready yet for parsing; you must call the setup()
    method to get it started.

    The optional convert argument is a function mapping concrete syntax
    tree nodes to abstract syntax tree nodes.  If not given, no conversion
    is done and the syntax tree produced is the concrete syntax tree.  If
    given, it must be a function of two arguments, the first being the
    grammar (a grammar.Grammar instance), and the second being the
    concrete syntax tree node to be converted.  The syntax tree is
    converted from the bottom up.

    A concrete syntax tree node is a (type, value, context, nodes) tuple,
    where type is the node type (a token or symbol number), value is None
    for symbols and a string for tokens, context is None or an opaque
    value used for error reporting (typically a (lineno, offset) pair),
    and nodes is a list of children for symbols, and None for tokens.

    An abstract syntax tree node may be anything; this is entirely up to
    the converter function.
    """
    self.grammar = grammar
    # identity conversion by default: keep the concrete syntax tree
    self.convert = (convert or (lambda grammar, node: node))
'Prepare for parsing. This *must* be called before starting to parse. The optional argument is an alternative start symbol; it defaults to the grammar\'s start symbol. You can use a Parser instance to parse any number of programs; each time you call setup() the parser is reset to an initial state determined by the (implicit or explicit) start symbol.'
def setup(self, start=None):
if (start is None): start = self.grammar.start newnode = (start, None, None, []) stackentry = (self.grammar.dfas[start], 0, newnode) self.stack = [stackentry] self.rootnode = None self.used_names = set()
def addtoken(self, type, value, context):
    """Add a token; return True iff this is the end of the program.

    Implements the pushdown-automaton step: shift the token if an arc
    matches, push a nonterminal DFA if the token is in its first set,
    or pop finished DFAs; raises ParseError on invalid input.
    """
    # map from token to label
    ilabel = self.classify(type, value, context)
    # loop until the token is shifted; may raise exceptions
    while True:
        (dfa, state, node) = self.stack[(-1)]
        (states, first) = dfa
        arcs = states[state]
        # look for a state with this label
        for (i, newstate) in arcs:
            (t, v) = self.grammar.labels[i]
            if (ilabel == i):
                # look it up in the list of labels; token labels are < 256
                assert (t < 256)
                # shift a token; we're done with it
                self.shift(type, value, newstate, context)
                # pop while we are in an accept-only state
                state = newstate
                while (states[state] == [(0, state)]):
                    self.pop()
                    if (not self.stack):
                        # done parsing!
                        return True
                    (dfa, state, node) = self.stack[(-1)]
                    (states, first) = dfa
                # done with this token
                return False
            elif (t >= 256):
                # see if it's a symbol and if we're in its first set
                itsdfa = self.grammar.dfas[t]
                (itsstates, itsfirst) = itsdfa
                if (ilabel in itsfirst):
                    # push a symbol
                    self.push(t, self.grammar.dfas[t], newstate, context)
                    break  # to continue the outer while loop
        else:
            if ((0, state) in arcs):
                # an accepting state, pop it and try something else
                self.pop()
                if (not self.stack):
                    # done parsing, but another token is input
                    raise ParseError('too much input', type, value, context)
            else:
                # no success finding a transition
                raise ParseError('bad input', type, value, context)
def classify(self, type, value, context):
    """Turn a token into a label.  (Internal)"""
    if type == token.NAME:
        # keep a record of all names used in the program
        self.used_names.add(value)
        # a NAME token may really be a reserved keyword
        label = self.grammar.keywords.get(value)
        if label is not None:
            return label
    label = self.grammar.tokens.get(type)
    if label is None:
        raise ParseError('bad token', type, value, context)
    return label
def shift(self, type, value, newstate, context):
    """Shift a token.  (Internal)"""
    dfa, state, node = self.stack[-1]
    # tokens are leaves: (type, value, context, None)
    rawnode = (type, value, context, None)
    converted = self.convert(self.grammar, rawnode)
    if converted is not None:
        # append the new leaf to the current node's child list
        node[-1].append(converted)
    self.stack[-1] = (dfa, newstate, node)
def push(self, type, newdfa, newstate, context):
    """Push a nonterminal.  (Internal)"""
    dfa, state, node = self.stack[-1]
    newnode = (type, None, context, [])
    # the current entry resumes at *newstate* once the nonterminal is done
    self.stack[-1] = (dfa, newstate, node)
    self.stack.append((newdfa, 0, newnode))
def pop(self):
    """Pop a nonterminal.  (Internal)"""
    popdfa, popstate, popnode = self.stack.pop()
    finished = self.convert(self.grammar, popnode)
    if finished is None:
        return
    if self.stack:
        # attach the finished node to its parent's child list
        dfa, state, node = self.stack[-1]
        node[-1].append(finished)
    else:
        # the start symbol is complete: this is the parse result
        self.rootnode = finished
        self.rootnode.used_names = self.used_names
def dump(self, filename):
    """Dump the grammar tables to a pickle file.

    The instance __dict__ is written with pickle protocol 2; use load()
    to restore it.
    """
    f = open(filename, 'wb')
    # close the file even if pickling fails (previously the handle leaked
    # on error; sibling code in this file uses try/finally consistently)
    try:
        pickle.dump(self.__dict__, f, 2)
    finally:
        f.close()
def load(self, filename):
    """Load the grammar tables from a pickle file.

    The pickled dict updates (not replaces) the instance __dict__.
    """
    f = open(filename, 'rb')
    # close the file even if unpickling fails (previously the handle
    # leaked on error; sibling code here uses try/finally consistently)
    try:
        d = pickle.load(f)
    finally:
        f.close()
    self.__dict__.update(d)
def report(self):
    """Dump the grammar tables to standard output, for debugging.

    Prints every table of the grammar (symbol maps, states, DFAs,
    labels and the start symbol) with pprint.
    """
    from pprint import pprint
    print 's2n'
    pprint(self.symbol2number)
    print 'n2s'
    pprint(self.number2symbol)
    print 'states'
    pprint(self.states)
    print 'dfas'
    pprint(self.dfas)
    print 'labels'
    pprint(self.labels)
    print 'start', self.start
def parse_tokens(self, tokens, debug=False):
    """Parse a series of tokens and return the syntax tree.

    Tracks line/column position to accumulate the whitespace/comment
    prefix attached to each significant token.
    """
    # XXX Move the prefix computation into a wrapper around tokenize.
    p = parse.Parser(self.grammar, self.convert)
    p.setup()
    lineno = 1
    column = 0
    type = value = start = end = line_text = None
    prefix = ''
    opmap = grammar.opmap
    for (type, value, start, end, line_text) in tokens:
        if (start != (lineno, column)):
            # there is a gap before this token: collect it as prefix
            assert ((lineno, column) <= start), ((lineno, column), start)
            (s_lineno, s_column) = start
            if (lineno < s_lineno):
                prefix += ('\n' * (s_lineno - lineno))
                lineno = s_lineno
                column = 0
            if (column < s_column):
                prefix += line_text[column:s_column]
                column = s_column
        if (type in (tokenize.COMMENT, tokenize.NL)):
            # comments and non-logical newlines become part of the prefix
            prefix += value
            (lineno, column) = end
            if value.endswith('\n'):
                lineno += 1
                column = 0
            continue
        if (type == token.OP):
            # map operator tokens to their specific grammar labels
            type = opmap[value]
        if p.addtoken(type, value, (prefix, start)):
            # parser says we are done
            break
        prefix = ''
        (lineno, column) = end
        if value.endswith('\n'):
            lineno += 1
            column = 0
    else:
        # we never broke out -- EOF is too soon (how can this happen???)
        raise parse.ParseError('incomplete input', type, value, line_text)
    return p.rootnode
def parse_stream_raw(self, stream, debug=False):
    """Parse a stream and return the syntax tree."""
    # tokenize lazily from the stream's readline and feed the tokens in
    token_stream = tokenize.generate_tokens(stream.readline)
    return self.parse_tokens(token_stream, debug)
def parse_stream(self, stream, debug=False):
    """Parse a stream and return the syntax tree.

    Thin convenience wrapper around parse_stream_raw().
    """
    return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, debug=False):
    """Parse a file and return the syntax tree."""
    fileobj = open(filename)
    try:
        tree = self.parse_stream(fileobj, debug)
    finally:
        # make sure the file is closed even if parsing fails
        fileobj.close()
    return tree
def parse_string(self, text, debug=False):
    """Parse a string and return the syntax tree.

    Splits *text* into lines with the module-level generate_lines()
    helper and tokenizes from there.
    """
    tokens = tokenize.generate_tokens(generate_lines(text).next)
    return self.parse_tokens(tokens, debug)
def visit_expr_stmt(self, node):
    """Visit an assignment which may have a special comment before it.

    Looks for a comment prefix immediately preceding an assignment
    (skipping INDENT/DEDENT leaves) and attaches it as a docstring.
    """
    if (_eq not in node.children):
        # not an assignment (implies no special comment)
        return
    pnode = node[0]
    prefix = pnode.get_prefix()
    # if the assignment is the first statement on a new indentation level,
    # its preceding whitespace/comments are attached to earlier leaves --
    # walk back over INDENT/DEDENT tokens to find them
    while (not prefix):
        pnode = pnode.get_prev_leaf()
        if ((not pnode) or (pnode.type not in (token.INDENT, token.DEDENT))):
            break
        prefix = pnode.get_prefix()
    prefix = prefix.decode(self.encoding)
    docstring = prepare_commentdoc(prefix)
    if docstring:
        self.add_docstring(node, docstring)