'Check good hashes against a file-like object. Raise HashMismatch if none match.'
def check_against_file(self, file):
return self.check_against_chunks(read_chunks(file))
'Return whether I know any known-good hashes.'
def __nonzero__(self):
return bool(self._allowed)
'Don\'t offer the ``hashes`` kwarg.'
def __init__(self):
super(MissingHashes, self).__init__(hashes={FAVORITE_HASH: []})
'Marshal cmd line args into a requirement set.'
@staticmethod
def populate_requirement_set(requirement_set, args, options, finder, session, name, wheel_cache):
    for filename in options.constraints:
        for req in parse_requirements(filename, constraint=True, finder=finder, options=options, session=session, wheel_cache=wheel_cache):
            requirement_set.add_requirement(req)
    for req in args:
        requirement_set.add_requirement(InstallRequirement.from_line(req, None, isolated=options.isolated_mode, wheel_cache=wheel_cache))
    for req in options.editables:
        requirement_set.add_requirement(InstallRequirement.from_editable(req, default_vcs=options.default_vcs, isolated=options.isolated_mode, wheel_cache=wheel_cache))
    found_req_in_file = False
    for filename in options.requirements:
        for req in parse_requirements(filename, finder=finder, options=options, session=session, wheel_cache=wheel_cache):
            found_req_in_file = True
            requirement_set.add_requirement(req)
    requirement_set.require_hashes = options.require_hashes
    if not (args or options.editables or found_req_in_file):
        opts = {'name': name}
        if options.find_links:
            msg = ('You must give at least one requirement to %(name)s (maybe you meant "pip %(name)s %(links)s"?)' % dict(opts, links=' '.join(options.find_links)))
        else:
            msg = ('You must give at least one requirement to %(name)s (see "pip help %(name)s")' % opts)
        logger.warning(msg)
'Create a package finder appropriate to this requirement command.'
def _build_package_finder(self, options, session):
    index_urls = [options.index_url] + options.extra_index_urls
    if options.no_index:
        logger.info('Ignoring indexes: %s', ','.join(index_urls))
        index_urls = []
    return PackageFinder(find_links=options.find_links, format_control=options.format_control, index_urls=index_urls, trusted_hosts=options.trusted_hosts, allow_all_prereleases=options.pre, process_dependency_links=options.process_dependency_links, session=session)
'A rule for ignoring issues'
def __init__(self, docname, lineno, issue, line):
    self.docname = docname
    self.lineno = lineno
    self.issue = issue
    self.line = line
'Determine whether this issue should be ignored.'
def is_ignored(self, line, lineno, issue):
    docname = self.docname
    for rule in self.rules:
        if rule.docname != docname:
            continue
        if rule.issue != issue:
            continue
        if rule.line not in line:
            continue
        if rule.lineno is not None and abs(rule.lineno - lineno) > 5:
            continue
        return True
    return False
'Load the database of previously ignored issues: a CSV file with exactly the same format as suspicious.csv. Fields: document name (normalized), line number, issue, surrounding text.'
def load_rules(self, filename):
    self.info('loading ignore rules... ', nonl=1)
    self.rules = rules = []
    try:
        f = open(filename, 'rb')
    except IOError:
        return
    for i, row in enumerate(csv.reader(f)):
        if len(row) != 4:
            raise ValueError('wrong format in %s, line %d: %s' % (filename, i + 1, row))
        docname, lineno, issue, text = row
        docname = docname.decode('utf-8')
        if lineno:
            lineno = int(lineno)
        else:
            lineno = None
        issue = issue.decode('utf-8')
        text = text.decode('utf-8')
        rule = Rule(docname, lineno, issue, text)
        rules.append(rule)
    f.close()
    self.info('done, %d rules loaded' % len(self.rules))
'Initial setup. If any of `reader`, `parser`, or `writer` are not specified, the corresponding ``set_...`` method should be called with a component name (`set_reader` sets the parser as well).'
def __init__(self, reader=None, parser=None, writer=None, source=None, source_class=io.FileInput, destination=None, destination_class=io.FileOutput, settings=None):
    self.document = None
    'The document tree (`docutils.nodes` objects).'
    self.reader = reader
    'A `docutils.readers.Reader` instance.'
    self.parser = parser
    'A `docutils.parsers.Parser` instance.'
    self.writer = writer
    'A `docutils.writers.Writer` instance.'
    for component in ('reader', 'parser', 'writer'):
        assert not isinstance(getattr(self, component), StringType), ('passed string "%s" as "%s" parameter; pass an instance, or use the "%s_name" parameter instead (in docutils.core.publish_* convenience functions).' % (getattr(self, component), component, component))
    self.source = source
    'The source of input data, a `docutils.io.Input` instance.'
    self.source_class = source_class
    'The class for dynamically created source objects.'
    self.destination = destination
    'The destination for docutils output, a `docutils.io.Output`\n instance.'
    self.destination_class = destination_class
    'The class for dynamically created destination objects.'
    self.settings = settings
    'An object containing Docutils settings as instance attributes.\n Set by `self.process_command_line()` or `self.get_settings()`.'
'Set `self.reader` by name.'
def set_reader(self, reader_name, parser, parser_name):
    reader_class = readers.get_reader_class(reader_name)
    self.reader = reader_class(parser, parser_name)
    self.parser = self.reader.parser
'Set `self.writer` by name.'
def set_writer(self, writer_name):
    writer_class = writers.get_writer_class(writer_name)
    self.writer = writer_class()
'Set and return default settings (overrides in `defaults` dict). Set components first (`self.set_reader` & `self.set_writer`). Explicitly setting `self.settings` disables command line option processing from `self.publish()`.'
def get_settings(self, usage=None, description=None, settings_spec=None, config_section=None, **defaults):
    option_parser = self.setup_option_parser(usage, description, settings_spec, config_section, **defaults)
    self.settings = option_parser.get_default_values()
    return self.settings
'Pass an empty list to `argv` to avoid reading `sys.argv` (the default). Set components first (`self.set_reader` & `self.set_writer`).'
def process_command_line(self, argv=None, usage=None, description=None, settings_spec=None, config_section=None, **defaults):
    option_parser = self.setup_option_parser(usage, description, settings_spec, config_section, **defaults)
    if argv is None:
        argv = sys.argv[1:]
    self.settings = option_parser.parse_args(argv)
'Process command line options and arguments (if `self.settings` not already set), run `self.reader` and then `self.writer`. Return `self.writer`\'s output.'
def publish(self, argv=None, usage=None, description=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=None):
    exit = None
    try:
        if self.settings is None:
            self.process_command_line(argv, usage, description, settings_spec, config_section, **(settings_overrides or {}))
        self.set_io()
        self.document = self.reader.read(self.source, self.parser, self.settings)
        self.apply_transforms()
        output = self.writer.write(self.document, self.destination)
        self.writer.assemble_parts()
    except SystemExit as error:
        exit = 1
        exit_status = error.code
    except Exception as error:
        if not self.settings:
            raise
        if self.settings.traceback:
            self.debugging_dumps()
            raise
        self.report_Exception(error)
        exit = 1
        exit_status = 1
    self.debugging_dumps()
    if (enable_exit_status and self.document and self.document.reporter.max_level >= self.settings.exit_status_level):
        sys.exit(self.document.reporter.max_level + 10)
    elif exit:
        sys.exit(exit_status)
    return output
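In practice this Publisher plumbing is usually driven through the docutils.core convenience functions, which build a Publisher, set the reader and writer by name, and then call publish(). A minimal sketch (the reStructuredText input below is made up for illustration):

# Minimal sketch: drive the Publisher machinery via docutils.core.publish_string.
from docutils.core import publish_string

rst_source = 'Title\n=====\n\nHello, *world*.\n'   # illustrative sample input
html = publish_string(source=rst_source, writer_name='html')
print(html[:60])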
'Parse `self.input` into a document tree.'
def parse(self):
    self.document = document = self.new_document()
    module_section = moduleparser.parse_module(self.input, self.source.source_path)
    module_section.walk(DocformatVisitor(self.document))
    visitor = DocstringFormattingVisitor(document=document, default_parser=self.default_parser)
    module_section.walk(visitor)
    self.document.append(module_section)
'Get a parser based on its name. We reuse parsers during this visitation, so parser instances are cached.'
def get_parser(self, parser_name):
    parser_name = parsers._parser_aliases.get(parser_name, parser_name)
    if not self.parsers.has_key(parser_name):
        cls = parsers.get_parser_class(parser_name)
        self.parsers[parser_name] = cls()
    return self.parsers[parser_name]
'Find the __docformat__ closest to this node (i.e., look in the class or module)'
def find_docformat(self, node):
    while node:
        if node.get('docformat'):
            return node['docformat']
        node = node.parent
    return self.default_parser
'Return a whitespace-normalized expression string from the right-hand side of an assignment at line `lineno`.'
def rhs(self, lineno):
    self.goto_line(lineno)
    while self.string != '=':
        self.next()
    self.stack = None
    while self.type != token.NEWLINE and self.string != ';':
        if self.string == '=' and not self.stack:
            self.tokens = []
            self.stack = []
            self._type = None
            self._string = None
            self._backquote = 0
        else:
            self.note_token()
        self.next()
    self.next()
    text = ''.join(self.tokens)
    return text.strip()
'Return a dictionary mapping parameters to defaults (whitespace-normalized strings).'
def function_parameters(self, lineno):
    self.goto_line(lineno)
    while self.string != 'def':
        self.next()
    while self.string != '(':
        self.next()
    name = None
    default = None
    parameter_tuple = None
    self.tokens = []
    parameters = {}
    self.stack = [self.string]
    self.next()
    while 1:
        if len(self.stack) == 1:
            if parameter_tuple:
                name = ''.join(self.tokens).strip()
                self.tokens = []
                parameter_tuple = None
            if self.string in (')', ','):
                if name:
                    if self.tokens:
                        default_text = ''.join(self.tokens).strip()
                    else:
                        default_text = None
                    parameters[name] = default_text
                    self.tokens = []
                    name = None
                    default = None
                if self.string == ')':
                    break
            elif self.type == token.NAME:
                if name and default:
                    self.note_token()
                else:
                    assert name is None, ('token=%r name=%r parameters=%r stack=%r' % (self.token, name, parameters, self.stack))
                    name = self.string
            elif self.string == '=':
                assert name is not None, 'token=%r' % (self.token,)
                assert default is None, 'token=%r' % (self.token,)
                assert self.tokens == [], 'token=%r' % (self.token,)
                default = 1
                self._type = None
                self._string = None
                self._backquote = 0
            elif name:
                self.note_token()
            elif self.string == '(':
                parameter_tuple = 1
                self._type = None
                self._string = None
                self._backquote = 0
                self.note_token()
            else:
                assert (self.string in ('*', '**', '\n') or self.type == tokenize.COMMENT), 'token=%r' % (self.token,)
        else:
            self.note_token()
        self.next()
    return parameters
'Initialize the Reader instance. Several instance attributes are defined with dummy initial values. Subclasses may use these attributes as they wish.'
def __init__(self, parser=None, parser_name=None):
    self.parser = parser
    'A `parsers.Parser` instance shared by all doctrees. May be left\n unspecified if the document source determines the parser.'
    if parser is None and parser_name:
        self.set_parser(parser_name)
    self.source = None
    '`docutils.io` IO object, source of input data.'
    self.input = None
    'Raw text input; either a single string or, for more complex cases,\n a collection of strings.'
'Set `self.parser` by name.'
def set_parser(self, parser_name):
    parser_class = parsers.get_parser_class(parser_name)
    self.parser = parser_class()
'Parse `self.input` into a document tree.'
def parse(self):
    self.document = document = self.new_document()
    self.parser.parse(self.input, document)
    document.current_source = document.current_line = None
'Create and return a new empty document tree (root node).'
def new_document(self):
    document = utils.new_document(self.source.source_path, self.settings)
    return document
'`parser` should be ``None``.'
def __init__(self, parser=None, parser_name=None):
    if parser is None:
        parser = rst.Parser(rfc2822=1, inliner=self.inliner_class())
    standalone.Reader.__init__(self, parser, '')
'No parsing to do; refurbish the document tree instead. Overrides the inherited method.'
def parse(self):
    self.document = self.input
    self.document.transformer = transforms.Transformer(self.document)
    self.document.settings = self.settings
    self.document.reporter = utils.new_reporter(self.document.get('source', ''), self.document.settings)
'Return a shallow copy of `self`.'
def copy(self):
return self.__class__(defaults=self.__dict__)
'Call the validator function on applicable settings and evaluate the \'overrides\' option. Extends `optparse.Option.process`.'
def process(self, opt, value, values, parser):
    result = optparse.Option.process(self, opt, value, values, parser)
    setting = self.dest
    if setting:
        if self.validator:
            value = getattr(values, setting)
            try:
                new_value = self.validator(setting, value, parser)
            except Exception as error:
                raise (optparse.OptionValueError('Error in option "%s":\n %s: %s' % (opt, error.__class__.__name__, error)), None, sys.exc_info()[2])
            setattr(values, setting, new_value)
        if self.overrides:
            setattr(values, self.overrides, None)
    return result
'`components` is a list of Docutils components each containing a ``.settings_spec`` attribute. `defaults` is a mapping of setting default overrides.'
def __init__(self, components=(), defaults=None, read_config_files=None, *args, **kwargs):
    self.lists = {}
    'Set of list-type settings.'
    self.config_files = []
    'List of paths of applied configuration files.'
    optparse.OptionParser.__init__(self, option_class=Option, add_help_option=None, formatter=optparse.TitledHelpFormatter(width=78), *args, **kwargs)
    if not self.version:
        self.version = self.version_template
    self.relative_path_settings = list(self.relative_path_settings)
    self.components = (self,) + tuple(components)
    self.populate_from_components(self.components)
    self.set_defaults_from_dict(defaults or {})
    if read_config_files and not self.defaults['_disable_config']:
        try:
            config_settings = self.get_standard_config_settings()
        except ValueError as error:
            self.error(error)
        self.set_defaults_from_dict(config_settings.__dict__)
'For each component, first populate from the `SettingsSpec.settings_spec` structure, then from the `SettingsSpec.settings_defaults` dictionary. After all components have been processed, check for and populate from each component\'s `SettingsSpec.settings_default_overrides` dictionary.'
def populate_from_components(self, components):
    for component in components:
        if component is None:
            continue
        settings_spec = component.settings_spec
        self.relative_path_settings.extend(component.relative_path_settings)
        for i in range(0, len(settings_spec), 3):
            title, description, option_spec = settings_spec[i:i + 3]
            if title:
                group = optparse.OptionGroup(self, title, description)
                self.add_option_group(group)
            else:
                group = self
            for (help_text, option_strings, kwargs) in option_spec:
                option = group.add_option(help=help_text, *option_strings, **kwargs)
                if kwargs.get('action') == 'append':
                    self.lists[option.dest] = 1
        if component.settings_defaults:
            self.defaults.update(component.settings_defaults)
    for component in components:
        if component and component.settings_default_overrides:
            self.defaults.update(component.settings_default_overrides)
'Return list of config files, from environment or standard.'
def get_standard_config_files(self):
    try:
        config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep)
    except KeyError:
        config_files = self.standard_config_files
    expand = os.path.expanduser
    if 'HOME' not in os.environ:
        try:
            import pwd
        except ImportError:
            expand = lambda x: x
    return [expand(f) for f in config_files if f.strip()]
'Returns a dictionary containing appropriate config file settings.'
def get_config_file_settings(self, config_file):
    parser = ConfigParser()
    parser.read(config_file, self)
    self.config_files.extend(parser._files)
    base_path = os.path.dirname(config_file)
    applied = {}
    settings = Values()
    for component in self.components:
        if not component:
            continue
        for section in (tuple(component.config_section_dependencies or ()) + (component.config_section,)):
            if applied.has_key(section):
                continue
            applied[section] = 1
            settings.update(parser.get_section(section), self)
    make_paths_absolute(settings.__dict__, self.relative_path_settings, base_path)
    return settings.__dict__
'Store positional arguments as runtime settings.'
def check_values(self, values, args):
    values._source, values._destination = self.check_args(args)
    make_paths_absolute(values.__dict__, self.relative_path_settings, os.getcwd())
    values._config_files = self.config_files
    return values
'Needed to get custom `Values` instances.'
def get_default_values(self):
    defaults = Values(self.defaults)
    defaults._config_files = self.config_files
    return defaults
'Get an option by its dest. If you\'re supplying a dest which is shared by several options, it is undefined which option of those is returned. A KeyError is raised if there is no option with the supplied dest.'
def get_option_by_dest(self, dest):
    for group in self.option_groups + [self]:
        for option in group.option_list:
            if option.dest == dest:
                return option
    raise KeyError('No option with dest == %r.' % dest)
'Call the validator function and implement overrides on all applicable settings.'
def validate_settings(self, filename, option_parser):
    for section in self.sections():
        for setting in self.options(section):
            try:
                option = option_parser.get_option_by_dest(setting)
            except KeyError:
                continue
            if option.validator:
                value = self.get(section, setting, raw=1)
                try:
                    new_value = option.validator(setting, value, option_parser, config_parser=self, config_section=section)
                except Exception as error:
                    raise (ValueError('Error in config file "%s", section "[%s]":\n %s: %s\n %s = %s' % (filename, section, error.__class__.__name__, error, setting, value)), None, sys.exc_info()[2])
                self.set(section, setting, new_value)
            if option.overrides:
                self.set(section, option.overrides, None)
'Transform \'-\' to \'_\' so the cmdline form of option names can be used.'
def optionxform(self, optionstr):
return optionstr.lower().replace('-', '_')
'Return a given section as a dictionary (empty if the section doesn\'t exist).'
def get_section(self, section):
    section_dict = {}
    if self.has_section(section):
        for option in self.options(section):
            section_dict[option] = self.get(section, option, raw=1)
    return section_dict
'Return a copy of a title, with references, images, etc. removed.'
def copy_and_filter(self, node):
    visitor = ContentsFilter(self.document)
    node.walkabout(visitor)
    return visitor.get_entry_text()
'Remove an empty "References" section. Called after the `references.TargetNotes` transform is complete.'
def cleanup_callback(self, pending):
if (len(pending.parent) == 2): pending.parent.parent.remove(pending.parent)
'Initial setup for in-place document transforms.'
def __init__(self, document, startnode=None):
    self.document = document
    'The document tree to transform.'
    self.startnode = startnode
    'Node from which to begin the transform. For many transforms which\n apply to the document as a whole, `startnode` is not set (i.e. its\n value is `None`).'
    self.language = languages.get_language(document.settings.language_code)
    'Language module local to this document.'
'Override to apply the transform to the document tree.'
def apply(self, **kwargs):
raise NotImplementedError('subclass must override this method')
'Store a single transform. Use `priority` to override the default. `kwargs` is a dictionary whose contents are passed as keyword arguments to the `apply` method of the transform. This can be used to pass application-specific data to the transform instance.'
def add_transform(self, transform_class, priority=None, **kwargs):
    if priority is None:
        priority = transform_class.default_priority
    priority_string = self.get_priority_string(priority)
    self.transforms.append((priority_string, transform_class, None, kwargs))
    self.sorted = 0
'Store multiple transforms, with default priorities.'
def add_transforms(self, transform_list):
    for transform_class in transform_list:
        priority_string = self.get_priority_string(transform_class.default_priority)
        self.transforms.append((priority_string, transform_class, None, {}))
    self.sorted = 0
'Store a transform with an associated `pending` node.'
def add_pending(self, pending, priority=None):
    transform_class = pending.transform
    if priority is None:
        priority = transform_class.default_priority
    priority_string = self.get_priority_string(priority)
    self.transforms.append((priority_string, transform_class, pending, {}))
    self.sorted = 0
'Return a string, `priority` combined with `self.serialno`. This ensures FIFO order on transforms with identical priority.'
def get_priority_string(self, priority):
    self.serialno += 1
    return '%03d-%03d' % (priority, self.serialno)
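A quick standalone illustration of why the combined string keeps FIFO order among transforms that share a priority (plain Python, not using the Transformer class):

# Two transforms added at priority 210 and one at priority 100.
# Sorting the zero-padded "priority-serial" strings keeps insertion order
# within the same priority, while the priority-100 transform still sorts first.
keys = ['210-001', '210-002', '100-003']
print(sorted(keys))   # ['100-003', '210-001', '210-002']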
'Store each component\'s default transforms, with default priorities. Also, store components by type name in a mapping for later lookup.'
def populate_from_components(self, components):
    for component in components:
        if component is None:
            continue
        self.add_transforms(component.get_transforms())
        self.components[component.component_type] = component
    self.sorted = 0
    unknown_reference_resolvers = []
    for i in components:
        unknown_reference_resolvers.extend(i.unknown_reference_resolvers)
    decorated_list = [(f.priority, f) for f in unknown_reference_resolvers]
    decorated_list.sort()
    self.unknown_reference_resolvers.extend([f[1] for f in decorated_list])
'Apply all of the stored transforms, in priority order.'
def apply_transforms(self):
    self.document.reporter.attach_observer(self.document.note_transform_message)
    while self.transforms:
        if not self.sorted:
            self.transforms.sort()
            self.transforms.reverse()
            self.sorted = 1
        priority, transform_class, pending, kwargs = self.transforms.pop()
        transform = transform_class(self.document, startnode=pending)
        transform.apply(**kwargs)
        self.applied.append((priority, transform_class, pending, kwargs))
'Given::

    <paragraph>
        <reference refname="direct internal">
            direct internal
    <target id="id1" name="direct internal">

The "refname" attribute is replaced by "refid" linking to the target\'s "id"::

    <paragraph>
        <reference refid="id1">
            direct internal
    <target id="id1" name="direct internal">'
def resolve_reference_ids(self, target):
    for name in target['names']:
        refid = self.document.nameids[name]
        reflist = self.document.refnames.get(name, [])
        if reflist:
            target.note_referenced_by(name=name)
        for ref in reflist:
            if ref.resolved:
                continue
            del ref['refname']
            ref['refid'] = refid
            ref.resolved = 1
'Assign numbers to autonumbered footnotes. For labeled autonumbered footnotes, copy the number over to corresponding footnote references.'
def number_footnotes(self, startnum):
    for footnote in self.document.autofootnotes:
        while 1:
            label = str(startnum)
            startnum += 1
            if not self.document.nameids.has_key(label):
                break
        footnote.insert(0, nodes.label('', label))
        for name in footnote['names']:
            for ref in self.document.footnote_refs.get(name, []):
                ref += nodes.Text(label)
                ref.delattr('refname')
                assert len(footnote['ids']) == len(ref['ids']) == 1
                ref['refid'] = footnote['ids'][0]
                footnote.add_backref(ref['ids'][0])
                self.document.note_refid(ref)
                ref.resolved = 1
        if not footnote['names'] and not footnote['dupnames']:
            footnote['names'].append(label)
            self.document.note_explicit_target(footnote, footnote)
            self.autofootnote_labels.append(label)
    return startnum
'Assign numbers to autonumbered footnote references.'
def number_footnote_references(self, startnum):
    i = 0
    for ref in self.document.autofootnote_refs:
        if ref.resolved or ref.hasattr('refid'):
            continue
        try:
            label = self.autofootnote_labels[i]
        except IndexError:
            msg = self.document.reporter.error('Too many autonumbered footnote references: only %s corresponding footnotes available.' % len(self.autofootnote_labels), base_node=ref)
            msgid = self.document.set_id(msg)
            for ref in self.document.autofootnote_refs[i:]:
                if ref.resolved or ref.hasattr('refname'):
                    continue
                prb = nodes.problematic(ref.rawsource, ref.rawsource, refid=msgid)
                prbid = self.document.set_id(prb)
                msg.add_backref(prbid)
                ref.replace_self(prb)
            break
        ref += nodes.Text(label)
        id = self.document.nameids[label]
        footnote = self.document.ids[id]
        ref['refid'] = id
        self.document.note_refid(ref)
        assert len(ref['ids']) == 1
        footnote.add_backref(ref['ids'][0])
        ref.resolved = 1
        i += 1
'Add symbols indexes to "[*]"-style footnotes and references.'
def symbolize_footnotes(self):
    labels = []
    for footnote in self.document.symbol_footnotes:
        reps, index = divmod(self.document.symbol_footnote_start, len(self.symbols))
        labeltext = self.symbols[index] * (reps + 1)
        labels.append(labeltext)
        footnote.insert(0, nodes.label('', labeltext))
        self.document.symbol_footnote_start += 1
        self.document.set_id(footnote)
    i = 0
    for ref in self.document.symbol_footnote_refs:
        try:
            ref += nodes.Text(labels[i])
        except IndexError:
            msg = self.document.reporter.error('Too many symbol footnote references: only %s corresponding footnotes available.' % len(labels), base_node=ref)
            msgid = self.document.set_id(msg)
            for ref in self.document.symbol_footnote_refs[i:]:
                if ref.resolved or ref.hasattr('refid'):
                    continue
                prb = nodes.problematic(ref.rawsource, ref.rawsource, refid=msgid)
                prbid = self.document.set_id(prb)
                msg.add_backref(prbid)
                ref.replace_self(prb)
            break
        footnote = self.document.symbol_footnotes[i]
        assert len(footnote['ids']) == 1
        ref['refid'] = footnote['ids'][0]
        self.document.note_refid(ref)
        footnote.add_backref(ref['ids'][0])
        i += 1
'Link manually-labeled footnotes and citations to/from their references.'
def resolve_footnotes_and_citations(self):
    for footnote in self.document.footnotes:
        for label in footnote['names']:
            if self.document.footnote_refs.has_key(label):
                reflist = self.document.footnote_refs[label]
                self.resolve_references(footnote, reflist)
    for citation in self.document.citations:
        for label in citation['names']:
            if self.document.citation_refs.has_key(label):
                reflist = self.document.citation_refs[label]
                self.resolve_references(citation, reflist)
'Transform the following tree::

    <node>
        <section>
            <title>

into ::

    <node>
        <title>

`node` is normally a document.'
def promote_title(self, node):
    assert not (len(node) and isinstance(node[0], nodes.title))
    section, index = self.candidate_index(node)
    if index is None:
        return None
    node.attributes.update(section.attributes)
    node[:] = (section[:1] + node[:index]) + section[1:]
    assert isinstance(node[0], nodes.title)
    return 1
'Transform the following node tree::

    <node>
        <title>
        <section>
            <title>

into ::

    <node>
        <title>
        <subtitle>'
def promote_subtitle(self, node):
    subsection, index = self.candidate_index(node)
    if index is None:
        return None
    subtitle = nodes.subtitle()
    subtitle.attributes.update(subsection.attributes)
    subtitle[:] = subsection[0][:]
    node[:] = (node[:1] + [subtitle]) + node[1:index] + subsection[1:]
    return 1
'Find and return the promotion candidate and its index. Return (None, None) if no valid candidate was found.'
def candidate_index(self, node):
    index = node.first_child_not_matching_class(nodes.PreBibliographic)
    if (index is None or len(node) > index + 1 or not isinstance(node[index], nodes.section)):
        return (None, None)
    else:
        return (node[index], index)
'Set document[\'title\'] metadata title from the following sources, listed in order of priority:

    * Existing document[\'title\'] attribute.
    * "title" setting.
    * Document title node (as promoted by promote_title).'
def set_metadata(self):
    if not self.document.hasattr('title'):
        if self.document.settings.title is not None:
            self.document['title'] = self.document.settings.title
        elif len(self.document) and isinstance(self.document[0], nodes.title):
            self.document['title'] = self.document[0].astext()
':Parameters:
    - `source`: The path to or description of the source data.
    - `report_level`: The level at or above which warning output will be sent to `stream`.
    - `halt_level`: The level at or above which `SystemMessage` exceptions will be raised, halting execution.
    - `debug`: Show debug (level=0) system messages?
    - `stream`: Where warning output is sent. Can be file-like (has a ``.write`` method), a string (file name, opened for writing), \'\' (empty string, for discarding all stream messages) or `None` (implies `sys.stderr`; default).
    - `encoding`: The encoding for stderr output.
    - `error_handler`: The error handler for stderr output encoding.'
def __init__(self, source, report_level, halt_level, stream=None, debug=0, encoding='ascii', error_handler='replace'):
    self.source = source
    'The path to or description of the source data.'
    self.encoding = encoding
    'The character encoding for the stderr output.'
    self.error_handler = error_handler
    'The character encoding error handler.'
    self.debug_flag = debug
    'Show debug (level=0) system messages?'
    self.report_level = report_level
    'The level at or above which warning output will be sent\n to `self.stream`.'
    self.halt_level = halt_level
    'The level at or above which `SystemMessage` exceptions\n will be raised, halting execution.'
    if stream is None:
        stream = sys.stderr
    elif type(stream) in (StringType, UnicodeType):
        if stream != '':
            if type(stream) == StringType:
                stream = open(stream, 'w')
            elif type(stream) == UnicodeType:
                stream = open(stream.encode(), 'w')
    self.stream = stream
    'Where warning output is sent.'
    self.observers = []
    'List of bound methods or functions to call with each system_message\n created.'
    self.max_level = -1
    'The highest level system message generated so far.'
'The `observer` parameter is a function or bound method which takes one argument, a `nodes.system_message` instance.'
def attach_observer(self, observer):
self.observers.append(observer)
'Return a system_message object. Raise an exception or generate a warning if appropriate.'
def system_message(self, level, message, *children, **kwargs):
    attributes = kwargs.copy()
    if kwargs.has_key('base_node'):
        source, line = get_source_line(kwargs['base_node'])
        del attributes['base_node']
        if source is not None:
            attributes.setdefault('source', source)
        if line is not None:
            attributes.setdefault('line', line)
    attributes.setdefault('source', self.source)
    msg = nodes.system_message(message, level=level, type=self.levels[level], *children, **attributes)
    if self.stream and (level >= self.report_level or (self.debug_flag and level == self.DEBUG_LEVEL)):
        msgtext = msg.astext().encode(self.encoding, self.error_handler)
        print >>self.stream, msgtext
    if level >= self.halt_level:
        raise SystemMessage(msg, level)
    if level > self.DEBUG_LEVEL or self.debug_flag:
        self.notify_observers(msg)
    self.max_level = max(level, self.max_level)
    return msg
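A hedged usage sketch tying the levels together: with report_level 2 and halt_level 4, info() stays silent, warning() and error() are written to the stream, and severe() raises SystemMessage. This assumes the Reporter constructor signature shown above is the one exposed by docutils.utils.

# Sketch only; assumes docutils.utils.Reporter takes
# (source, report_level, halt_level, stream=None, debug=0, ...) as above.
from docutils.utils import Reporter, SystemMessage

reporter = Reporter('example source', 2, 4)
reporter.info('not reported (level 1 < report_level 2)')
reporter.warning('written to stderr (level 2 >= report_level 2)')
try:
    reporter.severe('raises SystemMessage (level 4 >= halt_level 4)')
except SystemMessage:
    pass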
'Level-0, "DEBUG": an internal reporting issue. Typically, there is no effect on the processing. Level-0 system messages are handled separately from the others.'
def debug(self, *args, **kwargs):
if self.debug_flag: return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)
'Level-1, "INFO": a minor issue that can be ignored. Typically there is no effect on processing, and level-1 system messages are not reported.'
def info(self, *args, **kwargs):
return self.system_message(self.INFO_LEVEL, *args, **kwargs)
'Level-2, "WARNING": an issue that should be addressed. If ignored, there may be unpredictable problems with the output.'
def warning(self, *args, **kwargs):
return self.system_message(self.WARNING_LEVEL, *args, **kwargs)
'Level-3, "ERROR": an error that should be addressed. If ignored, the output will contain errors.'
def error(self, *args, **kwargs):
return self.system_message(self.ERROR_LEVEL, *args, **kwargs)
'Level-4, "SEVERE": a severe error that must be addressed. If ignored, the output will contain severe errors. Typically level-4 system messages are turned into exceptions which halt processing.'
def severe(self, *args, **kwargs):
return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
'Initialize the dependency list, automatically setting the output file to `output_file` (see `set_output()`) and adding all supplied dependencies.'
def __init__(self, output_file=None, dependencies=[]):
    self.set_output(output_file)
    for i in dependencies:
        self.add(i)
'Set the output file and clear the list of already added dependencies. `output_file` must be a string. The specified file is immediately overwritten. If output_file is \'-\', the output will be written to stdout. If it is None, no file output is done when calling add().'
def set_output(self, output_file):
    self.list = []
    if output_file == '-':
        self.file = sys.stdout
    elif output_file:
        self.file = open(output_file, 'w')
    else:
        self.file = None
'If the dependency `filename` has not already been added, append it to self.list and print it to self.file if self.file is not None.'
def add(self, filename):
    if filename not in self.list:
        self.list.append(filename)
        if self.file is not None:
            print >>self.file, filename
'Close the output file.'
def close(self):
    self.file.close()
    self.file = None
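A small usage sketch, assuming this is the DependencyList exposed by docutils.utils: each dependency is recorded once and mirrored to the chosen output.

# Hedged sketch: assumes docutils.utils.DependencyList is the class above.
from docutils.utils import DependencyList

deps = DependencyList(output_file='-')   # '-' echoes each new entry to stdout
deps.add('images/logo.png')
deps.add('images/logo.png')              # duplicate: ignored
print(deps.list)                         # ['images/logo.png']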
'Transforms required by this class. Override in subclasses.'
def get_transforms(self):
    if self.default_transforms != ():
        import warnings
        warnings.warn('default_transforms attribute deprecated.\nUse get_transforms() method instead.', DeprecationWarning)
        return list(self.default_transforms)
    return []
'Is `format` supported by this component? To be used by transforms to ask the dependent component if it supports a certain input context or output format.'
def supports(self, format):
return (format in self.supported)
'Decode a string, `data`, heuristically. Raise UnicodeError if unsuccessful.

The client application should call ``locale.setlocale`` at the beginning of processing::

    locale.setlocale(locale.LC_ALL, \'\')'
def decode(self, data):
    if self.encoding and self.encoding.lower() == 'unicode':
        assert isinstance(data, UnicodeType), 'input encoding is "unicode" but input is not a unicode object'
    if isinstance(data, UnicodeType):
        return data
    if self.encoding:
        encodings = [self.encoding]
    else:
        data_encoding = self.determine_encoding_from_data(data)
        if data_encoding:
            encodings = [data_encoding]
        else:
            encodings = ['utf-8']
            try:
                encodings.append(locale.nl_langinfo(locale.CODESET))
            except:
                pass
            try:
                encodings.append(locale.getlocale()[1])
            except:
                pass
            try:
                encodings.append(locale.getdefaultlocale()[1])
            except:
                pass
            encodings.append('latin-1')
    error = None
    error_details = ''
    for enc in encodings:
        if not enc:
            continue
        try:
            decoded = unicode(data, enc, self.error_handler)
            self.successful_encoding = enc
            return decoded.replace(u'\ufeff', u'')
        except (UnicodeError, LookupError) as error:
            pass
    if error is not None:
        error_details = '\n(%s: %s)' % (error.__class__.__name__, error)
    raise UnicodeError('Unable to decode input data. Tried the following encodings: %s.%s' % (', '.join([repr(enc) for enc in encodings if enc]), error_details))
'Try to determine the encoding of `data` by looking *in* `data`. Check for a byte order mark (BOM) or an encoding declaration.'
def determine_encoding_from_data(self, data):
    for start_bytes, encoding in self.byte_order_marks:
        if data.startswith(start_bytes):
            return encoding
    for line in data.splitlines()[:2]:
        match = self.coding_slug.search(line)
        if match:
            return match.group(1)
    return None
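A self-contained sketch of the same idea; the BOM table and PEP 263 style regex below are illustrative stand-ins for the class attributes `byte_order_marks` and `coding_slug`.

# Standalone sketch of in-data encoding detection (illustrative values only).
import codecs
import re

byte_order_marks = ((codecs.BOM_UTF8, 'utf-8'),
                    (codecs.BOM_UTF16_BE, 'utf-16-be'),
                    (codecs.BOM_UTF16_LE, 'utf-16-le'))
coding_slug = re.compile(br'coding[:=]\s*([-\w.]+)')

def sniff_encoding(data):
    # First check for a byte order mark, then an explicit coding declaration
    # in the first two lines.
    for start_bytes, encoding in byte_order_marks:
        if data.startswith(start_bytes):
            return encoding
    for line in data.splitlines()[:2]:
        match = coding_slug.search(line)
        if match:
            return match.group(1).decode('ascii')
    return None

print(sniff_encoding(b'# -*- coding: latin-1 -*-\nx = 1\n'))   # latin-1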
'`data` is a Unicode string, to be encoded by `self.encode`.'
def write(self, data):
raise NotImplementedError
'Emulate Python 2.3\'s \'xmlcharrefreplace\' encoding error handler.'
def xmlcharref_encode(self, char):
    try:
        return char.encode(self.encoding, 'strict')
    except UnicodeError:
        return '&#%i;' % ord(char)
':Parameters:
    - `source`: either a file-like object (which is read directly), or `None` (which implies `sys.stdin` if no `source_path` given).
    - `source_path`: a path to a file, which is opened and then read.
    - `encoding`: the expected text encoding of the input file.
    - `error_handler`: the encoding error handler to use.
    - `autoclose`: close automatically after read (boolean); always false if `sys.stdin` is the source.
    - `handle_io_errors`: summarize I/O errors here, and exit?'
def __init__(self, source=None, source_path=None, encoding=None, error_handler='strict', autoclose=1, handle_io_errors=1):
    Input.__init__(self, source, source_path, encoding, error_handler)
    self.autoclose = autoclose
    self.handle_io_errors = handle_io_errors
    if source is None:
        if source_path:
            try:
                self.source = open(source_path)
            except IOError as error:
                if not handle_io_errors:
                    raise
                print >>sys.stderr, '%s: %s' % (error.__class__.__name__, error)
                print >>sys.stderr, ('Unable to open source file for reading (%r). Exiting.' % source_path)
                sys.exit(1)
        else:
            self.source = sys.stdin
            self.autoclose = None
    if not source_path:
        try:
            self.source_path = self.source.name
        except AttributeError:
            pass
'Read and decode a single file and return the data (Unicode string).'
def read(self):
    try:
        data = self.source.read()
    finally:
        if self.autoclose:
            self.close()
    return self.decode(data)
':Parameters:
    - `destination`: either a file-like object (which is written directly) or `None` (which implies `sys.stdout` if no `destination_path` given).
    - `destination_path`: a path to a file, which is opened and then written.
    - `autoclose`: close automatically after write (boolean); always false if `sys.stdout` is the destination.'
def __init__(self, destination=None, destination_path=None, encoding=None, error_handler='strict', autoclose=1, handle_io_errors=1):
    Output.__init__(self, destination, destination_path, encoding, error_handler)
    self.opened = 1
    self.autoclose = autoclose
    self.handle_io_errors = handle_io_errors
    if destination is None:
        if destination_path:
            self.opened = None
        else:
            self.destination = sys.stdout
            self.autoclose = None
    if not destination_path:
        try:
            self.destination_path = self.destination.name
        except AttributeError:
            pass
'Encode `data`, write it to a single file, and return it.'
def write(self, data):
    output = self.encode(data)
    if not self.opened:
        self.open()
    try:
        self.destination.write(output)
    finally:
        if self.autoclose:
            self.close()
    return output
'Decode and return the source string.'
def read(self):
return self.decode(self.source)
'Encode `data`, store it in `self.destination`, and return it.'
def write(self, data):
    self.destination = self.encode(data)
    return self.destination
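A hedged round-trip sketch using the string-based I/O classes, assuming they are the StringInput and StringOutput exposed by docutils.io:

# Hedged sketch: assumes docutils.io.StringInput / StringOutput are the classes above.
from docutils.io import StringInput, StringOutput

src = StringInput(source='Hello, world.', encoding='utf-8')
text = src.read()                 # decoded text

out = StringOutput(encoding='utf-8')
encoded = out.write(text)         # encoded output, also kept in out.destination
print(repr(encoded))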
'Return a null string.'
def read(self):
return u''
'Do nothing ([don\'t even] send data to the bit bucket).'
def write(self, data):
pass
'Return the document tree.'
def read(self):
return self.source
'Node instances are always true, even if they\'re empty. A node is more than a simple container. Its boolean "truth" does not depend on having one or more subnodes in the doctree. Use `len()` to check node length. Use `None` to represent a boolean false value.'
def __nonzero__(self):
return 1
'Return a DOM **fragment** representation of this Node.'
def asdom(self, dom=None):
    if dom is None:
        import xml.dom.minidom as dom
    domroot = dom.Document()
    return self._dom_node(domroot)
'Return an indented pseudo-XML representation, for test purposes. Override in subclasses.'
def pformat(self, indent=' ', level=0):
raise NotImplementedError
'Return a copy of self.'
def copy(self):
raise NotImplementedError
'Return a deep copy of self (also copying children).'
def deepcopy(self):
raise NotImplementedError
'Traverse a tree of `Node` objects, calling the `dispatch_visit()` method of `visitor` when entering each node. (The `walkabout()` method is similar, except it also calls the `dispatch_departure()` method before exiting each node.) This tree traversal supports limited in-place tree modifications. Replacing one node with one or more nodes is OK, as is removing an element. However, if the node removed or replaced occurs after the current node, the old node will still be traversed, and any new nodes will not. Within ``visit`` methods (and ``depart`` methods for `walkabout()`), `TreePruningException` subclasses may be raised (`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`). Parameter `visitor`: A `NodeVisitor` object, containing a ``visit`` implementation for each `Node` subclass encountered. Return true if we should stop the traversal.'
def walk(self, visitor):
    stop = 0
    visitor.document.reporter.debug('docutils.nodes.Node.walk calling dispatch_visit for %s' % self.__class__.__name__)
    try:
        try:
            visitor.dispatch_visit(self)
        except (SkipChildren, SkipNode):
            return stop
        except SkipDeparture:
            pass
        children = self.children
        try:
            for child in children[:]:
                if child.walk(visitor):
                    stop = 1
                    break
        except SkipSiblings:
            pass
    except StopTraversal:
        stop = 1
    return stop
'Perform a tree traversal similarly to `Node.walk()` (which see), except also call the `dispatch_departure()` method before exiting each node. Parameter `visitor`: A `NodeVisitor` object, containing a ``visit`` and ``depart`` implementation for each `Node` subclass encountered. Return true if we should stop the traversal.'
def walkabout(self, visitor):
    call_depart = 1
    stop = 0
    visitor.document.reporter.debug('docutils.nodes.Node.walkabout calling dispatch_visit for %s' % self.__class__.__name__)
    try:
        try:
            visitor.dispatch_visit(self)
        except SkipNode:
            return stop
        except SkipDeparture:
            call_depart = 0
        children = self.children
        try:
            for child in children[:]:
                if child.walkabout(visitor):
                    stop = 1
                    break
        except SkipSiblings:
            pass
    except SkipChildren:
        pass
    except StopTraversal:
        stop = 1
    if call_depart:
        visitor.document.reporter.debug('docutils.nodes.Node.walkabout calling dispatch_departure for %s' % self.__class__.__name__)
        visitor.dispatch_departure(self)
    return stop
'Return an iterable containing

    * self (if include_self is true)
    * all descendants in tree traversal order (if descend is true)
    * all siblings (if siblings is true) and their descendants (if also descend is true)
    * the siblings of the parent (if ascend is true) and their descendants (if also descend is true), and so on

If `condition` is not None, the iterable contains only nodes for which ``condition(node)`` is true. If `condition` is a node class ``cls``, it is equivalent to a function consisting of ``return isinstance(node, cls)``.

If ascend is true, assume siblings to be true as well.

For example, given the following tree::

    <paragraph>
        <emphasis>      <--- emphasis.traverse() and
            <strong>    <--- strong.traverse() are called.
                Foo
        Bar
        <reference name="Baz" refid="baz">
            Baz

Then list(emphasis.traverse()) equals ::

    [<emphasis>, <strong>, <#text: Foo>, <#text: Bar>]

and list(strong.traverse(ascend=1)) equals ::

    [<strong>, <#text: Foo>, <#text: Bar>, <reference>, <#text: Baz>]'
def traverse(self, condition=None, include_self=1, descend=1, siblings=0, ascend=0):
    r = []
    if ascend:
        siblings = 1
    if isinstance(condition, (ClassType, TypeType)):
        node_class = condition
        def condition(node, node_class=node_class):
            return isinstance(node, node_class)
    if include_self and (condition is None or condition(self)):
        r.append(self)
    if descend and len(self.children):
        for child in self:
            r.extend(child.traverse(include_self=1, descend=1, siblings=0, ascend=0, condition=condition))
    if siblings or ascend:
        node = self
        while node.parent:
            index = node.parent.index(node)
            for sibling in node.parent[index + 1:]:
                r.extend(sibling.traverse(include_self=1, descend=descend, siblings=0, ascend=0, condition=condition))
            if not ascend:
                break
            else:
                node = node.parent
    return r
'Return the first node in the iterable returned by traverse(), or None if the iterable is empty. Parameter list is the same as of traverse. Note that include_self defaults to 0, though.'
def next_node(self, condition=None, include_self=0, descend=1, siblings=0, ascend=0):
    iterable = self.traverse(condition=condition, include_self=include_self, descend=descend, siblings=siblings, ascend=ascend)
    try:
        return iterable[0]
    except IndexError:
        return None
'Append a node or a list of nodes to `self.children`.'
def __iadd__(self, other):
    if isinstance(other, Node):
        self.append(other)
    elif other is not None:
        self.extend(other)
    return self
'Update basic attributes (\'ids\', \'names\', \'classes\', \'dupnames\', but not \'source\') from node or dictionary `dict`.'
def update_basic_atts(self, dict):
    if isinstance(dict, Node):
        dict = dict.attributes
    for att in ('ids', 'classes', 'names', 'dupnames'):
        for value in dict.get(att, []):
            if value not in self[att]:
                self[att].append(value)
'Replace one child `Node` with another child or children.'
def replace(self, old, new):
    index = self.index(old)
    if isinstance(new, Node):
        self.setup_child(new)
        self[index] = new
    elif new is not None:
        self[index:index + 1] = new
'Replace `self` node with `new`, where `new` is a node or a list of nodes.'
def replace_self(self, new):
    update = new
    if not isinstance(new, Node):
        try:
            update = new[0]
        except IndexError:
            update = None
    if isinstance(update, Element):
        update.update_basic_atts(self)
    else:
        for att in ('ids', 'names', 'classes', 'dupnames'):
            assert not self[att], ('Losing "%s" attribute: %s' % (att, self[att]))
    self.parent.replace(self, new)
'Return the index of the first child whose class exactly matches. Parameters: - `childclass`: A `Node` subclass to search for, or a tuple of `Node` classes. If a tuple, any of the classes may match. - `start`: Initial index to check. - `end`: Initial index to *not* check.'
def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
    if not isinstance(childclass, TupleType):
        childclass = (childclass,)
    for index in range(start, min(len(self), end)):
        for c in childclass:
            if isinstance(self[index], c):
                return index
    return None
'Return the index of the first child whose class does *not* match. Parameters: - `childclass`: A `Node` subclass to skip, or a tuple of `Node` classes. If a tuple, none of the classes may match. - `start`: Initial index to check. - `end`: Initial index to *not* check.'
def first_child_not_matching_class(self, childclass, start=0, end=sys.maxint):
    if not isinstance(childclass, TupleType):
        childclass = (childclass,)
    for index in range(start, min(len(self), end)):
        for c in childclass:
            if isinstance(self.children[index], c):
                break
        else:
            return index
    return None
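A hedged example of the two child lookups above on a small node tree; the tree is made up for illustration and assumes the usual docutils.nodes constructors:

# Hedged sketch using docutils.nodes: title at index 0, paragraph at index 1.
from docutils import nodes

sect = nodes.section('', nodes.title('', 'Heading'),
                         nodes.paragraph('', 'Body text.'))
print(sect.first_child_matching_class(nodes.paragraph))     # 1
print(sect.first_child_not_matching_class(nodes.title))     # 1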
'Add a new class to the "classes" attribute.'
def set_class(self, name):
warnings.warn("docutils.nodes.Element.set_class deprecated; append to Element['classes'] list attribute directly", DeprecationWarning, stacklevel=2) assert (' ' not in name) self['classes'].append(name.lower())
'Note that this Element has been referenced by its name `name` or id `id`.'
def note_referenced_by(self, name=None, id=None):
    self.referenced = 1
    by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
    by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
    if by_name:
        assert name is not None
        by_name.referenced = 1
    if by_id:
        assert id is not None
        by_id.referenced = 1