desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def to_latex_encoding(self, docutils_encoding):
    """Translate a docutils encoding name into LaTeX's.

    Known encodings are looked up in a translation table; the default
    fallback removes "-" and "_" characters from `docutils_encoding`
    and lowercases it.
    """
    tr = {'iso-8859-1': 'latin1', 'iso-8859-2': 'latin2',
          'iso-8859-3': 'latin3', 'iso-8859-4': 'latin4',
          'iso-8859-5': 'iso88595', 'iso-8859-9': 'latin5',
          'iso-8859-15': 'latin9', 'mac_cyrillic': 'maccyr',
          'windows-1251': 'cp1251', 'koi8-r': 'koi8-r',
          'koi8-u': 'koi8-u', 'windows-1250': 'cp1250',
          'windows-1252': 'cp1252', 'us-ascii': 'ascii'}
    encoding = docutils_encoding.lower()
    # `in` replaces dict.has_key(), which was removed in Python 3.
    if encoding in tr:
        return tr[encoding]
    return docutils_encoding.replace('_', '').replace('-', '').lower()
def encode(self, text):
    """Encode special characters (``# $ % & ~ _ ^ \\ { }``) in `text` & return.

    Returns `text` unchanged while inside a verbatim environment.
    """
    if self.verbatim:
        return text
    # Lazily compile and cache the regexes on the instance
    # ('in self.__dict__' replaces Python-2-only __dict__.has_key()).
    if 'encode_re_braces' not in self.__dict__:
        self.encode_re_braces = re.compile('([{}])')
    # First the braces, so later backslash-containing replacements survive.
    text = self.encode_re_braces.sub('{\\\\\\1}', text)
    if 'encode_re_bslash' not in self.__dict__:
        # Match backslashes not produced by the brace substitution above.
        self.encode_re_bslash = re.compile('(?<!{)(\\\\)(?![{}]})')
    text = self.encode_re_bslash.sub('{\\\\textbackslash}', text)
    text = text.replace('$', '{\\$}')
    if not (self.literal_block or self.literal or self.mathmode):
        # Only outside literal/math contexts: | < > need text-mode macros.
        text = text.replace('|', '{\\textbar}')
        text = text.replace('<', '{\\textless}')
        text = text.replace('>', '{\\textgreater}')
    text = text.replace('&', '{\\&}')
    text = text.replace('^', '{\\textasciicircum}')
    text = text.replace('%', '{\\%}')
    text = text.replace('#', '{\\#}')
    text = text.replace('~', '{\\textasciitilde}')
    # Characters whose doubled forms would be merged into ligatures.
    separate_chars = '-'
    if self.literal_block or self.literal:
        separate_chars += ',`\'"<>'
        text = self.babel.double_quotes_in_tt(text)
        if self.font_encoding == 'OT1':
            # OT1 lacks an underscore glyph in tt; emulate it.
            text = text.replace('_', '{\\underline{ }}')
            text = text.replace('\\textbackslash', '\\reflectbox{/}')
        else:
            text = text.replace('_', '{\\_}')
    else:
        text = self.babel.quote_quotes(text)
        if not self.inside_citation_reference_label:
            text = text.replace('_', '{\\_}')
    # Doubling the pass separates runs longer than two as well.
    for char in separate_chars * 2:
        text = text.replace(char + char, char + '{}' + char)
    if self.insert_newline or self.literal_block:
        text = text.replace('\n', '~\\\\\n')
    elif self.mbox_newline:
        # NOTE(review): self.literal_block is always false here (the
        # previous branch would have caught it); kept as in the original.
        if self.literal_block:
            closings = '}' * len(self.literal_block_stack)
            openings = ''.join(self.literal_block_stack)
        else:
            closings = ''
            openings = ''
        text = text.replace('\n',
                            '%s}\\\\\n\\mbox{%s' % (closings, openings))
    text = text.replace('[', '{[}').replace(']', '{]}')
    if self.insert_none_breaking_blanks:
        text = text.replace(' ', '~')
    if self.latex_encoding != 'utf8':
        text = self.unicode_to_latex(text)
        text = self.ensure_math(text)
    return text
def attval(self, text, whitespace=re.compile('[\n\r\t\x0b\x0c]')):
    """Cleanse, encode, and return attribute value text.

    All whitespace characters (newline, CR, tab, VT, FF) are collapsed
    to single spaces before encoding.  (The extracted source had the
    literal tab garbled as ' DCTB '.)
    """
    return self.encode(whitespace.sub(' ', text))
def visit_literal_block(self, node):
    """Render a literal-block.

    Literal blocks are used for "::"-prefixed literal-indented blocks,
    where inline markup is not recognized, and for the parsed-literal
    directive, where markup is respected.
    """
    if self.active_table.is_open():
        self.body.append('\n')
        self.context.append('\n')
    else:
        self.body.append('\\begin{quote}')
        self.context.append('\\end{quote}\n')
    plain_text_only = (self.settings.use_verbatim_when_possible
                       and len(node) == 1
                       and isinstance(node[0], nodes.Text))
    if plain_text_only:
        # A single Text child means no inline markup: verbatim is safe.
        self.verbatim = 1
        self.body.append(self.literal_block_env('begin'))
    else:
        self.literal_block = 1
        self.insert_none_breaking_blanks = 1
        self.body.append('{\\ttfamily \\raggedright \\noindent\n')
def visit_option_argument(self, node):
    """Emit the delimiter between an option and its argument."""
    delimiter = node.get('delimiter', ' ')
    self.body.append(delimiter)
def bookmark(self, node):
    """Append LaTeX hypertargets and PDF bookmarks for titles."""
    ids = node.parent['ids']
    for node_id in ids:
        self.body.append('\\hypertarget{%s}{}\n' % node_id)
    if self.use_latex_toc:
        # The table of contents generates its own bookmarks.
        return
    level = self.section_level
    if level > 0:
        level -= 1
    text = self.encode(node.astext())
    for node_id in ids:
        self.body.append('\\pdfbookmark[%d]{%s}{%s}\n'
                         % (level, text, node_id))
def visit_title(self, node):
    """Section and other titles.

    Dispatches on the title's parent node type: topic (contents /
    abstract / plain), sidebar/admonition, table caption, document
    title, or a numbered/starred section heading.
    """
    if isinstance(node.parent, nodes.topic):
        self.bookmark(node)
        if (('contents' in self.topic_classes) and self.use_latex_toc):
            # "Contents" topic with native LaTeX TOC: retitle \contentsname.
            self.body.append('\\renewcommand{\\contentsname}{')
            self.context.append('}\n\\tableofcontents\n\n\\bigskip\n')
        elif (('abstract' in self.topic_classes)
              and self.settings.use_latex_abstract):
            # The abstract environment supplies its own title.
            raise nodes.SkipNode
        else:
            self.body.append('\\subsubsection*{~\\hfill ')
            self.context.append('\\hfill ~}\n')
    elif (isinstance(node.parent, nodes.sidebar)
          or isinstance(node.parent, nodes.admonition)):
        self.body.append('\\textbf{\\large ')
        self.context.append('}\n\\smallskip\n')
    elif isinstance(node.parent, nodes.table):
        # Table titles become the table caption, not body text.
        self.active_table.caption = self.encode(node.astext())
        raise nodes.SkipNode
    elif (self.section_level == 0):
        # Document title: recorded for the title page and PDF metadata.
        self.title = self.encode(node.astext())
        if (not (self.pdfinfo == None)):
            self.pdfinfo.append(('pdftitle={%s}'
                                 % self.encode(node.astext())))
        raise nodes.SkipNode
    else:
        # Ordinary section heading.
        self.body.append('\n\n')
        self.body.append(('%' + ('_' * 75)))  # visual separator comment
        self.body.append('\n\n')
        self.bookmark(node)
        if self.use_latex_toc:
            section_star = ''
        else:
            section_star = '*'  # starred = unnumbered, not in TOC
        section_name = self.d_class.section(self.section_level)
        self.body.append(('\\%s%s{' % (section_name, section_star)))
        self.context.append('}\n')
def supports(self, format):
    """This writer supports all format-specific elements."""
    return 1
def parse(self, inputstring, document):
    """Override to parse `inputstring` into document tree `document`."""
    raise NotImplementedError('subclass must override this method')
def setup_parse(self, inputstring, document):
    """Initial parse setup.  Call at start of `self.parse()`."""
    # Let the reporter forward parse messages to the document.
    document.reporter.attach_observer(document.note_parse_message)
    self.inputstring = inputstring
    self.document = document
def finish_parse(self):
    """Finalize parse details.  Call at end of `self.parse()`."""
    document = self.document
    document.reporter.detach_observer(document.note_parse_message)
def run(self, input_lines, document, input_offset=0, match_titles=1,
        inliner=None):
    """Parse `input_lines` and modify the `document` node in place.

    Extend `StateMachineWS.run()`: set up parse-global data and run the
    StateMachine.
    """
    self.language = languages.get_language(document.settings.language_code)
    self.match_titles = match_titles
    if (inliner is None):
        inliner = Inliner()
    # NOTE(review): the collapsed source is ambiguous here; upstream
    # docutils runs init_customizations unconditionally — confirm.
    inliner.init_customizations(document.settings)
    # Parse-global scratchpad shared with nested state machines.
    self.memo = Struct(document=document, reporter=document.reporter,
                       language=self.language, title_styles=[],
                       section_level=0, section_bubble_up_kludge=0,
                       inliner=inliner)
    self.document = document
    self.attach_observer(document.note_source)
    self.reporter = self.memo.reporter
    self.node = document
    results = StateMachineWS.run(self, input_lines, input_offset,
                                 input_source=document['source'])
    assert (results == []), 'RSTStateMachine.run() results should be empty!'
    # Break reference cycles when done.
    self.node = self.memo = None
def run(self, input_lines, input_offset, memo, node, match_titles=1):
    """Parse `input_lines` and populate a `docutils.nodes.document`.

    Extend `StateMachineWS.run()`: set up document-wide data from the
    shared `memo` before delegating to the base class.
    """
    self.match_titles = match_titles
    self.memo = memo
    self.document = memo.document
    self.reporter = memo.reporter
    self.language = memo.language
    self.node = node
    self.attach_observer(self.document.note_source)
    results = StateMachineWS.run(self, input_lines, input_offset)
    assert results == [], 'NestedStateMachine.run() results should be empty!'
    return results
def goto_line(self, abs_line_offset):
    """Jump to input line `abs_line_offset`, ignoring jumps past the end."""
    try:
        self.state_machine.goto_line(abs_line_offset)
    except EOFError:
        # Jump target beyond the last line: silently ignore.
        pass
def no_match(self, context, transitions):
    """Override `StateWS.no_match` to generate a system message.

    This code should never be run.
    """
    line_number = self.state_machine.abs_line_number()
    self.reporter.severe(
        'Internal error: no transition pattern match. State: "%s"; '
        'transitions: %s; context: %s; current line: %r.'
        % (self.__class__.__name__, transitions, context,
           self.state_machine.line),
        line=line_number)
    return (context, None, [])
def bof(self, context):
    """Called at beginning of file: no initial context, no results."""
    return ([], [])
def nested_parse(self, block, input_offset, node, match_titles=0,
                 state_machine_class=None, state_machine_kwargs=None):
    """Create a new StateMachine rooted at `node` and run it over the
    input `block`."""
    if state_machine_class is None:
        state_machine_class = self.nested_sm
    if state_machine_kwargs is None:
        state_machine_kwargs = self.nested_sm_kwargs
    block_length = len(block)
    state_machine = state_machine_class(debug=self.debug,
                                        **state_machine_kwargs)
    state_machine.run(block, input_offset, memo=self.memo,
                      node=node, match_titles=match_titles)
    state_machine.unlink()
    new_offset = state_machine.abs_line_offset()
    # `block` is a view; if nested parsing changed its length, keep the
    # outer state machine's position in step with its parent.
    growth = len(block) - block_length
    if block.parent and growth != 0:
        self.state_machine.next_line(growth)
    return new_offset
def nested_list_parse(self, block, input_offset, node, initial_state,
                      blank_finish, blank_finish_state=None,
                      extra_settings=None, match_titles=0,
                      state_machine_class=None, state_machine_kwargs=None):
    """Create a new StateMachine rooted at `node` and run it over the
    input `block`.  Also keep track of optional intermediate blank
    lines and the required final one.

    `extra_settings` defaults to no extra settings; the previous
    mutable ``{}`` default was shared between calls.
    """
    if extra_settings is None:
        extra_settings = {}
    if state_machine_class is None:
        state_machine_class = self.nested_sm
    if state_machine_kwargs is None:
        # Copy before mutating: the shared kwargs dict must stay pristine.
        state_machine_kwargs = self.nested_sm_kwargs.copy()
    state_machine_kwargs['initial_state'] = initial_state
    state_machine = state_machine_class(debug=self.debug,
                                        **state_machine_kwargs)
    if blank_finish_state is None:
        blank_finish_state = initial_state
    state_machine.states[blank_finish_state].blank_finish = blank_finish
    for (key, value) in extra_settings.items():
        setattr(state_machine.states[initial_state], key, value)
    state_machine.run(block, input_offset, memo=self.memo,
                      node=node, match_titles=match_titles)
    blank_finish = state_machine.states[blank_finish_state].blank_finish
    state_machine.unlink()
    return (state_machine.abs_line_offset(), blank_finish)
def section(self, title, source, style, lineno, messages):
    """Check for a valid subsection and create one if it checks out."""
    if not self.check_subsection(source, style, lineno):
        return
    self.new_subsection(title, lineno, messages)
def check_subsection(self, source, style, lineno):
    """Check for a valid subsection header.  Return 1 (true) or None (false).

    When a new section is reached that isn't a subsection of the current
    section, back up the line count (``previous_line(-x)``) and raise
    `EOFError`: the current StateMachine finishes and the caller
    re-examines the title, bubbling up the chain until the correct
    section level is reached.

    :Exception: `EOFError` when a sibling or supersection is encountered.
    """
    memo = self.memo
    title_styles = memo.title_styles
    mylevel = memo.section_level
    try:
        level = (title_styles.index(style) + 1)
    except ValueError:
        # Style not seen before: valid only one level deeper than current.
        if (len(title_styles) == memo.section_level):
            title_styles.append(style)
            return 1
        else:
            self.parent += self.title_inconsistent(source, lineno)
            return None
    if (level <= mylevel):
        # Sibling or supersection: bubble up to the parent section.
        memo.section_level = level
        if (len(style) == 2):
            memo.section_bubble_up_kludge = 1
        # Back up 2 lines for underline title, 3 for overline title.
        self.state_machine.previous_line((len(style) + 1))
        raise EOFError
    if (level == (mylevel + 1)):
        # Immediate subsection: ok.
        return 1
    else:
        self.parent += self.title_inconsistent(source, lineno)
        return None
def new_subsection(self, title, lineno, messages):
    """Append new subsection to document tree.  On return, check level."""
    memo = self.memo
    mylevel = memo.section_level
    memo.section_level += 1
    section_node = nodes.section()
    self.parent += section_node
    (textnodes, title_messages) = self.inline_text(title, lineno)
    titlenode = nodes.title(title, '', *textnodes)
    name = normalize_name(titlenode.astext())
    section_node['names'].append(name)
    section_node += titlenode
    section_node += messages
    section_node += title_messages
    self.document.note_implicit_target(section_node, section_node)
    # Parse the remaining input as the body of the new section.
    offset = (self.state_machine.line_offset + 1)
    absoffset = (self.state_machine.abs_line_offset() + 1)
    newabsoffset = self.nested_parse(
        self.state_machine.input_lines[offset:], input_offset=absoffset,
        node=section_node, match_titles=1)
    self.goto_line(newabsoffset)
    if (memo.section_level <= mylevel):
        # A sibling/supersection was found below: propagate the bubble-up.
        raise EOFError
    # Reset the level on the way back out.
    memo.section_level = mylevel
def paragraph(self, lines, lineno):
    """Return a list (paragraph & messages) & a boolean: literal_block next?"""
    data = '\n'.join(lines).rstrip()
    # An unescaped '::' at the end announces a literal block.
    if re.search('(?<!\\\\)(\\\\\\\\)*::$', data):
        if (len(data) == 2):
            # Paragraph is just '::': no paragraph node at all.
            return ([], 1)
        elif (data[(-3)] in ' \n'):
            # ' ::' form: strip the marker entirely.
            text = data[:(-3)].rstrip()
        else:
            # 'text::' form: keep a single trailing colon.
            text = data[:(-1)]
        literalnext = 1
    else:
        text = data
        literalnext = 0
    (textnodes, messages) = self.inline_text(text, lineno)
    p = nodes.paragraph(data, '', *textnodes)
    p.line = lineno
    return (([p] + messages), literalnext)
def inline_text(self, text, lineno):
    """Return 2 lists: nodes (text and inline elements), and
    system_messages.  Delegates to the shared Inliner."""
    return self.inliner.parse(text, lineno, self.memo, self.parent)
def init_customizations(self, settings):
    """Setting-based customizations; run when parsing begins."""
    if settings.pep_references:
        self.implicit_dispatch.append(
            (self.patterns.pep, self.pep_reference))
    if settings.rfc_references:
        self.implicit_dispatch.append(
            (self.patterns.rfc, self.rfc_reference))
def parse(self, text, lineno, memo, parent):
    """Return 2 lists: nodes (text and inline elements), and system_messages.

    Using `self.patterns.initial` — which matches start-strings
    (emphasis, strong, interpreted, phrase reference, literal,
    substitution reference, inline target) and complete constructs
    (simple reference, footnote reference) — search for a candidate.
    When one is found, check for validity (e.g. not a quoted '*').
    If valid, search for and validate the corresponding end string;
    if not found or invalid, generate a warning and ignore the
    start-string.  Implicit inline markup (e.g. standalone URIs) is
    found last.
    """
    self.reporter = memo.reporter
    self.document = memo.document
    self.language = memo.language
    self.parent = parent
    pattern_search = self.patterns.initial.search
    dispatch = self.dispatch
    remaining = escape2null(text)
    processed = []
    unprocessed = []
    messages = []
    while remaining:
        match = pattern_search(remaining)
        if match:
            groups = match.groupdict()
            # Exactly one of the four groups matched; it keys dispatch.
            method = dispatch[(groups['start'] or groups['backquote']
                               or groups['refend'] or groups['fnend'])]
            (before, inlines, remaining, sysmessages) = method(self, match,
                                                              lineno)
            unprocessed.append(before)
            messages += sysmessages
            if inlines:
                # Flush accumulated plain text through the implicit
                # (standalone URI / PEP / RFC) recognizers first.
                processed += self.implicit_inline(''.join(unprocessed),
                                                  lineno)
                processed += inlines
                unprocessed = []
        else:
            break
    remaining = (''.join(unprocessed) + remaining)
    if remaining:
        processed += self.implicit_inline(remaining, lineno)
    return (processed, messages)
def quoted_start(self, match):
    """Return 1 if inline markup start-string is 'quoted', 0 if not."""
    string = match.string
    start = match.start()
    if start == 0:
        # Start-string at the beginning of the text cannot be quoted.
        return 0
    prestart = string[start - 1]
    try:
        poststart = string[match.end()]
        if self.openers.index(prestart) == self.closers.index(poststart):
            # Matched opener/closer pair surrounds the start-string.
            return 1
    except IndexError:
        # Start-string at end of text: treated as quoted.
        return 1
    except ValueError:
        # Not bracketed by matching punctuation.
        pass
    return 0
def footnote_reference(self, match, lineno):
    """Handles `nodes.footnote_reference` and `nodes.citation_reference`
    elements."""
    label = match.group('footnotelabel')
    refname = normalize_name(label)
    string = match.string
    before = string[:match.start('whole')]
    remaining = string[match.end('whole'):]
    if match.group('citationlabel'):
        refnode = nodes.citation_reference(('[%s]_' % label),
                                           refname=refname)
        refnode += nodes.Text(label)
        self.document.note_citation_ref(refnode)
    else:
        refnode = nodes.footnote_reference(('[%s]_' % label))
        if (refname[0] == '#'):
            # Auto-numbered, possibly named: [#name]_
            refname = refname[1:]
            refnode['auto'] = 1
            self.document.note_autofootnote_ref(refnode)
        elif (refname == '*'):
            # Auto-symbol footnote: [*]_
            refname = ''
            refnode['auto'] = '*'
            self.document.note_symbol_footnote_ref(refnode)
        else:
            refnode += nodes.Text(label)
        # NOTE(review): collapsed source is ambiguous; upstream docutils
        # applies this to named auto-footnotes as well — confirm.
        if refname:
            refnode['refname'] = refname
            self.document.note_footnote_ref(refnode)
    if utils.get_trim_footnote_ref_space(self.document.settings):
        before = before.rstrip()
    return (before, [refnode], remaining, [])
def implicit_inline(self, text, lineno):
    """Check each pattern in `self.implicit_dispatch` for a match and
    dispatch to the stored method for the pattern.

    Recursively check the text before and after the match.  Return a
    list of `nodes.Text` and inline element nodes.
    """
    if not text:
        return []
    for pattern, method in self.implicit_dispatch:
        match = pattern.search(text)
        if not match:
            continue
        try:
            # Recurse on the surrounding text; splice the result in.
            return (self.implicit_inline(text[:match.start()], lineno)
                    + method(match, lineno)
                    + self.implicit_inline(text[match.end():], lineno))
        except MarkupMismatch:
            pass  # this recognizer declined; try the next pattern
    return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
def indent(self, match, context, next_state):
    """Block quote."""
    (indented, indent, line_offset, blank_finish) = \
        self.state_machine.get_indented()
    self.parent += self.block_quote(indented, line_offset)
    if not blank_finish:
        self.parent += self.unindent_warning('Block quote')
    return (context, next_state, [])
def split_attribution(self, indented, line_offset):
    """Check for a block quote attribution and split it off.

    * The first line after a blank line must begin with a dash
      (matches `self.attribution_pattern`).
    * Every line after that must have consistent indentation.
    * Attributions must be preceded by block quote content.

    Return a tuple of: (block quote content lines, attribution lines,
    content offset, remaining indented lines, attribution offset).
    """
    blank = None
    nonblank_seen = False
    for i in range(len(indented)):
        line = indented[i].rstrip()
        if line:
            # Attribution may only start right after a blank line
            # that itself follows quote content.
            if (nonblank_seen and (blank == (i - 1))):
                match = self.attribution_pattern.match(line)
                if match:
                    (attribution_end, indent) = self.check_attribution(
                        indented, i)
                    if attribution_end:
                        a_lines = indented[i:attribution_end]
                        # Strip the dash marker and hanging indent.
                        a_lines.trim_left(match.end(), end=1)
                        a_lines.trim_left(indent, start=1)
                        return (indented[:i], a_lines, i,
                                indented[attribution_end:],
                                (line_offset + attribution_end))
            nonblank_seen = True
        else:
            blank = i
    else:
        # No attribution found anywhere in the block.
        return (indented, None, None, None, None)
def check_attribution(self, indented, attribution_start):
    """Check attribution shape.

    Return the index past the end of the attribution, and the indent.
    """
    indent = None
    i = (attribution_start + 1)
    for i in range((attribution_start + 1), len(indented)):
        line = indented[i].rstrip()
        if (not line):
            # Blank line ends the attribution.
            break
        if (indent is None):
            indent = (len(line) - len(line.lstrip()))
        elif ((len(line) - len(line.lstrip())) != indent):
            # Inconsistent indentation: not an attribution after all.
            return (None, None)
    else:
        # Attribution runs to the end of the indented block.
        i += 1
    return (i, (indent or 0))
def bullet(self, match, context, next_state):
    """Bullet list item."""
    bulletlist = nodes.bullet_list()
    self.parent += bulletlist
    bulletlist['bullet'] = match.string[0]
    (item, blank_finish) = self.list_item(match.end())
    bulletlist += item
    offset = self.state_machine.line_offset + 1  # next line
    (new_line_offset, blank_finish) = self.nested_list_parse(
        self.state_machine.input_lines[offset:],
        input_offset=self.state_machine.abs_line_offset() + 1,
        node=bulletlist, initial_state='BulletList',
        blank_finish=blank_finish)
    self.goto_line(new_line_offset)
    if not blank_finish:
        self.parent += self.unindent_warning('Bullet list')
    return ([], next_state, [])
def enumerator(self, match, context, next_state):
    """Enumerated List Item"""
    (format, sequence, text, ordinal) = self.parse_enumerator(match)
    if (not self.is_enumerated_list_item(ordinal, sequence, format)):
        # Not really a list item: reprocess the line as ordinary text.
        raise statemachine.TransitionCorrection('text')
    enumlist = nodes.enumerated_list()
    self.parent += enumlist
    if (sequence == '#'):
        # Auto-enumerator defaults to arabic numbering.
        enumlist['enumtype'] = 'arabic'
    else:
        enumlist['enumtype'] = sequence
    enumlist['prefix'] = self.enum.formatinfo[format].prefix
    enumlist['suffix'] = self.enum.formatinfo[format].suffix
    if (ordinal != 1):
        enumlist['start'] = ordinal
        msg = self.reporter.info(
            ('Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
             % (text, ordinal)),
            line=self.state_machine.abs_line_number())
        self.parent += msg
    (listitem, blank_finish) = self.list_item(match.end())
    enumlist += listitem
    offset = (self.state_machine.line_offset + 1)
    (newline_offset, blank_finish) = self.nested_list_parse(
        self.state_machine.input_lines[offset:],
        input_offset=(self.state_machine.abs_line_offset() + 1),
        node=enumlist, initial_state='EnumeratedList',
        blank_finish=blank_finish,
        extra_settings={'lastordinal': ordinal, 'format': format,
                        'auto': (sequence == '#')})
    self.goto_line(newline_offset)
    if (not blank_finish):
        self.parent += self.unindent_warning('Enumerated list')
    return ([], next_state, [])
def parse_enumerator(self, match, expected_sequence=None):
    """Analyze an enumerator and return the results.

    :Return:
        - the enumerator format ('period', 'parens', or 'rparen'),
        - the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
        - the text of the enumerator, stripped of formatting, and
        - the ordinal value ('a' -> 1, 'ii' -> 2, etc.; ``None`` for
          invalid enumerator text).

    The format has already been determined by the regexp match.  If
    `expected_sequence` is given, it is tried first; otherwise Roman
    numeral 1 is special-cased (single-character Roman numerals are
    also alphabetical).  If no sequence matched, all are checked in
    order.
    """
    groupdict = match.groupdict()
    sequence = ''
    for format in self.enum.formats:
        if groupdict[format]:
            break  # only one of the format groups can match
    else:
        raise ParserError('enumerator format not matched')
    # Strip the format's prefix/suffix from the matched text.
    text = groupdict[format][self.enum.formatinfo[format].start
                             :self.enum.formatinfo[format].end]
    if (text == '#'):
        sequence = '#'
    elif expected_sequence:
        try:
            if self.enum.sequenceregexps[expected_sequence].match(text):
                sequence = expected_sequence
        except KeyError:
            raise ParserError(('unknown enumerator sequence: %s'
                               % sequence))
    elif (text == 'i'):
        sequence = 'lowerroman'
    elif (text == 'I'):
        sequence = 'upperroman'
    if (not sequence):
        for sequence in self.enum.sequences:
            if self.enum.sequenceregexps[sequence].match(text):
                break
        else:
            raise ParserError('enumerator sequence not matched')
    if (sequence == '#'):
        ordinal = 1
    else:
        try:
            ordinal = self.enum.converters[sequence](text)
        except roman.InvalidRomanNumeralError:
            ordinal = None
    return (format, sequence, text, ordinal)
def is_enumerated_list_item(self, ordinal, sequence, format):
    """Check validity based on the ordinal value and the second line.

    Return true iff the ordinal is valid and the second line is blank,
    indented, or starts with the next enumerator or an auto-enumerator.
    """
    if (ordinal is None):
        return None
    try:
        next_line = self.state_machine.next_line()
    except EOFError:
        # Item at end of input: valid single-line item.
        self.state_machine.previous_line()
        return 1
    else:
        # Restore the state machine's position either way.
        self.state_machine.previous_line()
    if (not next_line[:1].strip()):
        # Second line is blank or indented: valid.
        return 1
    result = self.make_enumerator((ordinal + 1), sequence, format)
    if result:
        (next_enumerator, auto_enumerator) = result
        try:
            if (next_line.startswith(next_enumerator)
                    or next_line.startswith(auto_enumerator)):
                return 1
        except TypeError:
            pass
    return None
def make_enumerator(self, ordinal, sequence, format):
    """Construct and return the next enumerated list item marker, plus an
    auto-enumerator ("#" instead of the regular enumerator).

    Return ``None`` for invalid (out of range) ordinals.
    """
    if sequence == '#':
        enumerator = '#'
    elif sequence == 'arabic':
        enumerator = str(ordinal)
    else:
        if sequence.endswith('alpha'):
            if ordinal > 26:
                return None  # past 'z'
            enumerator = chr(ordinal + ord('a') - 1)
        elif sequence.endswith('roman'):
            try:
                enumerator = roman.toRoman(ordinal)
            except roman.RomanError:
                return None
        else:
            raise ParserError('unknown enumerator sequence: "%s"'
                              % sequence)
        if sequence.startswith('lower'):
            enumerator = enumerator.lower()
        elif sequence.startswith('upper'):
            enumerator = enumerator.upper()
        else:
            raise ParserError('unknown enumerator sequence: "%s"'
                              % sequence)
    formatinfo = self.enum.formatinfo[format]
    next_enumerator = formatinfo.prefix + enumerator + formatinfo.suffix + ' '
    auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
    return (next_enumerator, auto_enumerator)
def field_marker(self, match, context, next_state):
    """Field list item."""
    field_list = nodes.field_list()
    self.parent += field_list
    (field, blank_finish) = self.field(match)
    field_list += field
    offset = self.state_machine.line_offset + 1  # next line
    (newline_offset, blank_finish) = self.nested_list_parse(
        self.state_machine.input_lines[offset:],
        input_offset=self.state_machine.abs_line_offset() + 1,
        node=field_list, initial_state='FieldList',
        blank_finish=blank_finish)
    self.goto_line(newline_offset)
    if not blank_finish:
        self.parent += self.unindent_warning('Field list')
    return ([], next_state, [])
def parse_field_marker(self, match):
    """Extract & return field name from a field marker match."""
    marker = match.group()[1:]            # drop the opening ':'
    return marker[:marker.rfind(':')]     # drop the closing ':' onward
def option_marker(self, match, context, next_state):
    """Option list item."""
    optionlist = nodes.option_list()
    try:
        (listitem, blank_finish) = self.option_list_item(match)
    except MarkupError as detail:
        # Python 3 removed ``except E as (a, b)`` tuple unpacking;
        # unpack the (message, lineno) payload from args instead.
        (message, lineno) = detail.args
        msg = self.reporter.error(
            'Invalid option list marker: %s' % message, line=lineno)
        self.parent += msg
        # Fall back to treating the indented text as a block quote.
        (indented, indent, line_offset, blank_finish) = \
            self.state_machine.get_first_known_indented(match.end())
        elements = self.block_quote(indented, line_offset)
        self.parent += elements
        if not blank_finish:
            self.parent += self.unindent_warning('Option list')
        return ([], next_state, [])
    self.parent += optionlist
    optionlist += listitem
    offset = self.state_machine.line_offset + 1  # next line
    (newline_offset, blank_finish) = self.nested_list_parse(
        self.state_machine.input_lines[offset:],
        input_offset=self.state_machine.abs_line_offset() + 1,
        node=optionlist, initial_state='OptionList',
        blank_finish=blank_finish)
    self.goto_line(newline_offset)
    if not blank_finish:
        self.parent += self.unindent_warning('Option list')
    return ([], next_state, [])
def parse_option_marker(self, match):
    """Return a list of `node.option` and `node.option_argument` objects,
    parsed from an option marker match.

    :Exception: `MarkupError` for invalid option markers.
    """
    optlist = []
    optionstrings = match.group().rstrip().split(', ')
    for optionstring in optionstrings:
        tokens = optionstring.split()
        delimiter = ' '
        # '--opt=arg' form: split on '='.
        firstopt = tokens[0].split('=')
        if (len(firstopt) > 1):
            tokens[:1] = firstopt
            delimiter = '='
        elif ((len(tokens[0]) > 2)
              and ((tokens[0].startswith('-')
                    and (not tokens[0].startswith('--')))
                   or tokens[0].startswith('+'))):
            # '-oARG' or '+oARG' form: option is the first two chars.
            tokens[:1] = [tokens[0][:2], tokens[0][2:]]
            delimiter = ''
        if ((len(tokens) > 1) and (tokens[1].startswith('<')
                                   and tokens[(-1)].endswith('>'))):
            # Angle-bracketed argument may contain spaces: re-join it.
            tokens[1:] = [' '.join(tokens[1:])]
        if (0 < len(tokens) <= 2):
            option = nodes.option(optionstring)
            option += nodes.option_string(tokens[0], tokens[0])
            if (len(tokens) > 1):
                option += nodes.option_argument(tokens[1], tokens[1],
                                                delimiter=delimiter)
            optlist.append(option)
        else:
            raise MarkupError(
                ('wrong number of option tokens (=%s), should be 1 or 2: "%s"'
                 % (len(tokens), optionstring)),
                (self.state_machine.abs_line_number() + 1))
    return optlist
def line_block(self, match, context, next_state):
    """First line of a line block."""
    block = nodes.line_block()
    self.parent += block
    lineno = self.state_machine.abs_line_number()
    (line, messages, blank_finish) = self.line_block_line(match, lineno)
    block += line
    self.parent += messages
    if not blank_finish:
        # Gather the rest of the line block with a nested state machine.
        offset = self.state_machine.line_offset + 1
        (new_line_offset, blank_finish) = self.nested_list_parse(
            self.state_machine.input_lines[offset:],
            input_offset=self.state_machine.abs_line_offset() + 1,
            node=block, initial_state='LineBlock', blank_finish=0)
        self.goto_line(new_line_offset)
    if not blank_finish:
        self.parent += self.reporter.warning(
            'Line block ends without a blank line.',
            line=self.state_machine.abs_line_number() + 1)
    if len(block):
        if block[0].indent is None:
            block[0].indent = 0
        self.nest_line_block_lines(block)
    return ([], next_state, [])
def line_block_line(self, match, lineno):
    """Return one line element of a line_block."""
    (indented, indent, line_offset, blank_finish) = \
        self.state_machine.get_first_known_indented(match.end(),
                                                    until_blank=1)
    text = u'\n'.join(indented)
    (text_nodes, messages) = self.inline_text(text, lineno)
    line = nodes.line(text, '', *text_nodes)
    if match.string.rstrip() != '|':
        # Line has content beyond the '|' marker: record its indent.
        line.indent = len(match.group(1)) - 1
    return (line, messages, blank_finish)
def grid_table_top(self, match, context, next_state):
    """Top border of a full table."""
    return self.table_top(match, context, next_state,
                          self.isolate_grid_table,
                          tableparser.GridTableParser)
def simple_table_top(self, match, context, next_state):
    """Top border of a simple table."""
    return self.table_top(match, context, next_state,
                          self.isolate_simple_table,
                          tableparser.SimpleTableParser)
def table_top(self, match, context, next_state,
              isolate_function, parser_class):
    """Top border of a generic table."""
    (nodelist, blank_finish) = self.table(isolate_function, parser_class)
    self.parent += nodelist
    if not blank_finish:
        msg = self.reporter.warning(
            'Blank line required after table.',
            line=self.state_machine.abs_line_number() + 1)
        self.parent += msg
    return ([], next_state, [])
def table(self, isolate_function, parser_class):
    """Parse a table."""
    (block, messages, blank_finish) = isolate_function()
    if not block:
        # Nothing to parse: pass the isolation messages straight through.
        return (messages, blank_finish)
    try:
        parser = parser_class()
        tabledata = parser.parse(block)
        tableline = self.state_machine.abs_line_number() - len(block) + 1
        table = self.build_table(tabledata, tableline)
        nodelist = [table] + messages
    except tableparser.TableMarkupError as detail:
        nodelist = (self.malformed_table(block, ' '.join(detail.args))
                    + messages)
    return (nodelist, blank_finish)
def parse_target(self, block, block_text, lineno):
    """Determine the type of reference of a target.

    :Return: A 2-tuple, one of:
        - 'refname' and the indirect reference name, or
        - 'refuri' and the URI.
    """
    if block and block[-1].strip()[-1:] == '_':
        # Possible indirect target: the last line ends with '_'.
        reference = ' '.join(line.strip() for line in block)
        refname = self.is_reference(reference)
        if refname:
            return ('refname', refname)
    # Otherwise treat as a URI, with all whitespace removed.
    reference = ''.join(''.join(line.split()) for line in block)
    return ('refuri', unescape(reference))
def directive(self, match, **option_presets):
    """Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
    type_name = match.group(1)
    (directive_class, messages) = directives.directive(
        type_name, self.memo.language, self.document)
    self.parent += messages
    if not directive_class:
        return self.unknown_directive(type_name)
    return self.run_directive(directive_class, match, type_name,
                              option_presets)
def run_directive(self, directive, match, type_name, option_presets):
    """Parse a directive then run its directive function.

    Parameters:
    - `directive`: The class implementing the directive.  Must be a
      subclass of `rst.Directive`.
    - `match`: A regular expression match object which matched the
      first line of the directive.
    - `type_name`: The directive name, as used in the source text.
    - `option_presets`: A dictionary of preset options, defaults for
      the directive options.

    Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
    """
    if isinstance(directive, (FunctionType, MethodType)):
        # Legacy function-style directive: wrap it in a Directive class.
        from docutils.parsers.rst import convert_directive_function
        directive = convert_directive_function(directive)
    lineno = self.state_machine.abs_line_number()
    initial_line_offset = self.state_machine.line_offset
    (indented, indent, line_offset, blank_finish) = \
        self.state_machine.get_first_known_indented(match.end(),
                                                    strip_top=0)
    block_text = '\n'.join(
        self.state_machine.input_lines[
            initial_line_offset:(self.state_machine.line_offset + 1)])
    try:
        (arguments, options, content, content_offset) = \
            self.parse_directive_block(indented, line_offset,
                                       directive, option_presets)
    except MarkupError as detail:
        error = self.reporter.error(
            ('Error in "%s" directive:\n%s.'
             % (type_name, ' '.join(detail.args))),
            nodes.literal_block(block_text, block_text), line=lineno)
        return ([error], blank_finish)
    directive_instance = directive(
        type_name, arguments, options, content, lineno,
        content_offset, block_text, self, self.state_machine)
    try:
        result = directive_instance.run()
    except docutils.parsers.rst.DirectiveError as directive_error:
        # Convert the DirectiveError into a system message node.
        msg_node = self.reporter.system_message(
            directive_error.level, directive_error.message)
        msg_node += nodes.literal_block(block_text, block_text)
        msg_node['line'] = lineno
        result = [msg_node]
    assert isinstance(result, list), \
        ('Directive "%s" must return a list of nodes.' % type_name)
    for i in range(len(result)):
        assert isinstance(result[i], nodes.Node), \
            ('Directive "%s" returned non-Node object (index %s): %r'
             % (type_name, i, result[i]))
    return (result,
            (blank_finish or self.state_machine.is_next_line_blank()))
def parse_extension_options(self, option_spec, datalines):
    """Parse `datalines` for a field list containing extension options
    matching `option_spec`.

    :Parameters:
        - `option_spec`: a mapping of option name to conversion
          function, which should raise an exception on bad input.
        - `datalines`: a list of input strings.

    :Return:
        - Success value, 1 or 0.
        - An option dictionary on success, an error string on failure.
    """
    node = nodes.field_list()
    (newline_offset, blank_finish) = self.nested_list_parse(
        datalines, 0, node, initial_state='ExtensionOptions',
        blank_finish=1)
    if (newline_offset != len(datalines)):
        # Input not fully consumed: not a clean field list.
        return (0, 'invalid option block')
    try:
        options = utils.extract_extension_options(node, option_spec)
    except KeyError as detail:
        return (0, ('unknown option: "%s"' % detail.args[0]))
    except (ValueError, TypeError) as detail:
        return (0, ('invalid option value: %s' % ' '.join(detail.args)))
    except utils.ExtensionOptionError as detail:
        return (0, ('invalid option data: %s' % ' '.join(detail.args)))
    if blank_finish:
        return (1, options)
    else:
        return (0, 'option data incompletely parsed')
def explicit_markup(self, match, context, next_state):
    """Footnotes, hyperlink targets, directives, comments."""
    constructs, blank_finish = self.explicit_construct(match)
    self.parent += constructs
    # Continue parsing any further explicit-markup constructs that follow.
    self.explicit_list(blank_finish)
    return [], next_state, []
def explicit_construct(self, match):
    """Determine which explicit construct this is, parse & return it.

    Tries each registered explicit-markup pattern in order; falls back to
    treating the block as a comment if none matches.
    """
    errors = []
    for (method, pattern) in self.explicit.constructs:
        expmatch = pattern.match(match.string)
        if expmatch:
            try:
                return method(self, expmatch)
            # Fix: `except MarkupError as (message, lineno)` is Python-2-only
            # syntax (a SyntaxError on Python 3).  Unpack from detail.args
            # instead, matching the `as detail` style used elsewhere here.
            except MarkupError as detail:
                (message, lineno) = detail.args
                errors.append(self.reporter.warning(message, line=lineno))
                break
    # No construct matched (or parsing failed): treat as a comment.
    (nodelist, blank_finish) = self.comment(match)
    return ((nodelist + errors), blank_finish)
def explicit_list(self, blank_finish):
    """Create a nested state machine for a series of explicit markup
    constructs (including anonymous hyperlink targets)."""
    offset = (self.state_machine.line_offset + 1)
    (newline_offset, blank_finish) = self.nested_list_parse(self.state_machine.input_lines[offset:], input_offset=(self.state_machine.abs_line_offset() + 1), node=self.parent, initial_state='Explicit', blank_finish=blank_finish, match_titles=self.state_machine.match_titles)
    self.goto_line(newline_offset)
    if (not blank_finish):
        # The list ended without a blank line: flag the unexpected unindent.
        self.parent += self.unindent_warning('Explicit markup')
def anonymous(self, match, context, next_state):
    """Anonymous hyperlink targets."""
    targets, blank_finish = self.anonymous_target(match)
    self.parent += targets
    # An anonymous target starts a run of explicit-markup constructs.
    self.explicit_list(blank_finish)
    return [], next_state, []
def line(self, match, context, next_state):
    """Section title overline or transition marker."""
    if self.state_machine.match_titles:
        # Could be a title overline: hand off to the Line state.
        return ([match.string], 'Line', [])
    elif (match.string.strip() == '::'):
        # A lone "::" is a literal-block marker, not a transition.
        raise statemachine.TransitionCorrection('text')
    elif (len(match.string.strip()) < 4):
        msg = self.reporter.info("Unexpected possible title overline or transition.\nTreating it as ordinary text because it's so short.", line=self.state_machine.abs_line_number())
        self.parent += msg
        raise statemachine.TransitionCorrection('text')
    else:
        blocktext = self.state_machine.line
        msg = self.reporter.severe('Unexpected section title or transition.', nodes.literal_block(blocktext, blocktext), line=self.state_machine.abs_line_number())
        self.parent += msg
        return ([], next_state, [])
def text(self, match, context, next_state):
    """Titles, definition lists, paragraphs."""
    # Pass the raw line along for the Text state to classify.
    return [match.string], 'Text', []
def rfc2822(self, match, context, next_state):
    """RFC2822-style field list item."""
    fieldlist = nodes.field_list(classes=['rfc2822'])
    self.parent += fieldlist
    (field, blank_finish) = self.rfc2822_field(match)
    fieldlist += field
    offset = (self.state_machine.line_offset + 1)
    # Parse the rest of the field list with a nested state machine.
    (newline_offset, blank_finish) = self.nested_list_parse(self.state_machine.input_lines[offset:], input_offset=(self.state_machine.abs_line_offset() + 1), node=fieldlist, initial_state='RFC2822List', blank_finish=blank_finish)
    self.goto_line(newline_offset)
    if (not blank_finish):
        self.parent += self.unindent_warning('RFC2822-style field list')
    return ([], next_state, [])
def invalid_input(self, match=None, context=None, next_state=None):
    """Not a compound element member. Abort this state machine."""
    # Push the current line back so the parent state machine reprocesses it,
    # then signal end-of-input to terminate this nested machine.
    self.state_machine.previous_line()
    raise EOFError()
def bullet(self, match, context, next_state):
    """Bullet list item."""
    if (match.string[0] != self.parent['bullet']):
        # A different bullet character starts a new list: abort this one.
        self.invalid_input()
    (listitem, blank_finish) = self.list_item(match.end())
    self.parent += listitem
    self.blank_finish = blank_finish
    return ([], next_state, [])
def text(self, match, context, next_state):
    """Definition lists."""
    # A text line here is a definition-list term; the indented block that
    # follows is handled by the Definition state.
    return [match.string], 'Definition', []
def enumerator(self, match, context, next_state):
    """Enumerated list item."""
    (format, sequence, text, ordinal) = self.parse_enumerator(match, self.parent['enumtype'])
    # Reject the item (aborting this list) if the enumerator's format or
    # sequence differs from the list's, or the ordinal is out of order —
    # unless it is the auto-enumerator "#".
    if ((format != self.format) or ((sequence != '#') and ((sequence != self.parent['enumtype']) or self.auto or (ordinal != (self.lastordinal + 1)))) or (not self.is_enumerated_list_item(ordinal, sequence, format))):
        self.invalid_input()
    if (sequence == '#'):
        self.auto = 1
    (listitem, blank_finish) = self.list_item(match.end())
    self.parent += listitem
    self.blank_finish = blank_finish
    self.lastordinal = ordinal
    return ([], next_state, [])
def field_marker(self, match, context, next_state):
    """Field list field."""
    field_node, blank_finish = self.field(match)
    self.parent += field_node
    self.blank_finish = blank_finish
    return [], next_state, []
def option_marker(self, match, context, next_state):
    """Option list item."""
    try:
        (option_list_item, blank_finish) = self.option_list_item(match)
    # Fix: `except MarkupError as (message, lineno)` is Python-2-only
    # syntax (SyntaxError on Python 3), and the bound names were never
    # used — a malformed option means this line isn't part of the list.
    except MarkupError:
        self.invalid_input()
    self.parent += option_list_item
    self.blank_finish = blank_finish
    return ([], next_state, [])
def rfc2822(self, match, context, next_state):
    """RFC2822-style field list item."""
    field_node, blank_finish = self.rfc2822_field(match)
    self.parent += field_node
    self.blank_finish = blank_finish
    # Stay in the RFC2822 list state for further fields.
    return [], 'RFC2822List', []
def parse_field_body(self, indented, offset, node):
    """Override `Body.parse_field_body` for simpler parsing.

    Split the indented block into blank-line-separated paragraphs; no
    nested body elements are recognized.
    """
    paragraph_lines = []
    # The sentinel '' flushes the final paragraph.
    for line in list(indented) + ['']:
        if line.strip():
            paragraph_lines.append(line)
            continue
        if paragraph_lines:
            text = '\n'.join(paragraph_lines)
            node += nodes.paragraph(text, text)
            paragraph_lines = []
def line_block(self, match, context, next_state):
    """New line of line block."""
    line_number = self.state_machine.abs_line_number()
    line, messages, blank_finish = self.line_block_line(match, line_number)
    self.blank_finish = blank_finish
    self.parent += line
    # Any system messages belong outside the line_block element itself.
    self.parent.parent += messages
    return [], next_state, []
def explicit_markup(self, match, context, next_state):
    """Footnotes, hyperlink targets, directives, comments."""
    construct_nodes, blank_finish = self.explicit_construct(match)
    self.parent += construct_nodes
    self.blank_finish = blank_finish
    return [], next_state, []
def anonymous(self, match, context, next_state):
    """Anonymous hyperlink targets."""
    target_nodes, blank_finish = self.anonymous_target(match)
    self.parent += target_nodes
    self.blank_finish = blank_finish
    return [], next_state, []
def blank(self, match, context, next_state):
    """End of paragraph."""
    (paragraph, literalnext) = self.paragraph(context, (self.state_machine.abs_line_number() - 1))
    self.parent += paragraph
    if literalnext:
        # The paragraph ended with "::": an indented literal block follows.
        self.parent += self.literal_block()
    return ([], 'Body', [])
def indent(self, match, context, next_state):
    """Definition list item."""
    definitionlist = nodes.definition_list()
    (definitionlistitem, blank_finish) = self.definition_list_item(context)
    definitionlist += definitionlistitem
    self.parent += definitionlist
    offset = (self.state_machine.line_offset + 1)
    # Parse subsequent items of the same definition list with a nested
    # state machine.
    (newline_offset, blank_finish) = self.nested_list_parse(self.state_machine.input_lines[offset:], input_offset=(self.state_machine.abs_line_offset() + 1), node=definitionlist, initial_state='DefinitionList', blank_finish=blank_finish, blank_finish_state='Definition')
    self.goto_line(newline_offset)
    if (not blank_finish):
        self.parent += self.unindent_warning('Definition list')
    return ([], 'Body', [])
def underline(self, match, context, next_state):
    """Section title."""
    lineno = self.state_machine.abs_line_number()
    title = context[0].rstrip()
    underline = match.string.rstrip()
    source = ((title + '\n') + underline)
    messages = []
    if (column_width(title) > len(underline)):
        # Underline shorter than the title (by display width).
        if (len(underline) < 4):
            # Too short to be sure it's an underline at all.
            if self.state_machine.match_titles:
                msg = self.reporter.info("Possible title underline, too short for the title.\nTreating it as ordinary text because it's so short.", line=lineno)
                self.parent += msg
            raise statemachine.TransitionCorrection('text')
        else:
            blocktext = ((context[0] + '\n') + self.state_machine.line)
            msg = self.reporter.warning('Title underline too short.', nodes.literal_block(blocktext, blocktext), line=lineno)
            messages.append(msg)
    if (not self.state_machine.match_titles):
        # Titles are not allowed in this context (e.g. inside a directive).
        blocktext = ((context[0] + '\n') + self.state_machine.line)
        msg = self.reporter.severe('Unexpected section title.', nodes.literal_block(blocktext, blocktext), line=lineno)
        self.parent += messages
        self.parent += msg
        return ([], next_state, [])
    # The underline character identifies the section style/level.
    style = underline[0]
    context[:] = []
    self.section(title, source, style, (lineno - 1), messages)
    return ([], next_state, [])
def text(self, match, context, next_state):
    """Paragraph."""
    startline = (self.state_machine.abs_line_number() - 1)
    msg = None
    try:
        block = self.state_machine.get_text_block(flush_left=1)
    except statemachine.UnexpectedIndentationError as instance:
        # Keep the flush-left part; report the unexpected indent.
        (block, source, lineno) = instance.args
        msg = self.reporter.error('Unexpected indentation.', source=source, line=lineno)
    lines = (context + list(block))
    (paragraph, literalnext) = self.paragraph(lines, startline)
    self.parent += paragraph
    self.parent += msg
    if literalnext:
        # Paragraph ended with "::": consume a line, then the literal block.
        try:
            self.state_machine.next_line()
        except EOFError:
            pass
        self.parent += self.literal_block()
    return ([], next_state, [])
def literal_block(self):
    """Return a list of nodes."""
    (indented, indent, offset, blank_finish) = self.state_machine.get_indented()
    # Strip trailing blank lines from the indented block.
    while (indented and (not indented[(-1)].strip())):
        indented.trim_end()
    if (not indented):
        # No indented block: try a quoted (e.g. "> ...") literal block.
        return self.quoted_literal_block()
    data = '\n'.join(indented)
    literal_block = nodes.literal_block(data, data)
    literal_block.line = (offset + 1)
    nodelist = [literal_block]
    if (not blank_finish):
        nodelist.append(self.unindent_warning('Literal block'))
    return nodelist
def term(self, lines, lineno):
    """Return a definition_list's term and optional classifiers."""
    assert (len(lines) == 1)
    (text_nodes, messages) = self.inline_text(lines[0], lineno)
    term_node = nodes.term()
    node_list = [term_node]
    for i in range(len(text_nodes)):
        node = text_nodes[i]
        if isinstance(node, nodes.Text):
            # Text may contain " : " delimiters separating classifiers.
            parts = self.classifier_delimiter.split(node.rawsource)
            if (len(parts) == 1):
                node_list[(-1)] += node
            else:
                # First part belongs to the current node; the rest each
                # start a new classifier node.
                node_list[(-1)] += nodes.Text(parts[0].rstrip())
                for part in parts[1:]:
                    classifier_node = nodes.classifier('', part)
                    node_list.append(classifier_node)
        else:
            # Inline (non-Text) nodes attach to the current term/classifier.
            node_list[(-1)] += node
    return (node_list, messages)
def eof(self, context):
    """Incomplete construct."""
    # Nothing to emit for an unfinished construct.
    return []
def invalid_input(self, match=None, context=None, next_state=None):
    """Not a compound element member. Abort this state machine."""
    # Terminate the nested state machine immediately.
    raise EOFError()
def eof(self, context):
    """Not a definition."""
    # Back up two lines (the would-be term and this one) so the parent
    # state machine can reprocess them as ordinary text.
    self.state_machine.previous_line(2)
    return []
def indent(self, match, context, next_state):
    """Definition list item."""
    item, blank_finish = self.definition_list_item(context)
    self.parent += item
    self.blank_finish = blank_finish
    # Hand control back to the enclosing definition list.
    return [], 'DefinitionList', []
def eof(self, context):
    """Transition marker at end of section or document."""
    marker = context[0].strip()
    if self.memo.section_bubble_up_kludge:
        # One-shot flag set while bubbling a section up; just clear it.
        self.memo.section_bubble_up_kludge = 0
    elif (len(marker) < 4):
        # Too short for a transition: back out and re-parse as text.
        self.state_correction(context)
    if self.eofcheck:
        # eofcheck is cleared while self.section() is in progress to
        # avoid emitting a spurious transition.
        lineno = (self.state_machine.abs_line_number() - 1)
        transition = nodes.transition(rawsource=context[0])
        transition.line = lineno
        self.parent += transition
    self.eofcheck = 1
    return []
def blank(self, match, context, next_state):
    """Transition marker."""
    line_number = self.state_machine.abs_line_number() - 1
    marker = context[0].strip()
    if len(marker) < 4:
        # Too short for a transition: back out and re-parse.
        self.state_correction(context)
    transition_node = nodes.transition(rawsource=marker)
    transition_node.line = line_number
    self.parent += transition_node
    return [], 'Body', []
def text(self, match, context, next_state):
    """Potential over- & underlined title."""
    lineno = (self.state_machine.abs_line_number() - 1)
    overline = context[0]
    title = match.string
    underline = ''
    try:
        underline = self.state_machine.next_line()
    except EOFError:
        # Overline + title at end of input: no underline possible.
        blocktext = ((overline + '\n') + title)
        if (len(overline.rstrip()) < 4):
            self.short_overline(context, blocktext, lineno, 2)
        else:
            msg = self.reporter.severe('Incomplete section title.', nodes.literal_block(blocktext, blocktext), line=lineno)
            self.parent += msg
            return ([], 'Body', [])
    source = ('%s\n%s\n%s' % (overline, title, underline))
    overline = overline.rstrip()
    underline = underline.rstrip()
    if (not self.transitions['underline'][0].match(underline)):
        # Third line isn't a valid underline at all.
        blocktext = ((((overline + '\n') + title) + '\n') + underline)
        if (len(overline.rstrip()) < 4):
            self.short_overline(context, blocktext, lineno, 2)
        else:
            msg = self.reporter.severe('Missing matching underline for section title overline.', nodes.literal_block(source, source), line=lineno)
            self.parent += msg
            return ([], 'Body', [])
    elif (overline != underline):
        blocktext = ((((overline + '\n') + title) + '\n') + underline)
        if (len(overline.rstrip()) < 4):
            self.short_overline(context, blocktext, lineno, 2)
        else:
            msg = self.reporter.severe('Title overline & underline mismatch.', nodes.literal_block(source, source), line=lineno)
            self.parent += msg
            return ([], 'Body', [])
    title = title.rstrip()
    messages = []
    if (column_width(title) > len(overline)):
        # Overline shorter than the title (by display width): warn only.
        blocktext = ((((overline + '\n') + title) + '\n') + underline)
        if (len(overline.rstrip()) < 4):
            self.short_overline(context, blocktext, lineno, 2)
        else:
            msg = self.reporter.warning('Title overline too short.', nodes.literal_block(source, source), line=lineno)
            messages.append(msg)
    # Overlined titles are styled by the (overline, underline) char pair.
    style = (overline[0], underline[0])
    self.eofcheck = 0
    self.section(title.lstrip(), source, style, (lineno + 1), messages)
    self.eofcheck = 1
    return ([], 'Body', [])
def initial_quoted(self, match, context, next_state):
    """Match arbitrary quote character on the first line only."""
    # After the first line, only the same quote character may continue
    # the block: swap this transition for a specific 'quoted' one.
    self.remove_transition('initial_quoted')
    quote = match.string[0]
    pattern = re.compile(re.escape(quote))
    self.add_transition('quoted', (pattern, self.quoted, self.__class__.__name__))
    self.initial_lineno = self.state_machine.abs_line_number()
    return ([match.string], next_state, [])
def quoted(self, match, context, next_state):
    """Match consistent quotes on subsequent lines."""
    # Accumulate the line in the (mutated in place) context and stay put.
    context.append(match.string)
    return context, next_state, []
def parse(self, inputstring, document):
    """Parse `inputstring` and populate `document`, a document tree."""
    self.setup_parse(inputstring, document)
    self.statemachine = states.RSTStateMachine(state_classes=self.state_classes, initial_state=self.initial_state, debug=document.reporter.debug_flag)
    # Split into lines, expanding tabs and normalizing whitespace.
    inputlines = docutils.statemachine.string2lines(inputstring, tab_width=document.settings.tab_width, convert_whitespace=1)
    self.statemachine.run(inputlines, document, inliner=self.inliner)
    self.finish_parse()
def __init__(self, level, message):
    """Initialize with message `message`. `level` is a system message level."""
    Exception.__init__(self)
    # level: docutils system-message level (int); message: error text.
    self.level = level
    self.message = message
def directive_error(self, level, message):
    """Return a DirectiveError suitable for being thrown as an exception.

    Call ``raise self.directive_error(level, message)`` from within a
    directive implementation to return one single system message at level
    `level`, which automatically gets the directive block and the line
    number added.  You'd often use ``self.error(message)`` instead, which
    generates an ERROR-level directive error.
    """
    return DirectiveError(level, message)
def assert_has_content(self):
    """Throw an ERROR-level DirectiveError if the directive doesn't have contents."""
    # Guard clause: content present means nothing to do.
    if self.content:
        return
    raise self.error(('Content block expected for the "%s" directive; none found.' % self.name))
def run(self):
    """Include a reST file as part of the content of this reST file."""
    if not self.state.document.settings.file_insertion_enabled:
        raise self.warning('"%s" directive disabled.' % self.name)
    source = self.state_machine.input_lines.source(self.lineno - self.state_machine.input_offset - 1)
    source_dir = os.path.dirname(os.path.abspath(source))
    path = directives.path(self.arguments[0])
    if path.startswith('<') and path.endswith('>'):
        # "<name>" refers to a file in the standard include directory.
        path = os.path.join(self.standard_include_path, path[1:-1])
    path = os.path.normpath(os.path.join(source_dir, path))
    path = utils.relative_path(None, path)
    encoding = self.options.get('encoding', self.state.document.settings.input_encoding)
    try:
        self.state.document.settings.record_dependencies.add(path)
        include_file = io.FileInput(source_path=path, encoding=encoding, error_handler=self.state.document.settings.input_encoding_error_handler, handle_io_errors=None)
    except IOError as error:
        raise self.severe('Problems with "%s" directive path:\n%s: %s.' % (self.name, error.__class__.__name__, error))
    try:
        include_text = include_file.read()
    except UnicodeError as error:
        raise self.severe('Problem with "%s" directive:\n%s: %s' % (self.name, error.__class__.__name__, error))
    # Optional slicing of the included text.
    after_text = self.options.get('start-after', None)
    if after_text:
        after_index = include_text.find(after_text)
        if after_index < 0:
            raise self.severe('Problem with "start-after" option of "%s" directive:\nText not found.' % self.name)
        include_text = include_text[after_index + len(after_text):]
    before_text = self.options.get('end-before', None)
    if before_text:
        before_index = include_text.find(before_text)
        if before_index < 0:
            raise self.severe('Problem with "end-before" option of "%s" directive:\nText not found.' % self.name)
        include_text = include_text[:before_index]
    # Fix: dict.has_key() was removed in Python 3; `in` works on 2 and 3.
    if 'literal' in self.options:
        literal_block = nodes.literal_block(include_text, include_text, source=path)
        literal_block.line = 1
        return [literal_block]
    else:
        include_lines = statemachine.string2lines(include_text, convert_whitespace=1)
        self.state_machine.insert_input(include_lines, path)
        return []
def run(self):
    """Dynamically create and register a custom interpreted text role."""
    if (self.content_offset > self.lineno) or (not self.content):
        raise self.error('"%s" directive requires arguments on the first line.' % self.name)
    args = self.content[0]
    match = self.argument_pattern.match(args)
    if not match:
        raise self.error('"%s" directive arguments not valid role names: "%s".' % (self.name, args))
    new_role_name = match.group(1)
    base_role_name = match.group(3)
    messages = []
    if base_role_name:
        (base_role, messages) = roles.role(base_role_name, self.state_machine.language, self.lineno, self.state.reporter)
        if base_role is None:
            error = self.state.reporter.error(('Unknown interpreted text role "%s".' % base_role_name), nodes.literal_block(self.block_text, self.block_text), line=self.lineno)
            return messages + [error]
    else:
        base_role = roles.generic_custom_role
    assert not hasattr(base_role, 'arguments'), ('Supplemental directive arguments for "%s" directive not supported (specified by "%r" role).' % (self.name, base_role))
    try:
        # Reuse the directive-option parser to parse the role's options.
        converted_role = convert_directive_function(base_role)
        (arguments, options, content, content_offset) = self.state.parse_directive_block(self.content[1:], self.content_offset, converted_role, option_presets={})
    except states.MarkupError as detail:
        error = self.state_machine.reporter.error(('Error in "%s" directive:\n%s.' % (self.name, detail)), nodes.literal_block(self.block_text, self.block_text), line=self.lineno)
        return messages + [error]
    # Fix: dict.has_key() was removed in Python 3; `in` works on 2 and 3.
    if 'class' not in options:
        try:
            options['class'] = directives.class_option(new_role_name)
        except ValueError as detail:
            error = self.state_machine.reporter.error(('Invalid argument for "%s" directive:\n%s.' % (self.name, detail)), nodes.literal_block(self.block_text, self.block_text), line=self.lineno)
            return messages + [error]
    role = roles.CustomRole(new_role_name, base_role, options, content)
    roles.register_local_role(new_role_name, role)
    return messages
def get_csv_data(self):
    """Get CSV data from the directive content, from an external file,
    or from a URL reference.

    Returns ``(csv_data, source)``; raises `SystemMessagePropagation`
    on conflicting or missing sources.
    """
    encoding = self.options.get('encoding', self.state.document.settings.input_encoding)
    # Fix: dict.has_key() was removed in Python 3; `in` works on 2 and 3.
    if self.content:
        # Inline content is mutually exclusive with "file"/"url".
        if 'file' in self.options or 'url' in self.options:
            error = self.state_machine.reporter.error(('"%s" directive may not both specify an external file and have content.' % self.name), nodes.literal_block(self.block_text, self.block_text), line=self.lineno)
            raise SystemMessagePropagation(error)
        source = self.content.source(0)
        csv_data = self.content
    elif 'file' in self.options:
        if 'url' in self.options:
            error = self.state_machine.reporter.error(('The "file" and "url" options may not be simultaneously specified for the "%s" directive.' % self.name), nodes.literal_block(self.block_text, self.block_text), line=self.lineno)
            raise SystemMessagePropagation(error)
        source_dir = os.path.dirname(os.path.abspath(self.state.document.current_source))
        source = os.path.normpath(os.path.join(source_dir, self.options['file']))
        source = utils.relative_path(None, source)
        try:
            self.state.document.settings.record_dependencies.add(source)
            csv_file = io.FileInput(source_path=source, encoding=encoding, error_handler=self.state.document.settings.input_encoding_error_handler, handle_io_errors=None)
            csv_data = csv_file.read().splitlines()
        except IOError as error:
            severe = self.state_machine.reporter.severe(('Problems with "%s" directive path:\n%s.' % (self.name, error)), nodes.literal_block(self.block_text, self.block_text), line=self.lineno)
            raise SystemMessagePropagation(severe)
    elif 'url' in self.options:
        # NOTE(review): urllib2 is Python-2 only (urllib.request on Py3);
        # kept as-is since the surrounding codebase targets Python 2.
        import urllib2
        source = self.options['url']
        try:
            csv_text = urllib2.urlopen(source).read()
        except (urllib2.URLError, IOError, OSError, ValueError) as error:
            severe = self.state_machine.reporter.severe(('Problems with "%s" directive URL "%s":\n%s.' % (self.name, self.options['url'], error)), nodes.literal_block(self.block_text, self.block_text), line=self.lineno)
            raise SystemMessagePropagation(severe)
        csv_file = io.StringInput(source=csv_text, source_path=source, encoding=encoding, error_handler=self.state.document.settings.input_encoding_error_handler)
        csv_data = csv_file.read().splitlines()
    else:
        error = self.state_machine.reporter.warning(('The "%s" directive requires content; none supplied.' % self.name), nodes.literal_block(self.block_text, self.block_text), line=self.lineno)
        raise SystemMessagePropagation(error)
    return (csv_data, source)
def field_marker(self, match, context, next_state):
    """Meta element."""
    meta_node, blank_finish = self.parsemeta(match)
    self.parent += meta_node
    return [], next_state, []
def parse(self, block):
    """Analyze the text `block` and return a table data structure.

    Given a plaintext-graphic table in `block` (list of lines of text; no
    whitespace padding), parse the table, construct and return the data
    necessary to construct a CALS table or equivalent.

    Raise `TableMarkupError` if there is any problem with the markup.
    """
    self.setup(block)
    self.find_head_body_sep()
    self.parse_table()
    return self.structure_from_cells()
def find_head_body_sep(self):
    """Look for a head/body row separator line; store the line index."""
    for index, line in enumerate(self.block):
        if not self.head_body_separator_pat.match(line):
            continue
        if self.head_body_sep:
            raise TableMarkupError(('Multiple head/body row separators in table (at line offset %s and %s); only one allowed.' % (self.head_body_sep, index)))
        self.head_body_sep = index
        # Normalize the separator so the cell scanner sees a regular row.
        self.block[index] = line.replace('=', '-')
    if ((self.head_body_sep == 0) or (self.head_body_sep == (len(self.block) - 1))):
        raise TableMarkupError('The head/body row separator may not be the first or last line of the table.')
def parse_table(self):
    """Start with a queue of upper-left corners, containing the upper-left
    corner of the table itself. Trace out one rectangular cell, remember
    it, and add its upper-right and lower-left corners to the queue of
    potential upper-left corners of further cells. Process the queue in
    top-to-bottom order, keeping track of how much of each text column
    has been seen.

    We'll end up knowing all the row and column boundaries, cell positions
    and their dimensions.
    """
    corners = [(0, 0)]
    while corners:
        (top, left) = corners.pop(0)
        # Skip corners on the table edge or inside an already-scanned cell.
        if ((top == self.bottom) or (left == self.right) or (top <= self.done[left])):
            continue
        result = self.scan_cell(top, left)
        if (not result):
            continue
        (bottom, right, rowseps, colseps) = result
        update_dict_of_lists(self.rowseps, rowseps)
        update_dict_of_lists(self.colseps, colseps)
        self.mark_done(top, left, bottom, right)
        # Extract the cell's interior text (excluding the border lines).
        cellblock = self.block.get_2D_block((top + 1), (left + 1), bottom, right)
        cellblock.disconnect()
        cellblock.replace(self.double_width_pad_char, '')
        self.cells.append((top, left, bottom, right, cellblock))
        # The cell's other corners may start further cells.
        corners.extend([(top, right), (bottom, left)])
        corners.sort()
    if (not self.check_parse_complete()):
        raise TableMarkupError('Malformed table; parse incomplete.')
def mark_done(self, top, left, bottom, right):
    """For keeping track of how much of each text column has been seen."""
    expected = top - 1
    new_watermark = bottom - 1
    for col in range(left, right):
        # Each column must have been seen exactly up to the cell's top.
        assert self.done[col] == expected
        self.done[col] = new_watermark
def check_parse_complete(self):
    """Each text column should have been completely seen."""
    expected = self.bottom - 1
    # Original return values (1 / None) kept for caller truthiness checks.
    if all(self.done[col] == expected for col in range(self.right)):
        return 1
    return None
def scan_cell(self, top, left):
    """Starting at the top-left corner, start tracing out a cell."""
    # A cell can only start at a '+' corner character.
    assert self.block[top][left] == '+'
    return self.scan_right(top, left)
def scan_right(self, top, left):
    """Look for the top-right corner of the cell, and make note of all
    column boundaries ('+')."""
    colseps = {}
    line = self.block[top]
    for i in range((left + 1), (self.right + 1)):
        if (line[i] == '+'):
            # Candidate top-right corner: try to trace the right edge down.
            colseps[i] = [top]
            result = self.scan_down(top, left, i)
            if result:
                (bottom, rowseps, newcolseps) = result
                update_dict_of_lists(colseps, newcolseps)
                return (bottom, i, rowseps, colseps)
        elif (line[i] != '-'):
            # Broken top border: no cell here.
            return None
    return None
def scan_down(self, top, left, right):
    """Look for the bottom-right corner of the cell, making note of all
    row boundaries."""
    rowseps = {}
    for i in range((top + 1), (self.bottom + 1)):
        if (self.block[i][right] == '+'):
            # Candidate bottom-right corner: try to trace the bottom edge.
            rowseps[i] = [right]
            result = self.scan_left(top, left, i, right)
            if result:
                (newrowseps, colseps) = result
                update_dict_of_lists(rowseps, newrowseps)
                return (i, rowseps, colseps)
        elif (self.block[i][right] != '|'):
            # Broken right border: no cell here.
            return None
    return None
def scan_left(self, top, left, bottom, right):
    """Noting column boundaries, look for the bottom-left corner of the
    cell. It must line up with the starting point."""
    colseps = {}
    line = self.block[bottom]
    # Walk the bottom border right-to-left.
    for i in range((right - 1), left, (-1)):
        if (line[i] == '+'):
            colseps[i] = [bottom]
        elif (line[i] != '-'):
            # Broken bottom border.
            return None
    if (line[left] != '+'):
        # Bottom-left corner doesn't line up with the starting column.
        return None
    # Finally verify the left border back up to the starting corner.
    result = self.scan_up(top, left, bottom, right)
    if (result is not None):
        rowseps = result
        return (rowseps, colseps)
    return None