desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Special optimized matcher for bare_name.'
def _bare_name_matches(self, nodes):
count = 0 r = {} done = False max = len(nodes) while ((not done) and (count < max)): done = True for leaf in self.content: if leaf[0].match(nodes[count], r): count += 1 done = False break r[self.name] = nodes[:count] return (count, r)
def _recursive_matches(self, nodes, count):
    """Helper to recursively yield the matches."""
    assert self.content is not None
    if count >= self.min:
        yield 0, {}
    if count < self.max:
        for alt in self.content:
            for c0, r0 in generate_matches(alt, nodes):
                for c1, r1 in self._recursive_matches(nodes[c0:], count + 1):
                    # Merge the two partial result dicts into a fresh one.
                    combined = dict(r0)
                    combined.update(r1)
                    yield c0 + c1, combined
'Initializer. The argument is either a pattern or None. If it is None, this only matches an empty sequence (effectively \'$\' in regex lingo). If it is not None, this matches whenever the argument pattern doesn\'t have any matches.'
def __init__(self, content=None):
if (content is not None): assert isinstance(content, BasePattern), repr(content) self.content = content
'Initializer. Subclass may override. Args: options: a dict containing the options passed to RefactoringTool that could be used to customize the fixer through the command line. log: a list to append warnings and other messages to.'
def __init__(self, options, log):
self.options = options self.log = log self.compile_pattern()
def compile_pattern(self):
    """Compiles self.PATTERN into self.pattern.

    Subclass may override if it doesn't want to use
    self.{pattern,PATTERN} in .match().
    """
    if self.PATTERN is None:
        return
    compiler = PatternCompiler()
    self.pattern, self.pattern_tree = compiler.compile_pattern(
        self.PATTERN, with_tree=True)
def set_filename(self, filename):
    """Set the filename and a logger derived from it.

    The main refactoring tool should call this.
    """
    self.filename = filename
    self.logger = logging.getLogger(filename)
def match(self, node):
    """Returns match for a given parse tree node.

    Should return a true or false object (not necessarily a bool).
    It may return a non-empty dict of matching sub-nodes as returned
    by a matching pattern.  Subclass may override.
    """
    results = {'node': node}
    # Returns the (falsy) match result unchanged when there is no match,
    # otherwise the populated results dict.
    return self.pattern.match(node, results) and results
def transform(self, node, results):
    """Returns the transformation for a given parse tree node.

    Args:
        node: the root of the parse tree that matched the fixer.
        results: a dict mapping symbolic names to part of the match.

    Returns:
        None, or a node that is a modified copy of the argument node.
        The node argument may also be modified in-place to effect the
        same change.

    Subclass *must* override.
    """
    raise NotImplementedError()
def new_name(self, template=u'xxx_todo_changeme'):
    """Return a string suitable for use as an identifier.

    The new name is guaranteed not to conflict with other identifiers.
    """
    candidate = template
    while candidate in self.used_names:
        # Append a fresh counter value until the name is unused.
        candidate = template + unicode(self.numbers.next())
    self.used_names.add(candidate)
    return candidate
def cannot_convert(self, node, reason=None):
    """Warn the user that a given chunk of code is not valid Python 3,
    but that it cannot be converted automatically.

    First argument is the top-level node for the code in question.
    Optional second argument is why it can't be converted.
    """
    lineno = node.get_lineno()
    for_output = node.clone()
    for_output.prefix = u''
    self.log_message('Line %d: could not convert: %s' % (lineno, for_output))
    if reason:
        self.log_message(reason)
def warning(self, node, reason):
    """Used for warning the user about possible uncertainty in the
    translation.

    First argument is the top-level node for the code in question.
    Optional second argument is why it can't be converted.
    """
    lineno = node.get_lineno()
    self.log_message('Line %d: %s' % (lineno, reason))
def start_tree(self, tree, filename):
    """Some fixers need to maintain tree-wide state.

    This method is called once, at the start of tree fix-up.

    tree - the root node of the tree to be processed.
    filename - the name of the file the tree came from.
    """
    self.used_names = tree.used_names
    self.set_filename(filename)
    self.numbers = itertools.count(1)
    self.first_log = True
def finish_tree(self, tree, filename):
    """Some fixers need to maintain tree-wide state.

    This method is called once, at the conclusion of tree fix-up.

    tree - the root node of the tree to be processed.
    filename - the name of the file the tree came from.
    """
    # Default implementation: nothing to clean up.
    pass
'Construct a SequenceMatcher. Optional arg isjunk is None (the default), or a one-argument function that takes a sequence element and returns true iff the element is junk. None is equivalent to passing "lambda x: 0", i.e. no elements are considered to be junk. For example, pass lambda x: x in " \t" if you\'re comparing lines as sequences of characters, and don\'t want to synch up on blanks or hard tabs. Optional arg a is the first of two sequences to be compared. By default, an empty string. The elements of a must be hashable. See also .set_seqs() and .set_seq1(). Optional arg b is the second of two sequences to be compared. By default, an empty string. The elements of b must be hashable. See also .set_seqs() and .set_seq2(). Optional arg autojunk should be set to False to disable the "automatic junk heuristic" that treats popular elements as junk (see module documentation for more information).'
def __init__(self, isjunk=None, a='', b='', autojunk=True):
self.isjunk = isjunk self.a = self.b = None self.autojunk = autojunk self.set_seqs(a, b)
def set_seqs(self, a, b):
    """Set the two sequences to be compared."""
    self.set_seq1(a)
    self.set_seq2(b)
def set_seq1(self, a):
    """Set the first sequence to be compared; the second is unchanged.

    Cached matching data is invalidated, except when the very same
    object is set again (identity check), which is a no-op.
    """
    if a is self.a:
        return
    self.a = a
    self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
    """Set the second sequence to be compared; the first is unchanged.

    Detailed data about b is (re)computed and cached, so prefer
    .set_seq2(S) once plus repeated .set_seq1(x) when comparing one
    sequence against many.
    """
    if b is self.b:
        return
    self.b = b
    self.matching_blocks = self.opcodes = None
    self.fullbcount = None
    self.__chain_b()
def find_longest_match(self, alo, ahi, blo, bhi):
    """Find longest matching block in a[alo:ahi] and b[blo:bhi].

    Returns Match(i, j, k) such that a[i:i+k] == b[j:j+k] with
    alo <= i <= i+k <= ahi and blo <= j <= j+k <= bhi; of all maximal
    matching blocks, the one starting earliest in a (then earliest in
    b) is returned.  If isjunk is defined, the longest block contains
    no junk, then is extended by matching junk on both sides.  If no
    blocks match, returns Match(alo, blo, 0).
    """
    a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
    besti, bestj, bestsize = alo, blo, 0
    # j2len[j] = length of longest match ending with a[i-1] == b[j].
    j2len = {}
    nothing = []
    for i in range(alo, ahi):  # range (not py2-only xrange): same iteration
        j2lenget = j2len.get
        newj2len = {}
        for j in b2j.get(a[i], nothing):
            if j < blo:
                continue
            if j >= bhi:
                break
            k = newj2len[j] = j2lenget(j - 1, 0) + 1
            if k > bestsize:
                besti, bestj, bestsize = i - k + 1, j - k + 1, k
        j2len = newj2len
    # Extend the best non-junk block by matching non-junk on both sides.
    while (besti > alo and bestj > blo and
           not isbjunk(b[bestj - 1]) and a[besti - 1] == b[bestj - 1]):
        besti, bestj, bestsize = besti - 1, bestj - 1, bestsize + 1
    while (besti + bestsize < ahi and bestj + bestsize < bhi and
           not isbjunk(b[bestj + bestsize]) and
           a[besti + bestsize] == b[bestj + bestsize]):
        bestsize += 1
    # Then extend by matching junk adjacent to the interesting match.
    while (besti > alo and bestj > blo and
           isbjunk(b[bestj - 1]) and a[besti - 1] == b[bestj - 1]):
        besti, bestj, bestsize = besti - 1, bestj - 1, bestsize + 1
    while (besti + bestsize < ahi and bestj + bestsize < bhi and
           isbjunk(b[bestj + bestsize]) and
           a[besti + bestsize] == b[bestj + bestsize]):
        bestsize = bestsize + 1
    return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
    """Return list of triples describing matching subsequences.

    Each triple is of the form (i, j, n) meaning a[i:i+n] == b[j:j+n].
    The triples are monotonically increasing in i and in j, adjacent
    triples never describe adjacent equal blocks, and the last triple
    is a dummy (len(a), len(b), 0) — the only one with n == 0.
    The result is cached.
    """
    if self.matching_blocks is not None:
        return self.matching_blocks
    la, lb = len(self.a), len(self.b)
    # Divide-and-conquer: repeatedly find the longest match in the
    # region, then recurse (iteratively, via a queue) on both sides.
    queue = [(0, la, 0, lb)]
    matching_blocks = []
    while queue:
        alo, ahi, blo, bhi = queue.pop()
        i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
        if k:
            matching_blocks.append(x)
            if alo < i and blo < j:
                queue.append((alo, i, blo, j))
            if i + k < ahi and j + k < bhi:
                queue.append((i + k, ahi, j + k, bhi))
    matching_blocks.sort()
    # Collapse adjacent equal blocks into single larger blocks.
    i1 = j1 = k1 = 0
    non_adjacent = []
    for i2, j2, k2 in matching_blocks:
        if i1 + k1 == i2 and j1 + k1 == j2:
            k1 += k2
        else:
            if k1:
                non_adjacent.append((i1, j1, k1))
            i1, j1, k1 = i2, j2, k2
    if k1:
        non_adjacent.append((i1, j1, k1))
    non_adjacent.append((la, lb, 0))
    # list(...) so the cached value is a real list under Python 3 too,
    # where map() returns a one-shot iterator that a cached return
    # would silently exhaust on first use; identical result on Python 2.
    self.matching_blocks = list(map(Match._make, non_adjacent))
    return self.matching_blocks
def get_opcodes(self):
    """Return list of 5-tuples describing how to turn a into b.

    Each tuple is (tag, i1, i2, j1, j2); the first has i1 == j1 == 0,
    and each subsequent tuple starts where the previous one ended.
    Tags: 'replace' (a[i1:i2] -> b[j1:j2]), 'delete' (j1 == j2),
    'insert' (i1 == i2), 'equal'.  The result is cached.
    """
    if self.opcodes is not None:
        return self.opcodes
    pos_a = pos_b = 0
    self.opcodes = ops = []
    for ai, bj, size in self.get_matching_blocks():
        # Classify the gap (if any) before this matching block.
        tag = ''
        if pos_a < ai and pos_b < bj:
            tag = 'replace'
        elif pos_a < ai:
            tag = 'delete'
        elif pos_b < bj:
            tag = 'insert'
        if tag:
            ops.append((tag, pos_a, ai, pos_b, bj))
        pos_a, pos_b = ai + size, bj + size
        # The matching block itself (the final dummy block has size 0).
        if size:
            ops.append(('equal', ai, pos_a, bj, pos_b))
    return ops
def get_grouped_opcodes(self, n=3):
    """Isolate change clusters by eliminating ranges with no changes.

    Generates groups with up to n lines of context; each group is in
    the same format as returned by get_opcodes().
    """
    codes = self.get_opcodes()
    if not codes:
        codes = [('equal', 0, 1, 0, 1)]
    # Trim leading and trailing unchanged runs down to n lines.
    if codes[0][0] == 'equal':
        tag, i1, i2, j1, j2 = codes[0]
        codes[0] = tag, max(i1, i2 - n), i2, max(j1, j2 - n), j2
    if codes[-1][0] == 'equal':
        tag, i1, i2, j1, j2 = codes[-1]
        codes[-1] = tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)
    nn = n + n
    group = []
    for tag, i1, i2, j1, j2 in codes:
        # A long unchanged run ends the current group.
        if tag == 'equal' and i2 - i1 > nn:
            group.append((tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)))
            yield group
            group = []
            i1, j1 = max(i1, i2 - n), max(j1, j2 - n)
        group.append((tag, i1, i2, j1, j2))
    if group and not (len(group) == 1 and group[0][0] == 'equal'):
        yield group
def ratio(self):
    """Return a measure of the sequences' similarity (float in [0,1]).

    Where T is the total number of elements in both sequences and M
    is the number of matches, this is 2.0*M / T: 1.0 if the sequences
    are identical, 0.0 if they have nothing in common.  Expensive if
    matching blocks are not already computed; consider quick_ratio()
    or real_quick_ratio() for upper bounds.
    """
    # Sum the sizes (last element) of all matching blocks.
    matches = sum(triple[-1] for triple in self.get_matching_blocks())
    return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
    """Return an upper bound on ratio() relatively quickly.

    This isn't defined beyond that it is an upper bound on .ratio()
    and is faster to compute.
    """
    # Lazily build and cache a count of each element in b.
    if self.fullbcount is None:
        self.fullbcount = counts = {}
        for elt in self.b:
            counts[elt] = counts.get(elt, 0) + 1
    counts = self.fullbcount
    # avail[x] = number of x's in b still "available" to match.
    avail = {}
    availhas = avail.__contains__
    matches = 0
    for elt in self.a:
        if availhas(elt):
            remaining = avail[elt]
        else:
            remaining = counts.get(elt, 0)
        avail[elt] = remaining - 1
        if remaining > 0:
            matches = matches + 1
    return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
    """Return an upper bound on ratio() very quickly.

    This isn't defined beyond that it is an upper bound on .ratio()
    and is faster to compute than either .ratio() or .quick_ratio().
    """
    la, lb = len(self.a), len(self.b)
    # Can't have more matches than the shorter sequence's length.
    return _calculate_ratio(min(la, lb), la + lb)
'Construct a text differencer, with optional filters. The two optional keyword parameters are for filter functions: - `linejunk`: A function that should accept a single string argument, and return true iff the string is junk. The module-level function `IS_LINE_JUNK` may be used to filter out lines without visible characters, except for at most one splat (\'#\'). It is recommended to leave linejunk None; as of Python 2.3, the underlying SequenceMatcher class has grown an adaptive notion of "noise" lines that\'s better than any static definition the author has ever been able to craft. - `charjunk`: A function that should accept a string of length 1. The module-level function `IS_CHARACTER_JUNK` may be used to filter out whitespace characters (a blank or tab; **note**: bad idea to include newline in this!). Use of IS_CHARACTER_JUNK is recommended.'
def __init__(self, linejunk=None, charjunk=None):
self.linejunk = linejunk self.charjunk = charjunk
def compare(self, a, b):
    """Compare two sequences of lines; generate the resulting delta.

    Each sequence must contain individual single-line strings ending
    with newlines (e.g. from a file's readlines()).  The generated
    delta also consists of newline-terminated strings.
    """
    cruncher = SequenceMatcher(self.linejunk, a, b)
    for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
        if tag == 'replace':
            g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
        elif tag == 'delete':
            g = self._dump('-', a, alo, ahi)
        elif tag == 'insert':
            g = self._dump('+', b, blo, bhi)
        elif tag == 'equal':
            g = self._dump(' ', a, alo, ahi)
        else:
            # Call form works on both Python 2 and 3; the original
            # "raise ValueError, msg" statement is Python-2-only syntax.
            raise ValueError('unknown tag %r' % (tag,))
        for line in g:
            yield line
'Generate comparison results for a same-tagged range.'
def _dump(self, tag, x, lo, hi):
for i in xrange(lo, hi): (yield ('%s %s' % (tag, x[i])))
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
    """When replacing one block of lines with another, search the
    blocks for *similar* lines; the best-matching pair (if any) is
    used as a synch point, and intraline difference marking is done
    on the similar pair.  Lots of work, but often worth it.
    """
    # Don't synch up unless the lines have a similarity score of at
    # least cutoff; best_ratio tracks the best score seen so far.
    best_ratio, cutoff = 0.74, 0.75
    cruncher = SequenceMatcher(self.charjunk)
    eqi, eqj = None, None  # 1st indices of equal lines (if any)
    # Search for the pair that matches best without being identical
    # (identical lines must be junk lines, & we don't want to synch
    # up on junk).
    for j in range(blo, bhi):  # was xrange (py2-only)
        bj = b[j]
        cruncher.set_seq2(bj)
        for i in range(alo, ahi):  # was xrange (py2-only)
            ai = a[i]
            if ai == bj:
                if eqi is None:
                    eqi, eqj = i, j
                continue
            cruncher.set_seq1(ai)
            # Cheap upper bounds first; .ratio() only when they pass.
            if (cruncher.real_quick_ratio() > best_ratio and
                    cruncher.quick_ratio() > best_ratio and
                    cruncher.ratio() > best_ratio):
                best_ratio, best_i, best_j = cruncher.ratio(), i, j
    if best_ratio < cutoff:
        # No non-identical "pretty close" pair.
        if eqi is None:
            # No identical pair either -- treat it as a straight replace.
            for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
                yield line
            return
        # No close pair, but an identical pair -- synch up on that.
        best_i, best_j, best_ratio = eqi, eqj, 1.0
    else:
        # There's a close pair, so forget the identical pair (if any).
        eqi = None
    # Pump out diffs from before the synch point.
    for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
        yield line
    # Do intraline marking on the synch pair.
    aelt, belt = a[best_i], b[best_j]
    if eqi is None:
        atags = btags = ''
        cruncher.set_seqs(aelt, belt)
        for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
            la, lb = ai2 - ai1, bj2 - bj1
            if tag == 'replace':
                atags += '^' * la
                btags += '^' * lb
            elif tag == 'delete':
                atags += '-' * la
            elif tag == 'insert':
                btags += '+' * lb
            elif tag == 'equal':
                atags += ' ' * la
                btags += ' ' * lb
            else:
                # py3-compatible raise; behavior identical on py2.
                raise ValueError('unknown tag %r' % (tag,))
        for line in self._qformat(aelt, belt, atags, btags):
            yield line
    else:
        # The synch pair is identical: two-space prefix matches the
        # '  line' form produced by _dump(' ', ...) for equal lines
        # (the single space in the dump appears to be corruption).
        yield '  ' + aelt
    # Pump out diffs from after the synch point.
    for line in self._fancy_helper(a, best_i + 1, ahi, b, best_j + 1, bhi):
        yield line
def _qformat(self, aline, bline, atags, btags):
    """Format "?" output and deal with leading tabs.

    Yields the '- ' line, an optional '? ' guide line for it, the
    '+ ' line, and an optional '? ' guide line for that.
    """
    # Can hurt, but will probably help most of the time: skip a common
    # prefix of leading tabs so the guide markers line up visually.
    # NOTE: '\t' restores the tab literals mangled to ' DCTB ' in this
    # copy of the source; _count_leading counts leading tab characters.
    common = min(_count_leading(aline, '\t'), _count_leading(bline, '\t'))
    common = min(common, _count_leading(atags[:common], ' '))
    common = min(common, _count_leading(btags[:common], ' '))
    atags = atags[common:].rstrip()
    btags = btags[common:].rstrip()
    yield '- ' + aline
    if atags:
        yield '? %s%s\n' % ('\t' * common, atags)
    yield '+ ' + bline
    if btags:
        yield '? %s%s\n' % ('\t' * common, btags)
def __init__(self, tabsize=8, wrapcolumn=None, linejunk=None,
             charjunk=IS_CHARACTER_JUNK):
    """HtmlDiff instance initializer.

    Arguments:
        tabsize -- tab stop spacing, defaults to 8.
        wrapcolumn -- column number where lines are broken and
            wrapped; None (default) means no wrapping.
        linejunk, charjunk -- keyword arguments passed into ndiff()
            when generating the side-by-side HTML differences.
    """
    self._tabsize = tabsize
    self._wrapcolumn = wrapcolumn
    self._linejunk = linejunk
    self._charjunk = charjunk
def make_file(self, fromlines, tolines, fromdesc='', todesc='',
              context=False, numlines=5):
    """Returns HTML file of side by side comparison with change
    highlights.

    Arguments:
        fromlines/tolines -- lists of "from"/"to" lines.
        fromdesc/todesc -- column header strings.
        context -- True for contextual differences; False (default)
            shows the full differences.
        numlines -- context lines when context is True; otherwise the
            distance of the "next" link anchor before each change.
    """
    table = self.make_table(fromlines, tolines, fromdesc, todesc,
                            context=context, numlines=numlines)
    return self._file_template % dict(
        styles=self._styles, legend=self._legend, table=table)
'Returns from/to line lists with tabs expanded and newlines removed. Instead of tab characters being replaced by the number of spaces needed to fill in to the next tab stop, this function will fill the space with tab characters. This is done so that the difference algorithms can identify changes in a file when tabs are replaced by spaces and vice versa. At the end of the HTML generation, the tab characters will be replaced with a nonbreakable space.'
def _tab_newline_replace(self, fromlines, tolines):
def expand_tabs(line): line = line.replace(' ', '\x00') line = line.expandtabs(self._tabsize) line = line.replace(' ', ' DCTB ') return line.replace('\x00', ' ').rstrip('\n') fromlines = [expand_tabs(line) for line in fromlines] tolines = [expand_tabs(line) for line in tolines] return (fromlines, tolines)
'Builds list of text lines by splitting text lines at wrap point This function will determine if the input text line needs to be wrapped (split) into separate lines. If so, the first wrap point will be determined and the first line appended to the output text line list. This function is used recursively to handle the second part of the split line to further split it.'
def _split_line(self, data_list, line_num, text):
if (not line_num): data_list.append((line_num, text)) return size = len(text) max = self._wrapcolumn if ((size <= max) or ((size - (text.count('\x00') * 3)) <= max)): data_list.append((line_num, text)) return i = 0 n = 0 mark = '' while ((n < max) and (i < size)): if (text[i] == '\x00'): i += 1 mark = text[i] i += 1 elif (text[i] == '\x01'): i += 1 mark = '' else: i += 1 n += 1 line1 = text[:i] line2 = text[i:] if mark: line1 = (line1 + '\x01') line2 = (('\x00' + mark) + line2) data_list.append((line_num, line1)) self._split_line(data_list, '>', line2)
'Returns iterator that splits (wraps) mdiff text lines'
def _line_wrapper(self, diffs):
for (fromdata, todata, flag) in diffs: if (flag is None): (yield (fromdata, todata, flag)) continue ((fromline, fromtext), (toline, totext)) = (fromdata, todata) (fromlist, tolist) = ([], []) self._split_line(fromlist, fromline, fromtext) self._split_line(tolist, toline, totext) while (fromlist or tolist): if fromlist: fromdata = fromlist.pop(0) else: fromdata = ('', ' ') if tolist: todata = tolist.pop(0) else: todata = ('', ' ') (yield (fromdata, todata, flag))
'Collects mdiff output into separate lists Before storing the mdiff from/to data into a list, it is converted into a single line of text with HTML markup.'
def _collect_lines(self, diffs):
(fromlist, tolist, flaglist) = ([], [], []) for (fromdata, todata, flag) in diffs: try: fromlist.append(self._format_line(0, flag, *fromdata)) tolist.append(self._format_line(1, flag, *todata)) except TypeError: fromlist.append(None) tolist.append(None) flaglist.append(flag) return (fromlist, tolist, flaglist)
'Returns HTML markup of "from" / "to" text lines side -- 0 or 1 indicating "from" or "to" text flag -- indicates if difference on line linenum -- line number (used for line number column) text -- line text to be marked up'
def _format_line(self, side, flag, linenum, text):
try: linenum = ('%d' % linenum) id = (' id="%s%s"' % (self._prefix[side], linenum)) except TypeError: id = '' text = text.replace('&', '&amp;').replace('>', '&gt;').replace('<', '&lt;') text = text.replace(' ', '&nbsp;').rstrip() return ('<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' % (id, linenum, text))
def _make_prefix(self):
    """Create unique anchor prefixes."""
    # Generate a unique anchor prefix so multiple tables can exist
    # on the same page without conflict.
    fromprefix = 'from%d_' % HtmlDiff._default_prefix
    toprefix = 'to%d_' % HtmlDiff._default_prefix
    HtmlDiff._default_prefix += 1
    # Store prefixes so line format method has access.
    self._prefix = [fromprefix, toprefix]
'Makes list of "next" links'
def _convert_flags(self, fromlist, tolist, flaglist, context, numlines):
toprefix = self._prefix[1] next_id = ([''] * len(flaglist)) next_href = ([''] * len(flaglist)) (num_chg, in_change) = (0, False) last = 0 for (i, flag) in enumerate(flaglist): if flag: if (not in_change): in_change = True last = i i = max([0, (i - numlines)]) next_id[i] = (' id="difflib_chg_%s_%d"' % (toprefix, num_chg)) num_chg += 1 next_href[last] = ('<a href="#difflib_chg_%s_%d">n</a>' % (toprefix, num_chg)) else: in_change = False if (not flaglist): flaglist = [False] next_id = [''] next_href = [''] last = 0 if context: fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>'] tolist = fromlist else: fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>'] if (not flaglist[0]): next_href[0] = ('<a href="#difflib_chg_%s_0">f</a>' % toprefix) next_href[last] = ('<a href="#difflib_chg_%s_top">t</a>' % toprefix) return (fromlist, tolist, flaglist, next_href, next_id)
def make_table(self, fromlines, tolines, fromdesc='', todesc='',
               context=False, numlines=5):
    """Returns HTML table of side by side comparison with change
    highlights.

    Arguments:
        fromlines/tolines -- lists of "from"/"to" lines.
        fromdesc/todesc -- column header strings.
        context -- True for contextual differences; False (default)
            shows the full differences.
        numlines -- context lines when context is True; otherwise the
            distance of the "next" link anchor before each change.
    """
    # Make unique anchor prefixes so multiple tables may coexist.
    self._make_prefix()
    # Change tabs to spaces before it gets more difficult after we
    # insert markup.
    fromlines, tolines = self._tab_newline_replace(fromlines, tolines)
    # Create diffs iterator which generates side-by-side from/to data.
    if context:
        context_lines = numlines
    else:
        context_lines = None
    diffs = _mdiff(fromlines, tolines, context_lines,
                   linejunk=self._linejunk, charjunk=self._charjunk)
    # Set up iterator to wrap lines that exceed the desired width.
    if self._wrapcolumn:
        diffs = self._line_wrapper(diffs)
    # Collect up from/to lines and flags into lists (also format the
    # lines).
    fromlist, tolist, flaglist = self._collect_lines(diffs)
    # Process change flags, generating IR with links to next changes.
    fromlist, tolist, flaglist, next_href, next_id = self._convert_flags(
        fromlist, tolist, flaglist, context, numlines)
    s = []
    fmt = (' <tr><td class="diff_next"%s>%s</td>%s'
           + '<td class="diff_next">%s</td>%s</tr>\n')
    for i in range(len(flaglist)):
        if flaglist[i] is None:
            # mdiff yields None on separator lines; skip the bogus ones
            # generated for the first line.
            if i > 0:
                s.append(' </tbody> \n <tbody>\n')
        else:
            s.append(fmt % (next_id[i], next_href[i], fromlist[i],
                            next_href[i], tolist[i]))
    if fromdesc or todesc:
        header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
            '<th class="diff_next"><br /></th>',
            '<th colspan="2" class="diff_header">%s</th>' % fromdesc,
            '<th class="diff_next"><br /></th>',
            '<th colspan="2" class="diff_header">%s</th>' % todesc)
    else:
        header_row = ''
    table = self._table_template % dict(
        data_rows=''.join(s), header_row=header_row,
        prefix=self._prefix[1])
    # '\t' restores the tab literal mangled to ' DCTB ' in this copy:
    # the tabs produced by _tab_newline_replace become &nbsp; here.
    return (table.replace('\x00+', '<span class="diff_add">')
                 .replace('\x00-', '<span class="diff_sub">')
                 .replace('\x00^', '<span class="diff_chg">')
                 .replace('\x01', '</span>')
                 .replace('\t', '&nbsp;'))
def __init__(self):
    """This is an abstract class."""
    # Don't instantiate BaseSet directly -- use Set or ImmutableSet.
    if self.__class__ is BaseSet:
        # Call form works on both Python 2 and 3; the original
        # "raise TypeError, msg" statement is Python-2-only syntax.
        raise TypeError('BaseSet is an abstract class. Use Set or ImmutableSet.')
'Return the number of elements of a set.'
def __len__(self):
return len(self._data)
'Return string representation of a set. This looks like \'Set([<list of elements>])\'.'
def __repr__(self):
return self._repr()
'Return an iterator over the elements or a set. This is the keys iterator for the underlying dict.'
def __iter__(self):
return self._data.iterkeys()
def copy(self):
    """Return a shallow copy of a set."""
    dup = self.__class__()
    dup._data.update(self._data)
    return dup
'Return a deep copy of a set; used by copy module.'
def __deepcopy__(self, memo):
from copy import deepcopy result = self.__class__() memo[id(self)] = result data = result._data value = True for elt in self: data[deepcopy(elt, memo)] = value return result
def __or__(self, other):
    """Return the union of two sets as a new set.

    (I.e. all elements that are in either set.)
    """
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.union(other)
def union(self, other):
    """Return the union of two sets as a new set.

    (I.e. all elements that are in either set.)
    """
    merged = self.__class__(self)
    merged._update(other)
    return merged
def __and__(self, other):
    """Return the intersection of two sets as a new set.

    (I.e. all elements that are in both sets.)
    """
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.intersection(other)
def intersection(self, other):
    """Return the intersection of two sets as a new set.

    (I.e. all elements that are in both sets.)
    """
    if not isinstance(other, BaseSet):
        other = Set(other)
    # Iterate the smaller set, probing the bigger one.
    if len(self) <= len(other):
        little, big = self, other
    else:
        little, big = other, self
    common = ifilter(big._data.__contains__, little)
    return self.__class__(common)
def __xor__(self, other):
    """Return the symmetric difference of two sets as a new set.

    (I.e. all elements that are in exactly one of the sets.)
    """
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.symmetric_difference(other)
def symmetric_difference(self, other):
    """Return the symmetric difference of two sets as a new set.

    (I.e. all elements that are in exactly one of the sets.)
    """
    result = self.__class__()
    data = result._data
    selfdata = self._data
    try:
        otherdata = other._data
    except AttributeError:
        # Not a set -- coerce the iterable to one first.
        otherdata = Set(other)._data
    # Elements of self missing from other, then vice versa.
    for elt in ifilterfalse(otherdata.__contains__, selfdata):
        data[elt] = True
    for elt in ifilterfalse(selfdata.__contains__, otherdata):
        data[elt] = True
    return result
def __sub__(self, other):
    """Return the difference of two sets as a new Set.

    (I.e. all elements that are in this set and not in the other.)
    """
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.difference(other)
def difference(self, other):
    """Return the difference of two sets as a new Set.

    (I.e. all elements that are in this set and not in the other.)
    """
    result = self.__class__()
    data = result._data
    try:
        otherdata = other._data
    except AttributeError:
        # Not a set -- coerce the iterable to one first.
        otherdata = Set(other)._data
    for elt in ifilterfalse(otherdata.__contains__, self):
        data[elt] = True
    return result
'Report whether an element is a member of a set. (Called in response to the expression `element in self\'.)'
def __contains__(self, element):
try: return (element in self._data) except TypeError: transform = getattr(element, '__as_temporarily_immutable__', None) if (transform is None): raise return (transform() in self._data)
def issubset(self, other):
    """Report whether another set contains this set."""
    self._binary_sanity_check(other)
    # Quick size check: a bigger set cannot be a subset.
    if len(self) > len(other):
        return False
    for missing in ifilterfalse(other._data.__contains__, self):
        return False
    return True
def issuperset(self, other):
    """Report whether this set contains another set."""
    self._binary_sanity_check(other)
    if len(self) < len(other):
        # A smaller set can never contain a bigger one.
        return False
    selfdata = self._data
    for elt in other:
        if elt not in selfdata:
            return False
    return True
def __init__(self, iterable=None):
    """Construct an immutable set from an optional iterable."""
    # Hash is computed lazily on first use; None means "not yet cached".
    self._hashcode = None
    self._data = {}
    if iterable is not None:
        self._update(iterable)
def __init__(self, iterable=None):
    """Construct a set from an optional iterable."""
    self._data = {}
    if iterable is not None:
        self._update(iterable)
def __ior__(self, other):
    """Update a set with the union of itself and another."""
    self._binary_sanity_check(other)
    # Merging the backing dicts adds every element of `other`.
    self._data.update(other._data)
    return self
def union_update(self, other):
    """Update a set with the union of itself and another."""
    # Unlike __ior__, this accepts any iterable, not just a BaseSet.
    self._update(other)
def __iand__(self, other):
    """Update a set with the intersection of itself and another."""
    self._binary_sanity_check(other)
    # Build the intersection as a new set, then adopt its backing dict.
    self._data = (self & other)._data
    return self
def intersection_update(self, other):
    """Update a set with the intersection of itself and another."""
    if isinstance(other, BaseSet):
        self &= other
    else:
        # Plain iterable: compute the intersection, then steal its dict.
        self._data = self.intersection(other)._data
def __ixor__(self, other):
    """Update a set with the symmetric difference of itself and another."""
    self._binary_sanity_check(other)
    self.symmetric_difference_update(other)
    return self
def symmetric_difference_update(self, other):
    """Update a set with the symmetric difference of itself and another."""
    data = self._data
    if not isinstance(other, BaseSet):
        other = Set(other)
    if self is other:
        # x ^ x is empty; clear first so the loop below sees nothing.
        self.clear()
    # Toggle membership: drop shared elements, adopt the rest.
    for elt in other:
        if elt in data:
            del data[elt]
        else:
            data[elt] = True
def __isub__(self, other):
    """Remove all elements of another set from this set."""
    self._binary_sanity_check(other)
    self.difference_update(other)
    return self
def difference_update(self, other):
    """Remove all elements of another set from this set."""
    data = self._data
    if not isinstance(other, BaseSet):
        other = Set(other)
    if self is other:
        # x - x is empty; clear first so the loop below sees nothing.
        self.clear()
    for elt in other:
        if elt in data:
            del data[elt]
def update(self, iterable):
    """Add all values from an iterable (such as a list or file)."""
    self._update(iterable)
def clear(self):
    """Remove all elements from this set."""
    self._data.clear()
def add(self, element):
    """Add an element to a set.

    This has no effect if the element is already present.
    """
    try:
        self._data[element] = True
    except TypeError:
        # Unhashable (mutable) elements may provide an immutable copy
        # of themselves to be stored instead.
        transform = getattr(element, '__as_immutable__', None)
        if transform is None:
            raise
        self._data[transform()] = True
def remove(self, element):
    """Remove an element from a set; it must be a member.

    If the element is not a member, raise a KeyError.
    """
    try:
        del self._data[element]
    except TypeError:
        # Unhashable elements may expose a temporarily immutable view
        # under which they were stored; retry with that key.
        transform = getattr(element, '__as_temporarily_immutable__', None)
        if transform is None:
            raise
        del self._data[transform()]
def discard(self, element):
    """Remove an element from a set if it is a member.

    If the element is not a member, do nothing.
    """
    try:
        self.remove(element)
    except KeyError:
        pass  # absent elements are tolerated by design
def pop(self):
    """Remove and return an arbitrary set element."""
    # popitem() yields a (key, value) pair; only the key matters here.
    key, _value = self._data.popitem()
    return key
def server_bind(self):
    """Override server_bind to store the server name."""
    # Let the base HTTPServer bind the socket and record the host name,
    # then capture the base CGI-style environment for later requests.
    HTTPServer.server_bind(self)
    self.setup_environ()
def handle(self):
    """Handle a single HTTP request."""
    # Read one byte past 64 KiB so an over-long request line is detectable.
    self.raw_requestline = self.rfile.readline(65537)
    if (len(self.raw_requestline) > 65536):
        # Request line too long: reply 414 without attempting to parse it.
        self.requestline = ''
        self.request_version = ''
        self.command = ''
        self.send_error(414)
        return
    if (not self.parse_request()):
        # NOTE(review): parse_request() presumably sent its own error
        # reply (BaseHTTPRequestHandler convention) — nothing more to do.
        return
    # Hand the WSGI protocol work to a ServerHandler bound to this
    # connection's streams and environment.
    handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
    handler.request_handler = self  # back-reference used by the handler
    handler.run(self.server.get_app())
def __len__(self):
    """Return the total number of headers, including duplicates."""
    return len(self._headers)
def __setitem__(self, name, val):
    """Set the value of a header."""
    # Drop any existing occurrences first so the header appears exactly once.
    del self[name]
    self._headers.append((name, val))
def __delitem__(self, name):
    """Delete all occurrences of a header, if present.

    Does *not* raise an exception if the header is missing.
    """
    lowered = name.lower()
    # Case-insensitive filter; slice-assignment mutates the list in place.
    self._headers[:] = [
        kv for kv in self._headers if kv[0].lower() != lowered
    ]
def __getitem__(self, name):
    """Get the first header value for 'name'.

    Return None if the header is missing instead of raising an
    exception.  Note that if the header appeared multiple times,
    exactly which occurrence gets returned is undefined.  Use
    get_all() to get all the values matching a header field name.
    """
    return self.get(name)
def has_key(self, name):
    """Return true if the message contains the header."""
    return self.get(name) is not None
def get_all(self, name):
    """Return a list of all the values for the named field.

    These will be sorted in the order they appeared in the original
    header list or were added to this instance, and may contain
    duplicates.  Any fields deleted and re-inserted are always
    appended to the header list.  If no fields exist with the given
    name, returns an empty list.
    """
    wanted = name.lower()
    return [val for key, val in self._headers if key.lower() == wanted]
def get(self, name, default=None):
    """Get the first header value for 'name', or return 'default'."""
    wanted = name.lower()
    # Linear scan; header lists are expected to be short.
    for key, val in self._headers:
        if key.lower() == wanted:
            return val
    return default
def keys(self):
    """Return a list of all the header field names.

    These will be sorted in the order they appeared in the original
    header list, or were added to this instance, and may contain
    duplicates.  Any fields deleted and re-inserted are always
    appended to the header list.
    """
    return [name for name, _value in self._headers]
def values(self):
    """Return a list of all header values.

    These will be sorted in the order they appeared in the original
    header list, or were added to this instance, and may contain
    duplicates.  Any fields deleted and re-inserted are always
    appended to the header list.
    """
    return [value for _name, value in self._headers]
def items(self):
    """Get all the header fields and values.

    These will be sorted in the order they were in the original header
    list, or were added to this instance, and may contain duplicates.
    Any fields deleted and re-inserted are always appended to the
    header list.
    """
    # Return a shallow copy so callers cannot mutate our list.
    return list(self._headers)
def __str__(self):
    """str() returns the formatted headers, complete with end line,
    suitable for direct HTTP transmission."""
    lines = [('%s: %s' % kv) for kv in self._headers]
    # Two trailing empty strings yield the blank line terminating the
    # header block plus the final CRLF.
    lines.append('')
    lines.append('')
    return '\r\n'.join(lines)
def setdefault(self, name, value):
    """Return first matching header value for 'name', or 'value'.

    If there is no header named 'name', add a new header with name
    'name' and value 'value'.
    """
    existing = self.get(name)
    if existing is not None:
        return existing
    self._headers.append((name, value))
    return value
def add_header(self, _name, _value, **_params):
    """Extended header setting.

    _name is the header field to add.  Keyword arguments can be used
    to set additional parameters for the header field, with
    underscores converted to dashes.  Normally the parameter will be
    added as key="value" unless value is None, in which case only the
    key will be added.

    Example:  h.add_header('content-disposition', 'attachment',
                           filename='bud.gif')

    Note that unlike the corresponding 'email.message' method, this
    does *not* handle '(charset, language, value)' tuples: all values
    must be strings or None.
    """
    parts = []
    if _value is not None:
        parts.append(_value)
    for key, val in _params.items():
        dashed = key.replace('_', '-')
        if val is None:
            # Value-less parameter: just the bare key.
            parts.append(dashed)
        else:
            parts.append(_formatparam(dashed, val))
    self._headers.append((_name, '; '.join(parts)))
def run(self, application):
    """Invoke the application.

    application -- the WSGI application callable; it is called with
    (environ, start_response) and its return value is iterated by
    finish_response().
    """
    try:
        self.setup_environ()
        self.result = application(self.environ, self.start_response)
        self.finish_response()
    except:
        # Any failure during setup, the application call, or response
        # delivery is routed to handle_error().
        try:
            self.handle_error()
        except:
            # Error handling itself failed: release our resources and
            # re-raise so the server-level machinery sees the new error.
            self.close()
            raise
def setup_environ(self):
    """Set up the environment for one request."""
    # Start from the per-server base environment, then layer on the
    # request-specific CGI variables and the wsgi.* keys.
    env = self.environ = self.os_environ.copy()
    self.add_cgi_vars()
    env['wsgi.version'] = self.wsgi_version
    env['wsgi.url_scheme'] = self.get_scheme()
    env['wsgi.input'] = self.get_stdin()
    env['wsgi.errors'] = self.get_stderr()
    env['wsgi.multithread'] = self.wsgi_multithread
    env['wsgi.multiprocess'] = self.wsgi_multiprocess
    env['wsgi.run_once'] = self.wsgi_run_once
    if self.wsgi_file_wrapper is not None:
        env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
    if self.origin_server and self.server_software:
        # Only default SERVER_SOFTWARE; never clobber an existing value.
        env.setdefault('SERVER_SOFTWARE', self.server_software)
def finish_response(self):
    """Send any iterable data, then close self and the iterable.

    Subclasses intended for use in asynchronous servers will want to
    redefine this method, such that it sets up callbacks in the event
    loop to iterate over the data, and to call 'self.close()' once the
    response is finished.
    """
    try:
        if self.result_is_file() and self.sendfile():
            # Platform-specific transmission handled the whole body.
            pass
        else:
            for chunk in self.result:
                self.write(chunk)
            self.finish_content()
    finally:
        # Always release the iterable and reset state, even on error.
        self.close()
def get_scheme(self):
    """Return the URL scheme being used."""
    # Delegate to the module-level helper, which inspects the environ.
    return guess_scheme(self.environ)
def set_content_length(self):
    """Compute Content-Length or switch to chunked encoding if possible."""
    # NOTE(review): despite the docstring, no chunked fallback is
    # implemented here; unknown/multi-block lengths leave headers alone.
    try:
        block_count = len(self.result)
    except (TypeError, AttributeError, NotImplementedError):
        # Result length is unknowable (e.g. a generator); do nothing.
        return
    if block_count == 1:
        # Exactly one block: bytes_sent measured the whole body.
        self.headers['Content-Length'] = str(self.bytes_sent)
def cleanup_headers(self):
    """Make any necessary header changes or defaults.

    Subclasses can extend this to add other defaults.
    """
    # Only compute a length if the application didn't set one itself.
    if 'Content-Length' not in self.headers:
        self.set_content_length()
def start_response(self, status, headers, exc_info=None):
    """'start_response()' callable as specified by PEP 333.

    status -- e.g. '200 OK'; headers -- list of (name, value) pairs;
    exc_info -- optional sys.exc_info() tuple from an error handler.
    Returns the 'write()' callable, per the WSGI spec.
    """
    if exc_info:
        try:
            if self.headers_sent:
                # Headers are already on the wire: all we can do is
                # re-raise the original error (Python 2 3-arg raise).
                raise exc_info[0], exc_info[1], exc_info[2]
        finally:
            # Break the traceback reference cycle (PEP 333 requirement).
            exc_info = None
    elif (self.headers is not None):
        # Calling start_response twice without exc_info is an app bug.
        raise AssertionError('Headers already set!')
    # Sanity-check the status line and headers before accepting them.
    assert (type(status) is StringType), 'Status must be a string'
    assert (len(status) >= 4), 'Status must be at least 4 characters'
    assert int(status[:3]), 'Status message must begin w/3-digit code'
    assert (status[3] == ' '), 'Status message must have a space after code'
    if __debug__:
        # Header validation is skipped under python -O.
        for (name, val) in headers:
            assert (type(name) is StringType), 'Header names must be strings'
            assert (type(val) is StringType), 'Header values must be strings'
            assert (not is_hop_by_hop(name)), 'Hop-by-hop headers not allowed'
    self.status = status
    self.headers = self.headers_class(headers)
    return self.write
def send_preamble(self):
    """Transmit version/status/date/server, via self._write()."""
    if not self.origin_server:
        # Running behind a gateway (CGI-style): emit only a Status header.
        self._write('Status: %s\r\n' % self.status)
        return
    if not self.client_is_modern():
        # NOTE(review): presumably an old (HTTP/0.9-style) client that
        # takes no status line or headers — nothing to transmit.
        return
    self._write('HTTP/%s %s\r\n' % (self.http_version, self.status))
    if 'Date' not in self.headers:
        self._write('Date: %s\r\n' % format_date_time(time.time()))
    if self.server_software and 'Server' not in self.headers:
        self._write('Server: %s\r\n' % self.server_software)
def write(self, data):
    """'write()' callable as specified by PEP 333."""
    assert type(data) is StringType, 'write() argument must be string'
    if not self.status:
        raise AssertionError('write() before start_response()')
    if not self.headers_sent:
        # First body bytes: record their length and flush the headers
        # just before the body starts.
        self.bytes_sent = len(data)
        self.send_headers()
    else:
        self.bytes_sent += len(data)
    # XXX check Content-Length and truncate if too many bytes written?
    self._write(data)
    self._flush()
def sendfile(self):
    """Platform-specific file transmission.

    Override this method in subclasses to support platform-specific
    file transmission.  It is only called if the application's return
    iterable ('self.result') is an instance of
    'self.wsgi_file_wrapper'.

    This method should return a true value if it was able to actually
    transmit the wrapped file-like object using a platform-specific
    approach.  It should return a false value if normal iteration
    should be used instead.  An exception can be raised to indicate
    that transmission was attempted, but failed.

    NOTE: this method should call 'self.send_headers()' if
    'self.headers_sent' is false and it is going to attempt direct
    transmission of the file.
    """
    # Base implementation: no platform-specific transmission available.
    return False
def finish_content(self):
    """Ensure headers and content have both been sent."""
    if self.headers_sent:
        return  # everything already went out with the body
    # No body bytes were written: advertise an explicit empty body.
    self.headers.setdefault('Content-Length', '0')
    self.send_headers()
def close(self):
    """Close the iterable (if needed) and reset all instance vars.

    Subclasses may want to also drop the client connection.
    """
    try:
        result_close = getattr(self.result, 'close', None)
        if result_close is not None:
            result_close()
    finally:
        # Reset per-request state even if closing the iterable failed.
        self.result = self.headers = self.status = self.environ = None
        self.bytes_sent = 0
        self.headers_sent = False
def send_headers(self):
    """Transmit headers to the client, via self._write()."""
    self.cleanup_headers()
    self.headers_sent = True
    # Origin servers skip the preamble only for pre-HTTP/1.0 clients;
    # gateways always emit it (as a Status: header).
    if not self.origin_server or self.client_is_modern():
        self.send_preamble()
    self._write(str(self.headers))