'Isolate change clusters by eliminating ranges with no changes. Return a generator of groups with upto n lines of context. Each group is in the same format as returned by get_opcodes(). >>> from pprint import pprint >>> a = map(str, range(1,40)) >>> b = a[:] >>> b[8:8] = [\'i\'] # Make an insertion >>> b[20] += \'x\' # Make a replacement >>> b[23:28] = [] # Make a deletion >>> b[30] += \'y\' # Make another replacement >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes())) [[(\'equal\', 5, 8, 5, 8), (\'insert\', 8, 8, 8, 9), (\'equal\', 8, 11, 9, 12)], [(\'equal\', 16, 19, 17, 20), (\'replace\', 19, 20, 20, 21), (\'equal\', 20, 22, 21, 23), (\'delete\', 22, 27, 23, 23), (\'equal\', 27, 30, 23, 26)], [(\'equal\', 31, 34, 27, 30), (\'replace\', 34, 35, 30, 31), (\'equal\', 35, 38, 31, 34)]]'
def get_grouped_opcodes(self, n=3):
    codes = self.get_opcodes()
    if not codes:
        codes = [('equal', 0, 1, 0, 1)]
    if codes[0][0] == 'equal':
        tag, i1, i2, j1, j2 = codes[0]
        codes[0] = tag, max(i1, i2 - n), i2, max(j1, j2 - n), j2
    if codes[-1][0] == 'equal':
        tag, i1, i2, j1, j2 = codes[-1]
        codes[-1] = tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)
    nn = n + n
    group = []
    for tag, i1, i2, j1, j2 in codes:
        # End the current group and start a new one whenever
        # there is a large range with no changes.
        if tag == 'equal' and i2 - i1 > nn:
            group.append((tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)))
            yield group
            group = []
            i1, j1 = max(i1, i2 - n), max(j1, j2 - n)
        group.append((tag, i1, i2, j1, j2))
    if group and not (len(group) == 1 and group[0][0] == 'equal'):
        yield group
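These grouped opcodes are the raw material that difflib's unified_diff() and context_diff() are built from. A minimal sketch of consuming them directly, with made-up input sequences (Python 2 syntax, to match the doctests above):

from difflib import SequenceMatcher

a = 'one two three four five six'.split()
b = 'one two 3 four five seven six'.split()

sm = SequenceMatcher(None, a, b)
# Each group is a list of opcodes covering one "hunk" of changes,
# padded with at most n lines of equal context on each side.
for group in sm.get_grouped_opcodes(n=1):
    print '--- hunk ---'
    for tag, i1, i2, j1, j2 in group:
        print tag, a[i1:i2], b[j1:j2]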
'Return a measure of the sequences\' similarity (float in [0,1]). Where T is the total number of elements in both sequences, and M is the number of matches, this is 2.0*M / T. Note that this is 1 if the sequences are identical, and 0 if they have nothing in common. .ratio() is expensive to compute if you haven\'t already computed .get_matching_blocks() or .get_opcodes(), in which case you may want to try .quick_ratio() or .real_quick_ratio() first to get an upper bound. >>> s = SequenceMatcher(None, "abcd", "bcde") >>> s.ratio() 0.75 >>> s.quick_ratio() 0.75 >>> s.real_quick_ratio() 1.0'
def ratio(self):
    matches = reduce(lambda sum, triple: sum + triple[-1],
                     self.get_matching_blocks(), 0)
    return _calculate_ratio(matches, len(self.a) + len(self.b))
'Return an upper bound on ratio() relatively quickly. This isn\'t defined beyond that it is an upper bound on .ratio(), and is faster to compute.'
def quick_ratio(self):
    # Viewing a and b as multisets, count the size of their intersection;
    # this ignores order, so it is clearly an upper bound on .ratio().
    if self.fullbcount is None:
        self.fullbcount = fullbcount = {}
        for elt in self.b:
            fullbcount[elt] = fullbcount.get(elt, 0) + 1
    fullbcount = self.fullbcount
    # avail[x] is the number of times x appears in b, less the number of
    # times it has been seen in a so far.
    avail = {}
    availhas, matches = avail.__contains__, 0
    for elt in self.a:
        if availhas(elt):
            numb = avail[elt]
        else:
            numb = fullbcount.get(elt, 0)
        avail[elt] = numb - 1
        if numb > 0:
            matches = matches + 1
    return _calculate_ratio(matches, len(self.a) + len(self.b))
'Return an upper bound on ratio() very quickly. This isn\'t defined beyond that it is an upper bound on .ratio(), and is faster to compute than either .ratio() or .quick_ratio().'
def real_quick_ratio(self):
    # Can't have more matches than the number of elements in the shorter sequence.
    la, lb = len(self.a), len(self.b)
    return _calculate_ratio(min(la, lb), la + lb)
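The three ratio methods are meant to be used as a cheap-to-expensive cascade, exactly as _fancy_replace() below does. A small illustrative helper, assuming nothing beyond the public SequenceMatcher API (Python 2 print syntax):

from difflib import SequenceMatcher

def close_enough(a, b, cutoff=0.6):
    # Cheapest upper bounds first; only pay for .ratio() when the
    # earlier bounds cannot already rule the pair out.
    sm = SequenceMatcher(None, a, b)
    return (sm.real_quick_ratio() >= cutoff and
            sm.quick_ratio() >= cutoff and
            sm.ratio() >= cutoff)

print close_enough('apple', 'ample')   # True
print close_enough('apple', 'zebra')   # False (rejected by quick_ratio)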
'Construct a text differencer, with optional filters. The two optional keyword parameters are for filter functions: - `linejunk`: A function that should accept a single string argument, and return true iff the string is junk. The module-level function `IS_LINE_JUNK` may be used to filter out lines without visible characters, except for at most one splat (\'#\'). It is recommended to leave linejunk None; as of Python 2.3, the underlying SequenceMatcher class has grown an adaptive notion of "noise" lines that\'s better than any static definition the author has ever been able to craft. - `charjunk`: A function that should accept a string of length 1. The module-level function `IS_CHARACTER_JUNK` may be used to filter out whitespace characters (a blank or tab; **note**: bad idea to include newline in this!). Use of IS_CHARACTER_JUNK is recommended.'
def __init__(self, linejunk=None, charjunk=None):
    self.linejunk = linejunk
    self.charjunk = charjunk
'Compare two sequences of lines; generate the resulting delta. Each sequence must contain individual single-line strings ending with newlines. Such sequences can be obtained from the `readlines()` method of file-like objects. The delta generated also consists of newline- terminated strings, ready to be printed as-is via the writeline() method of a file-like object. Example: >>> print \'\'.join(Differ().compare(\'one\ntwo\nthree\n\'.splitlines(1), ... \'ore\ntree\nemu\n\'.splitlines(1))), - one + ore - two - three + tree + emu'
def compare(self, a, b):
    cruncher = SequenceMatcher(self.linejunk, a, b)
    for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
        if tag == 'replace':
            g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
        elif tag == 'delete':
            g = self._dump('-', a, alo, ahi)
        elif tag == 'insert':
            g = self._dump('+', b, blo, bhi)
        elif tag == 'equal':
            g = self._dump(' ', a, alo, ahi)
        else:
            raise ValueError, 'unknown tag %r' % (tag,)
        for line in g:
            yield line
'Generate comparison results for a same-tagged range.'
def _dump(self, tag, x, lo, hi):
    for i in xrange(lo, hi):
        yield '%s %s' % (tag, x[i])
'When replacing one block of lines with another, search the blocks for *similar* lines; the best-matching pair (if any) is used as a synch point, and intraline difference marking is done on the similar pair. Lots of work, but often worth it. Example: >>> d = Differ() >>> results = d._fancy_replace([\'abcDefghiJkl\n\'], 0, 1, ... [\'abcdefGhijkl\n\'], 0, 1) >>> print \'\'.join(results), - abcDefghiJkl + abcdefGhijkl'
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
    best_ratio, cutoff = 0.74, 0.75
    cruncher = SequenceMatcher(self.charjunk)
    eqi, eqj = None, None   # first indices of equal lines (if any)

    # Search for the pair that matches best without being identical
    # (identical lines must be junk lines, and we don't want to synch
    # up on junk unless we have to).
    for j in xrange(blo, bhi):
        bj = b[j]
        cruncher.set_seq2(bj)
        for i in xrange(alo, ahi):
            ai = a[i]
            if ai == bj:
                if eqi is None:
                    eqi, eqj = i, j
                continue
            cruncher.set_seq1(ai)
            # Computing similarity is expensive, so use the quick
            # upper bounds first.
            if (cruncher.real_quick_ratio() > best_ratio and
                    cruncher.quick_ratio() > best_ratio and
                    cruncher.ratio() > best_ratio):
                best_ratio, best_i, best_j = cruncher.ratio(), i, j
    if best_ratio < cutoff:
        # No non-identical "pretty close" pair.
        if eqi is None:
            # No identical pair either -- treat it as a straight replace.
            for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
                yield line
            return
        # No close pair, but an identical pair -- synch up on that.
        best_i, best_j, best_ratio = eqi, eqj, 1.0
    else:
        # There's a close pair, so forget the identical pair (if any).
        eqi = None

    # Pump out diffs from before the synch point.
    for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
        yield line

    # Do intraline marking on the synch pair.
    aelt, belt = a[best_i], b[best_j]
    if eqi is None:
        # Pump out a '-', '?', '+', '?' quad for the synched lines.
        atags = btags = ''
        cruncher.set_seqs(aelt, belt)
        for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
            la, lb = ai2 - ai1, bj2 - bj1
            if tag == 'replace':
                atags += '^' * la
                btags += '^' * lb
            elif tag == 'delete':
                atags += '-' * la
            elif tag == 'insert':
                btags += '+' * lb
            elif tag == 'equal':
                atags += ' ' * la
                btags += ' ' * lb
            else:
                raise ValueError, 'unknown tag %r' % (tag,)
        for line in self._qformat(aelt, belt, atags, btags):
            yield line
    else:
        # The synch pair is identical.
        yield '  ' + aelt

    # Pump out diffs from after the synch point.
    for line in self._fancy_helper(a, best_i + 1, ahi, b, best_j + 1, bhi):
        yield line
'Format "?" output and deal with leading tabs. Example: >>> d = Differ() >>> results = d._qformat(\'\tabcDefghiJkl\n\', \'\tabcdefGhijkl\n\', >>> for line in results: print repr(line) \'- \tabcDefghiJkl\n\' \'? \t ^ ^ ^\n\' \'+ \tabcdefGhijkl\n\' \'? \t ^ ^ ^\n\''
def _qformat(self, aline, bline, atags, btags):
    common = min(_count_leading(aline, '\t'),
                 _count_leading(bline, '\t'))
    common = min(common, _count_leading(atags[:common], ' '))
    common = min(common, _count_leading(btags[:common], ' '))
    atags = atags[common:].rstrip()
    btags = btags[common:].rstrip()

    yield '- ' + aline
    if atags:
        yield '? %s%s\n' % ('\t' * common, atags)

    yield '+ ' + bline
    if btags:
        yield '? %s%s\n' % ('\t' * common, btags)
'HtmlDiff instance initializer Arguments: tabsize -- tab stop spacing, defaults to 8. wrapcolumn -- column number where lines are broken and wrapped, defaults to None where lines are not wrapped. linejunk,charjunk -- keyword arguments passed into ndiff() (used by HtmlDiff() to generate the side by side HTML differences). See ndiff() documentation for argument default values and descriptions.'
def __init__(self, tabsize=8, wrapcolumn=None, linejunk=None, charjunk=IS_CHARACTER_JUNK):
    self._tabsize = tabsize
    self._wrapcolumn = wrapcolumn
    self._linejunk = linejunk
    self._charjunk = charjunk
'Returns HTML file of side by side comparison with change highlights Arguments: fromlines -- list of "from" lines tolines -- list of "to" lines fromdesc -- "from" file column header string todesc -- "to" file column header string context -- set to True for contextual differences (defaults to False which shows full differences). numlines -- number of context lines. When context is set True, controls number of lines displayed before and after the change. When context is False, controls the number of lines to place the "next" link anchors before the next change (so click of "next" link jumps to just before the change).'
def make_file(self, fromlines, tolines, fromdesc='', todesc='', context=False, numlines=5):
    return self._file_template % dict(
        styles=self._styles,
        legend=self._legend,
        table=self.make_table(fromlines, tolines, fromdesc, todesc,
                              context=context, numlines=numlines))
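A typical way to drive make_file(), assuming two hypothetical text files before.txt and after.txt exist on disk:

import difflib

fromlines = open('before.txt').readlines()
tolines = open('after.txt').readlines()

d = difflib.HtmlDiff(tabsize=4, wrapcolumn=72)
html = d.make_file(fromlines, tolines,
                   fromdesc='before.txt', todesc='after.txt',
                   context=True, numlines=3)
open('diff.html', 'w').write(html)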
'Returns from/to line lists with tabs expanded and newlines removed. Instead of tab characters being replaced by the number of spaces needed to fill in to the next tab stop, this function will fill the space with tab characters. This is done so that the difference algorithms can identify changes in a file when tabs are replaced by spaces and vice versa. At the end of the HTML generation, the tab characters will be replaced with a nonbreakable space.'
def _tab_newline_replace(self, fromlines, tolines):
    def expand_tabs(line):
        # Hide real spaces.
        line = line.replace(' ', '\x00')
        # Expand tabs into spaces.
        line = line.expandtabs(self._tabsize)
        # Replace spaces from expanded tabs back into tab characters
        # (they will be replaced with markup after differencing).
        line = line.replace(' ', '\t')
        return line.replace('\x00', ' ').rstrip('\n')
    fromlines = [expand_tabs(line) for line in fromlines]
    tolines = [expand_tabs(line) for line in tolines]
    return (fromlines, tolines)
'Builds list of text lines by splitting text lines at wrap point This function will determine if the input text line needs to be wrapped (split) into separate lines. If so, the first wrap point will be determined and the first line appended to the output text line list. This function is used recursively to handle the second part of the split line to further split it.'
def _split_line(self, data_list, line_num, text):
    # If blank line or context separator, just add it to the output list.
    if not line_num:
        data_list.append((line_num, text))
        return

    # If the line text doesn't need wrapping, just add it to the output list.
    size = len(text)
    max = self._wrapcolumn
    if (size <= max) or ((size - (text.count('\x00') * 3)) <= max):
        data_list.append((line_num, text))
        return

    # Scan the text looking for the wrap point, keeping track of whether
    # the wrap point is inside markers.
    i = 0
    n = 0
    mark = ''
    while n < max and i < size:
        if text[i] == '\x00':
            i += 1
            mark = text[i]
            i += 1
        elif text[i] == '\x01':
            i += 1
            mark = ''
        else:
            i += 1
            n += 1

    # Wrap point is inside text, break it up into separate lines.
    line1 = text[:i]
    line2 = text[i:]

    # If the wrap point is inside markers, place an end marker at the end
    # of the first line and a start marker at the beginning of the second.
    if mark:
        line1 = line1 + '\x01'
        line2 = '\x00' + mark + line2

    # Tack on the first line, then recurse to wrap the remaining text.
    data_list.append((line_num, line1))
    self._split_line(data_list, '>', line2)
'Returns iterator that splits (wraps) mdiff text lines'
def _line_wrapper(self, diffs):
    for fromdata, todata, flag in diffs:
        # Check for context separators and pass them through.
        if flag is None:
            yield fromdata, todata, flag
            continue
        (fromline, fromtext), (toline, totext) = fromdata, todata
        # Split each from/to line at the wrap column to form lists of text lines.
        fromlist, tolist = [], []
        self._split_line(fromlist, fromline, fromtext)
        self._split_line(tolist, toline, totext)
        # Yield from/to lines in pairs, inserting blank lines as necessary
        # when one side has more wrapped lines than the other.
        while fromlist or tolist:
            if fromlist:
                fromdata = fromlist.pop(0)
            else:
                fromdata = ('', ' ')
            if tolist:
                todata = tolist.pop(0)
            else:
                todata = ('', ' ')
            yield fromdata, todata, flag
'Collects mdiff output into separate lists Before storing the mdiff from/to data into a list, it is converted into a single line of text with HTML markup.'
def _collect_lines(self, diffs):
    fromlist, tolist, flaglist = [], [], []
    # Pull from/to data and flags from the mdiff-style iterator.
    for fromdata, todata, flag in diffs:
        try:
            # Store HTML markup of the lines into the lists.
            fromlist.append(self._format_line(0, flag, *fromdata))
            tolist.append(self._format_line(1, flag, *todata))
        except TypeError:
            # Exceptions occur for lines where context separators go.
            fromlist.append(None)
            tolist.append(None)
        flaglist.append(flag)
    return fromlist, tolist, flaglist
'Returns HTML markup of "from" / "to" text lines side -- 0 or 1 indicating "from" or "to" text flag -- indicates if difference on line linenum -- line number (used for line number column) text -- line text to be marked up'
def _format_line(self, side, flag, linenum, text):
    try:
        linenum = '%d' % linenum
        id = ' id="%s%s"' % (self._prefix[side], linenum)
    except TypeError:
        # Handle blank lines where linenum is '>' or ''.
        id = ''
    # Replace characters that would get confused with HTML symbols.
    text = text.replace('&', '&amp;').replace('>', '&gt;').replace('<', '&lt;')
    # Make spaces non-breakable so they don't get compressed or line wrapped.
    text = text.replace(' ', '&nbsp;').rstrip()
    return ('<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>'
            % (id, linenum, text))
'Create unique anchor prefixes'
def _make_prefix(self):
    fromprefix = 'from%d_' % HtmlDiff._default_prefix
    toprefix = 'to%d_' % HtmlDiff._default_prefix
    HtmlDiff._default_prefix += 1
    # Store prefixes so the line format method has access to them.
    self._prefix = [fromprefix, toprefix]
'Makes list of "next" links'
def _convert_flags(self, fromlist, tolist, flaglist, context, numlines):
    # All anchor names will be generated using the unique "to" prefix.
    toprefix = self._prefix[1]

    # Process change flags, generating the middle column of next anchors/links.
    next_id = [''] * len(flaglist)
    next_href = [''] * len(flaglist)
    num_chg, in_change = 0, False
    last = 0
    for i, flag in enumerate(flaglist):
        if flag:
            if not in_change:
                in_change = True
                last = i
                # At the beginning of a change, drop an anchor a few lines
                # (the context lines) before the change, and a link to the
                # next change.
                i = max([0, i - numlines])
                next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix, num_chg)
                num_chg += 1
                next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
                    toprefix, num_chg)
        else:
            in_change = False
    # Check for cases where there is no content to avoid exceptions.
    if not flaglist:
        flaglist = [False]
        next_id = ['']
        next_href = ['']
        last = 0
        if context:
            fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
            tolist = fromlist
        else:
            fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
    # If there is no change on the first line, drop a link.
    if not flaglist[0]:
        next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
    # Redo the last link to link to the top.
    next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % toprefix

    return fromlist, tolist, flaglist, next_href, next_id
'Returns HTML table of side by side comparison with change highlights Arguments: fromlines -- list of "from" lines tolines -- list of "to" lines fromdesc -- "from" file column header string todesc -- "to" file column header string context -- set to True for contextual differences (defaults to False which shows full differences). numlines -- number of context lines. When context is set True, controls number of lines displayed before and after the change. When context is False, controls the number of lines to place the "next" link anchors before the next change (so click of "next" link jumps to just before the change).'
def make_table(self, fromlines, tolines, fromdesc='', todesc='', context=False, numlines=5):
    # Make unique anchor prefixes so that multiple tables may exist on the
    # same page without conflict.
    self._make_prefix()

    # Change tabs to spaces before it gets more difficult after markup is inserted.
    fromlines, tolines = self._tab_newline_replace(fromlines, tolines)

    # Create a diffs iterator which generates side by side from/to data.
    if context:
        context_lines = numlines
    else:
        context_lines = None
    diffs = _mdiff(fromlines, tolines, context_lines,
                   linejunk=self._linejunk, charjunk=self._charjunk)

    # Set up an iterator to wrap lines that exceed the desired width.
    if self._wrapcolumn:
        diffs = self._line_wrapper(diffs)

    # Collect up from/to lines and flags into lists (also formats the lines).
    fromlist, tolist, flaglist = self._collect_lines(diffs)

    # Process change flags, generating the middle column of next anchors/links.
    fromlist, tolist, flaglist, next_href, next_id = self._convert_flags(
        fromlist, tolist, flaglist, context, numlines)

    s = []
    fmt = (' <tr><td class="diff_next"%s>%s</td>%s'
           '<td class="diff_next">%s</td>%s</tr>\n')
    for i in range(len(flaglist)):
        if flaglist[i] is None:
            # mdiff yields None on separator lines; skip the bogus ones
            # generated for the first line.
            if i > 0:
                s.append(' </tbody> \n <tbody>\n')
        else:
            s.append(fmt % (next_id[i], next_href[i], fromlist[i],
                            next_href[i], tolist[i]))
    if fromdesc or todesc:
        header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
            '<th class="diff_next"><br /></th>',
            '<th colspan="2" class="diff_header">%s</th>' % fromdesc,
            '<th class="diff_next"><br /></th>',
            '<th colspan="2" class="diff_header">%s</th>' % todesc)
    else:
        header_row = ''

    table = self._table_template % dict(
        data_rows=''.join(s),
        header_row=header_row,
        prefix=self._prefix[1])

    return (table.replace('\x00+', '<span class="diff_add">')
                 .replace('\x00-', '<span class="diff_sub">')
                 .replace('\x00^', '<span class="diff_chg">')
                 .replace('\x01', '</span>')
                 .replace('\t', '&nbsp;'))
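make_table() returns only the table fragment, so the caller is responsible for wrapping it in a page that supplies the CSS that make_file() would otherwise include. A minimal sketch (Python 2, using the same sample lines as the Differ doctest above):

import difflib

table = difflib.HtmlDiff().make_table(
    'one\ntwo\nthree\n'.splitlines(1),
    'ore\ntree\nemu\n'.splitlines(1),
    fromdesc='a', todesc='b')
# The fragment has no styling of its own; a real page would also embed the
# CSS block that make_file() emits.
page = '<html><head></head><body>%s</body></html>' % table
print page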
'This is an abstract class.'
def __init__(self):
    if self.__class__ is BaseSet:
        raise TypeError, 'BaseSet is an abstract class.  Use Set or ImmutableSet.'
'Return the number of elements of a set.'
def __len__(self):
return len(self._data)
'Return string representation of a set. This looks like \'Set([<list of elements>])\'.'
def __repr__(self):
return self._repr()
'Return an iterator over the elements of a set. This is the keys iterator for the underlying dict.'
def __iter__(self):
return self._data.iterkeys()
'Return a shallow copy of a set.'
def copy(self):
    result = self.__class__()
    result._data.update(self._data)
    return result
'Return a deep copy of a set; used by copy module.'
def __deepcopy__(self, memo):
    # Pre-create the result and insert it in the memo early, in case the
    # deep copy recurses into another reference to this same set.
    from copy import deepcopy
    result = self.__class__()
    memo[id(self)] = result
    data = result._data
    value = True
    for elt in self:
        data[deepcopy(elt, memo)] = value
    return result
'Return the union of two sets as a new set. (I.e. all elements that are in either set.)'
def __or__(self, other):
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.union(other)
'Return the union of two sets as a new set. (I.e. all elements that are in either set.)'
def union(self, other):
    result = self.__class__(self)
    result._update(other)
    return result
'Return the intersection of two sets as a new set. (I.e. all elements that are in both sets.)'
def __and__(self, other):
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.intersection(other)
'Return the intersection of two sets as a new set. (I.e. all elements that are in both sets.)'
def intersection(self, other):
    if not isinstance(other, BaseSet):
        other = Set(other)
    if len(self) <= len(other):
        little, big = self, other
    else:
        little, big = other, self
    common = ifilter(big._data.__contains__, little)
    return self.__class__(common)
'Return the symmetric difference of two sets as a new set. (I.e. all elements that are in exactly one of the sets.)'
def __xor__(self, other):
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.symmetric_difference(other)
'Return the symmetric difference of two sets as a new set. (I.e. all elements that are in exactly one of the sets.)'
def symmetric_difference(self, other):
    result = self.__class__()
    data = result._data
    value = True
    selfdata = self._data
    try:
        otherdata = other._data
    except AttributeError:
        otherdata = Set(other)._data
    for elt in ifilterfalse(otherdata.__contains__, selfdata):
        data[elt] = value
    for elt in ifilterfalse(selfdata.__contains__, otherdata):
        data[elt] = value
    return result
'Return the difference of two sets as a new Set. (I.e. all elements that are in this set and not in the other.)'
def __sub__(self, other):
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.difference(other)
'Return the difference of two sets as a new Set. (I.e. all elements that are in this set and not in the other.)'
def difference(self, other):
    result = self.__class__()
    data = result._data
    try:
        otherdata = other._data
    except AttributeError:
        otherdata = Set(other)._data
    value = True
    for elt in ifilterfalse(otherdata.__contains__, self):
        data[elt] = value
    return result
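Taken together, the operator methods above give Set the usual set algebra; the named methods additionally accept any iterable, not just another Set. A quick tour, assuming Python 2.x where the (long-deprecated) sets module is available:

from sets import Set  # Python 2 only; deprecated since 2.6

engineers = Set(['alice', 'bob', 'carol'])
managers = Set(['carol', 'dave'])

print engineers | managers            # union
print engineers & managers            # intersection
print engineers - managers            # difference
print engineers ^ managers            # symmetric difference
# Named methods also accept plain iterables:
print engineers.intersection(['bob', 'eve'])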
'Report whether an element is a member of a set. (Called in response to the expression `element in self\'.)'
def __contains__(self, element):
    try:
        return element in self._data
    except TypeError:
        transform = getattr(element, '__as_temporarily_immutable__', None)
        if transform is None:
            raise  # re-raise the TypeError exception we caught
        return transform() in self._data
'Report whether another set contains this set.'
def issubset(self, other):
    self._binary_sanity_check(other)
    if len(self) > len(other):  # Fast check for obvious cases
        return False
    for elt in ifilterfalse(other._data.__contains__, self):
        return False
    return True
'Report whether this set contains another set.'
def issuperset(self, other):
    self._binary_sanity_check(other)
    if len(self) < len(other):  # Fast check for obvious cases
        return False
    for elt in ifilterfalse(self._data.__contains__, other):
        return False
    return True
'Construct an immutable set from an optional iterable.'
def __init__(self, iterable=None):
    self._hashcode = None
    self._data = {}
    if iterable is not None:
        self._update(iterable)
'Construct a set from an optional iterable.'
def __init__(self, iterable=None):
    self._data = {}
    if iterable is not None:
        self._update(iterable)
'Update a set with the union of itself and another.'
def __ior__(self, other):
    self._binary_sanity_check(other)
    self._data.update(other._data)
    return self
'Update a set with the union of itself and another.'
def union_update(self, other):
self._update(other)
'Update a set with the intersection of itself and another.'
def __iand__(self, other):
    self._binary_sanity_check(other)
    self._data = (self & other)._data
    return self
'Update a set with the intersection of itself and another.'
def intersection_update(self, other):
    if isinstance(other, BaseSet):
        self &= other
    else:
        self._data = self.intersection(other)._data
'Update a set with the symmetric difference of itself and another.'
def __ixor__(self, other):
    self._binary_sanity_check(other)
    self.symmetric_difference_update(other)
    return self
'Update a set with the symmetric difference of itself and another.'
def symmetric_difference_update(self, other):
    data = self._data
    value = True
    if not isinstance(other, BaseSet):
        other = Set(other)
    if self is other:
        self.clear()
    for elt in other:
        if elt in data:
            del data[elt]
        else:
            data[elt] = value
'Remove all elements of another set from this set.'
def __isub__(self, other):
    self._binary_sanity_check(other)
    self.difference_update(other)
    return self
'Remove all elements of another set from this set.'
def difference_update(self, other):
    data = self._data
    if not isinstance(other, BaseSet):
        other = Set(other)
    if self is other:
        self.clear()
    for elt in ifilter(data.__contains__, other):
        del data[elt]
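The in-place variants differ from the operator forms in one respect visible in the bodies above: the augmented operators insist on another BaseSet (via _binary_sanity_check), while the named *_update methods accept any iterable. A short sketch (Python 2.x, sets module):

from sets import Set  # Python 2 only

s = Set([1, 2, 3, 4])
s |= Set([5])                          # union_update via __ior__
s.intersection_update([2, 3, 4, 5])    # plain iterables are fine here
s -= Set([4])                          # difference_update via __isub__
s.symmetric_difference_update([3, 6])
print s                                # Set([2, 5, 6]) -- element order may vary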
'Add all values from an iterable (such as a list or file).'
def update(self, iterable):
self._update(iterable)
'Remove all elements from this set.'
def clear(self):
self._data.clear()
'Add an element to a set. This has no effect if the element is already present.'
def add(self, element):
    try:
        self._data[element] = True
    except TypeError:
        transform = getattr(element, '__as_immutable__', None)
        if transform is None:
            raise  # re-raise the TypeError exception we caught
        self._data[transform()] = True
'Remove an element from a set; it must be a member. If the element is not a member, raise a KeyError.'
def remove(self, element):
    try:
        del self._data[element]
    except TypeError:
        transform = getattr(element, '__as_temporarily_immutable__', None)
        if transform is None:
            raise  # re-raise the TypeError exception we caught
        del self._data[transform()]
'Remove an element from a set if it is a member. If the element is not a member, do nothing.'
def discard(self, element):
    try:
        self.remove(element)
    except KeyError:
        pass
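add(), remove(), and __contains__() all share the same trick: when hashing the element raises TypeError, they retry with the element's __as_immutable__ or __as_temporarily_immutable__ form, which is what lets a mutable Set be stored inside another Set. A sketch of that behaviour (Python 2.x, sets module; the printed representations may show elements in a different order):

from sets import Set, ImmutableSet  # Python 2 only

inner = Set([1, 2])
outer = Set()
# A mutable Set is unhashable, but add() catches the TypeError and stores
# the result of inner.__as_immutable__(), i.e. an ImmutableSet copy.
outer.add(inner)
print outer                     # Set([ImmutableSet([1, 2])])
# Lookups and removal use __as_temporarily_immutable__ the same way.
print Set([1, 2]) in outer      # True
outer.discard(Set([1, 2]))
print len(outer)                # 0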
'Remove and return an arbitrary set element.'
def pop(self):
return self._data.popitem()[0]
'Override server_bind to store the server name.'
def server_bind(self):
    HTTPServer.server_bind(self)
    self.setup_environ()
'Handle a single HTTP request'
def handle(self):
    self.raw_requestline = self.rfile.readline()
    if not self.parse_request():
        # An error code has been sent, just exit.
        return
    handler = ServerHandler(
        self.rfile, self.wfile, self.get_stderr(), self.get_environ())
    handler.request_handler = self  # backpointer for logging
    handler.run(self.server.get_app())
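This handler is normally wired up through wsgiref.simple_server.make_server(), which pairs a WSGIServer with this WSGIRequestHandler. A minimal sketch; the port is an arbitrary choice, and demo_app is the trivial WSGI application shipped with wsgiref (any PEP 333 callable works):

from wsgiref.simple_server import make_server, demo_app

httpd = make_server('', 8000, demo_app)
print 'Serving HTTP on port 8000...'
httpd.handle_request()   # serve exactly one request, then return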
'Return the total number of headers, including duplicates.'
def __len__(self):
return len(self._headers)
'Set the value of a header.'
def __setitem__(self, name, val):
    del self[name]
    self._headers.append((name, val))
'Delete all occurrences of a header, if present. Does *not* raise an exception if the header is missing.'
def __delitem__(self, name):
    name = name.lower()
    self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
'Get the first header value for \'name\'. Return None if the header is missing instead of raising an exception. Note that if the header appeared multiple times, exactly which occurrence gets returned is undefined. Use getall() to get all the values matching a header field name.'
def __getitem__(self, name):
return self.get(name)
'Return true if the message contains the header.'
def has_key(self, name):
return (self.get(name) is not None)
'Return a list of all the values for the named field. These will be sorted in the order they appeared in the original header list or were added to this instance, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. If no fields exist with the given name, returns an empty list.'
def get_all(self, name):
    name = name.lower()
    return [kv[1] for kv in self._headers if kv[0].lower() == name]
'Get the first header value for \'name\', or return \'default\''
def get(self, name, default=None):
    name = name.lower()
    for k, v in self._headers:
        if k.lower() == name:
            return v
    return default
'Return a list of all the header field names. These will be sorted in the order they appeared in the original header list, or were added to this instance, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list.'
def keys(self):
return [k for (k, v) in self._headers]
'Return a list of all header values. These will be sorted in the order they appeared in the original header list, or were added to this instance, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list.'
def values(self):
return [v for (k, v) in self._headers]
'Get all the header fields and values. These will be sorted in the order they were in the original header list, or were added to this instance, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list.'
def items(self):
return self._headers[:]
'str() returns the formatted headers, complete with end line, suitable for direct HTTP transmission.'
def __str__(self):
return '\r\n'.join(([('%s: %s' % kv) for kv in self._headers] + ['', '']))
'Return first matching header value for \'name\', or \'value\' If there is no header named \'name\', add a new header with name \'name\' and value \'value\'.'
def setdefault(self, name, value):
    result = self.get(name)
    if result is None:
        self._headers.append((name, value))
        return value
    else:
        return result
'Extended header setting. _name is the header field to add. keyword arguments can be used to set additional parameters for the header field, with underscores converted to dashes. Normally the parameter will be added as key="value" unless value is None, in which case only the key will be added. Example: h.add_header(\'content-disposition\', \'attachment\', filename=\'bud.gif\') Note that unlike the corresponding \'email.message\' method, this does *not* handle \'(charset, language, value)\' tuples: all values must be strings or None.'
def add_header(self, _name, _value, **_params):
    parts = []
    if _value is not None:
        parts.append(_value)
    for k, v in _params.items():
        if v is None:
            parts.append(k.replace('_', '-'))
        else:
            parts.append(_formatparam(k.replace('_', '-'), v))
    self._headers.append((_name, '; '.join(parts)))
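A short sketch of the Headers class in use; note that in Python 2.x the constructor requires an explicit (possibly empty) list of (name, value) pairs, which it wraps rather than copies:

from wsgiref.headers import Headers

h = Headers([])
h['Content-Type'] = 'text/plain'
h.add_header('Content-Disposition', 'attachment', filename='bud.gif')
h.setdefault('X-Powered-By', 'wsgiref')
print h['content-type']   # lookup is case-insensitive -> 'text/plain'
print str(h)              # ready-to-send header block, CRLF terminated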
'Invoke the application'
def run(self, application):
    try:
        self.setup_environ()
        self.result = application(self.environ, self.start_response)
        self.finish_response()
    except:
        try:
            self.handle_error()
        except:
            # If we get an error handling an error, just give up already!
            self.close()
            raise  # ...and let the actual server figure it out.
'Set up the environment for one request'
def setup_environ(self):
    env = self.environ = self.os_environ.copy()
    self.add_cgi_vars()

    env['wsgi.input'] = self.get_stdin()
    env['wsgi.errors'] = self.get_stderr()
    env['wsgi.version'] = self.wsgi_version
    env['wsgi.run_once'] = self.wsgi_run_once
    env['wsgi.url_scheme'] = self.get_scheme()
    env['wsgi.multithread'] = self.wsgi_multithread
    env['wsgi.multiprocess'] = self.wsgi_multiprocess

    if self.wsgi_file_wrapper is not None:
        env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
    if self.origin_server and self.server_software:
        env.setdefault('SERVER_SOFTWARE', self.server_software)
'Send any iterable data, then close self and the iterable Subclasses intended for use in asynchronous servers will want to redefine this method, such that it sets up callbacks in the event loop to iterate over the data, and to call \'self.close()\' once the response is finished.'
def finish_response(self):
    if not self.result_is_file() or not self.sendfile():
        for data in self.result:
            self.write(data)
        self.finish_content()
    self.close()
'Return the URL scheme being used'
def get_scheme(self):
return guess_scheme(self.environ)
'Compute Content-Length or switch to chunked encoding if possible'
def set_content_length(self):
    try:
        blocks = len(self.result)
    except (TypeError, AttributeError, NotImplementedError):
        pass
    else:
        if blocks == 1:
            self.headers['Content-Length'] = str(self.bytes_sent)
            return
'Make any necessary header changes or defaults Subclasses can extend this to add other defaults.'
def cleanup_headers(self):
    if 'Content-Length' not in self.headers:
        self.set_content_length()
'\'start_response()\' callable as specified by PEP 333'
def start_response(self, status, headers, exc_info=None):
    if exc_info:
        try:
            if self.headers_sent:
                # Re-raise original exception if headers were already sent.
                raise exc_info[0], exc_info[1], exc_info[2]
        finally:
            exc_info = None  # avoid dangling circular ref
    elif self.headers is not None:
        raise AssertionError('Headers already set!')

    assert type(status) is StringType, 'Status must be a string'
    assert len(status) >= 4, 'Status must be at least 4 characters'
    assert int(status[:3]), 'Status message must begin w/3-digit code'
    assert status[3] == ' ', 'Status message must have a space after code'
    if __debug__:
        for name, val in headers:
            assert type(name) is StringType, 'Header names must be strings'
            assert type(val) is StringType, 'Header values must be strings'
            assert not is_hop_by_hop(name), 'Hop-by-hop headers not allowed'
    self.status = status
    self.headers = self.headers_class(headers)
    return self.write
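From the application side, the contract enforced by these assertions looks like the sketch below: a minimal PEP 333 application that uses both the write() callable returned by start_response() and the normal iterable return value.

def app(environ, start_response):
    # status must be a string such as '200 OK' (3-digit code, space, reason);
    # headers must be a list of (name, value) byte-string pairs, with no
    # hop-by-hop headers such as Connection or Transfer-Encoding.
    write = start_response('200 OK', [('Content-Type', 'text/plain')])
    write('sent through the legacy write() callable\n')
    return ['and this part is streamed from the returned iterable\n']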
'Transmit version/status/date/server, via self._write()'
def send_preamble(self):
    if self.origin_server:
        if self.client_is_modern():
            self._write('HTTP/%s %s\r\n' % (self.http_version, self.status))
            if 'Date' not in self.headers:
                self._write('Date: %s\r\n' % format_date_time(time.time()))
            if self.server_software and 'Server' not in self.headers:
                self._write('Server: %s\r\n' % self.server_software)
    else:
        self._write('Status: %s\r\n' % self.status)
'\'write()\' callable as specified by PEP 333'
def write(self, data):
    assert type(data) is StringType, 'write() argument must be string'

    if not self.status:
        raise AssertionError('write() before start_response()')
    elif not self.headers_sent:
        # Before the first output, send the stored headers.
        self.bytes_sent = len(data)  # make sure we know content-length
        self.send_headers()
    else:
        self.bytes_sent += len(data)

    self._write(data)
    self._flush()
'Platform-specific file transmission Override this method in subclasses to support platform-specific file transmission. It is only called if the application\'s return iterable (\'self.result\') is an instance of \'self.wsgi_file_wrapper\'. This method should return a true value if it was able to actually transmit the wrapped file-like object using a platform-specific approach. It should return a false value if normal iteration should be used instead. An exception can be raised to indicate that transmission was attempted, but failed. NOTE: this method should call \'self.send_headers()\' if \'self.headers_sent\' is false and it is going to attempt direct transmission of the file.'
def sendfile(self):
return False
'Ensure headers and content have both been sent'
def finish_content(self):
    if not self.headers_sent:
        self.headers.setdefault('Content-Length', '0')
        self.send_headers()
    else:
        pass
'Close the iterable (if needed) and reset all instance vars Subclasses may want to also drop the client connection.'
def close(self):
    try:
        if hasattr(self.result, 'close'):
            self.result.close()
    finally:
        self.result = self.headers = self.status = self.environ = None
        self.bytes_sent = 0
        self.headers_sent = False
'Transmit headers to the client, via self._write()'
def send_headers(self):
    self.cleanup_headers()
    self.headers_sent = True
    if not self.origin_server or self.client_is_modern():
        self.send_preamble()
        self._write(str(self.headers))
'True if \'self.result\' is an instance of \'self.wsgi_file_wrapper\''
def result_is_file(self):
    wrapper = self.wsgi_file_wrapper
    return wrapper is not None and isinstance(self.result, wrapper)
'True if client can accept status and headers'
def client_is_modern(self):
return (self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9')
'Log the \'exc_info\' tuple in the server log Subclasses may override to retarget the output or change its format.'
def log_exception(self, exc_info):
    try:
        from traceback import print_exception
        stderr = self.get_stderr()
        print_exception(exc_info[0], exc_info[1], exc_info[2],
                        self.traceback_limit, stderr)
        stderr.flush()
    finally:
        exc_info = None
'Log current error, and send error output to client if possible'
def handle_error(self):
    self.log_exception(sys.exc_info())
    if not self.headers_sent:
        self.result = self.error_output(self.environ, self.start_response)
        self.finish_response()
'WSGI mini-app to create error output By default, this just uses the \'error_status\', \'error_headers\', and \'error_body\' attributes to generate an output page. It can be overridden in a subclass to dynamically generate diagnostics, choose an appropriate message for the user\'s preferred language, etc. Note, however, that it\'s not recommended from a security perspective to spit out diagnostics to any old user; ideally, you should have to do something special to enable diagnostic output, which is why we don\'t include any here!'
def error_output(self, environ, start_response):
    start_response(self.error_status, self.error_headers[:], sys.exc_info())
    return [self.error_body]
'Override in subclass to buffer data for send to client It\'s okay if this method actually transmits the data; BaseHandler just separates write and flush operations for greater efficiency when the underlying system actually has such a distinction.'
def _write(self, data):
raise NotImplementedError
'Override in subclass to force sending of recent \'_write()\' calls It\'s okay if this method is a no-op (i.e., if \'_write()\' actually sends the data.'
def _flush(self):
raise NotImplementedError
'Override in subclass to return suitable \'wsgi.input\''
def get_stdin(self):
raise NotImplementedError
'Override in subclass to return suitable \'wsgi.errors\''
def get_stderr(self):
raise NotImplementedError
'Override in subclass to insert CGI variables in \'self.environ\''
def add_cgi_vars(self):
raise NotImplementedError
'Serve a POST request. This is only implemented for CGI scripts.'
def do_POST(self):
    if self.is_cgi():
        self.run_cgi()
    else:
        self.send_error(501, 'Can only POST to CGI scripts')
'Version of send_head that supports CGI scripts'
def send_head(self):
    if self.is_cgi():
        return self.run_cgi()
    else:
        return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
'Test whether self.path corresponds to a CGI script. Returns True and updates the cgi_info attribute to the tuple (dir, rest) if self.path requires running a CGI script. Returns False otherwise. If any exception is raised, the caller should assume that self.path was rejected as invalid and act accordingly. The default implementation tests whether the normalized url path begins with one of the strings in self.cgi_directories (and the next character is a \'/\' or the end of the string).'
def is_cgi(self):
    splitpath = _url_collapse_path_split(self.path)
    if splitpath[0] in self.cgi_directories:
        self.cgi_info = splitpath
        return True
    return False
'Test whether argument path is an executable file.'
def is_executable(self, path):
return executable(path)
'Test whether argument path is a Python script.'
def is_python(self, path):
    head, tail = os.path.splitext(path)
    return tail.lower() in ('.py', '.pyw')
'Execute a CGI script.'
def run_cgi(self):
    path = self.path
    dir, rest = self.cgi_info

    # Descend into nested directories below the CGI directory.
    i = path.find('/', len(dir) + 1)
    while i >= 0:
        nextdir = path[:i]
        nextrest = path[i + 1:]
        scriptdir = self.translate_path(nextdir)
        if os.path.isdir(scriptdir):
            dir, rest = nextdir, nextrest
            i = path.find('/', len(dir) + 1)
        else:
            break

    # Split off an explicit query string, if present.
    i = rest.rfind('?')
    if i >= 0:
        rest, query = rest[:i], rest[i + 1:]
    else:
        query = ''

    # Dissect the remainder into a script name and possible PATH_INFO.
    i = rest.find('/')
    if i >= 0:
        script, rest = rest[:i], rest[i:]
    else:
        script, rest = rest, ''

    scriptname = dir + '/' + script
    scriptfile = self.translate_path(scriptname)
    if not os.path.exists(scriptfile):
        self.send_error(404, 'No such CGI script (%r)' % scriptname)
        return
    if not os.path.isfile(scriptfile):
        self.send_error(403, 'CGI script is not a plain file (%r)' % scriptname)
        return
    ispy = self.is_python(scriptname)
    if not ispy:
        if not (self.have_fork or self.have_popen2 or self.have_popen3):
            self.send_error(403, 'CGI script is not a Python script (%r)' % scriptname)
            return
        if not self.is_executable(scriptfile):
            self.send_error(403, 'CGI script is not executable (%r)' % scriptname)
            return

    # Build the CGI environment.
    env = copy.deepcopy(os.environ)
    env['SERVER_SOFTWARE'] = self.version_string()
    env['SERVER_NAME'] = self.server.server_name
    env['GATEWAY_INTERFACE'] = 'CGI/1.1'
    env['SERVER_PROTOCOL'] = self.protocol_version
    env['SERVER_PORT'] = str(self.server.server_port)
    env['REQUEST_METHOD'] = self.command
    uqrest = urllib.unquote(rest)
    env['PATH_INFO'] = uqrest
    env['PATH_TRANSLATED'] = self.translate_path(uqrest)
    env['SCRIPT_NAME'] = scriptname
    if query:
        env['QUERY_STRING'] = query
    host = self.address_string()
    if host != self.client_address[0]:
        env['REMOTE_HOST'] = host
    env['REMOTE_ADDR'] = self.client_address[0]
    authorization = self.headers.getheader('authorization')
    if authorization:
        authorization = authorization.split()
        if len(authorization) == 2:
            import base64, binascii
            env['AUTH_TYPE'] = authorization[0]
            if authorization[0].lower() == 'basic':
                try:
                    authorization = base64.decodestring(authorization[1])
                except binascii.Error:
                    pass
                else:
                    authorization = authorization.split(':')
                    if len(authorization) == 2:
                        env['REMOTE_USER'] = authorization[0]
    if self.headers.typeheader is None:
        env['CONTENT_TYPE'] = self.headers.type
    else:
        env['CONTENT_TYPE'] = self.headers.typeheader
    length = self.headers.getheader('content-length')
    if length:
        env['CONTENT_LENGTH'] = length
    referer = self.headers.getheader('referer')
    if referer:
        env['HTTP_REFERER'] = referer
    accept = []
    for line in self.headers.getallmatchingheaders('accept'):
        if line[:1] in '\t\n\r ':
            accept.append(line.strip())
        else:
            accept = accept + line[7:].split(',')
    env['HTTP_ACCEPT'] = ','.join(accept)
    ua = self.headers.getheader('user-agent')
    if ua:
        env['HTTP_USER_AGENT'] = ua
    co = filter(None, self.headers.getheaders('cookie'))
    if co:
        env['HTTP_COOKIE'] = ', '.join(co)
    # Since we're setting the env in the parent, provide empty values to
    # override any previously set values.
    for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
              'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
        env.setdefault(k, '')

    self.send_response(200, 'Script output follows')

    decoded_query = query.replace('+', ' ')

    if self.have_fork:
        # Unix -- fork as we should.
        args = [script]
        if '=' not in decoded_query:
            args.append(decoded_query)
        nobody = nobody_uid()
        self.wfile.flush()  # Always flush before forking.
        pid = os.fork()
        if pid != 0:
            # Parent: reap the child and discard any extra request data.
            pid, sts = os.waitpid(pid, 0)
            while select.select([self.rfile], [], [], 0)[0]:
                if not self.rfile.read(1):
                    break
            if sts:
                self.log_error('CGI script exit status %#x', sts)
            return
        # Child: drop privileges, wire up stdio, and exec the script.
        try:
            try:
                os.setuid(nobody)
            except os.error:
                pass
            os.dup2(self.rfile.fileno(), 0)
            os.dup2(self.wfile.fileno(), 1)
            os.execve(scriptfile, args, env)
        except:
            self.server.handle_error(self.request, self.client_address)
            os._exit(127)
    else:
        # Non-Unix -- use subprocess.
        import subprocess
        cmdline = [scriptfile]
        if self.is_python(scriptfile):
            interp = sys.executable
            if interp.lower().endswith('w.exe'):
                # On Windows, use python.exe, not pythonw.exe.
                interp = interp[:-5] + interp[-4:]
            cmdline = [interp, '-u'] + cmdline
        if '=' not in query:
            cmdline.append(query)
        self.log_message('command: %s', subprocess.list2cmdline(cmdline))
        try:
            nbytes = int(length)
        except (TypeError, ValueError):
            nbytes = 0
        p = subprocess.Popen(cmdline,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=env)
        if self.command.lower() == 'post' and nbytes > 0:
            data = self.rfile.read(nbytes)
        else:
            data = None
        # Throw away any additional data on the socket.
        while select.select([self.rfile._sock], [], [], 0)[0]:
            if not self.rfile._sock.recv(1):
                break
        stdout, stderr = p.communicate(data)
        self.wfile.write(stdout)
        if stderr:
            self.log_error('%s', stderr)
        p.stderr.close()
        p.stdout.close()
        status = p.returncode
        if status:
            self.log_error('CGI script exit status %#x', status)
        else:
            self.log_message('CGI script exited OK')
'Override this method to support alternative .mo formats.'
def _parse(self, fp):
    unpack = struct.unpack
    filename = getattr(fp, 'name', '')
    # Parse the .mo file header, which consists of 5 little endian 32 bit words.
    self._catalog = catalog = {}
    self.plural = lambda n: int(n != 1)  # germanic plural by default
    buf = fp.read()
    buflen = len(buf)
    # Are we big endian or little endian?
    magic = unpack('<I', buf[:4])[0]
    if magic == self.LE_MAGIC:
        version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
        ii = '<II'
    elif magic == self.BE_MAGIC:
        version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
        ii = '>II'
    else:
        raise IOError(0, 'Bad magic number', filename)
    # Now put all messages from the .mo file buffer into the catalog dictionary.
    for i in xrange(0, msgcount):
        mlen, moff = unpack(ii, buf[masteridx:masteridx + 8])
        mend = moff + mlen
        tlen, toff = unpack(ii, buf[transidx:transidx + 8])
        tend = toff + tlen
        if mend < buflen and tend < buflen:
            msg = buf[moff:mend]
            tmsg = buf[toff:tend]
        else:
            raise IOError(0, 'File is corrupt', filename)
        # See if we're looking at GNU .mo conventions for metadata.
        if mlen == 0:
            # Catalog description.
            lastk = k = None
            for item in tmsg.splitlines():
                item = item.strip()
                if not item:
                    continue
                if ':' in item:
                    k, v = item.split(':', 1)
                    k = k.strip().lower()
                    v = v.strip()
                    self._info[k] = v
                    lastk = k
                elif lastk:
                    self._info[lastk] += '\n' + item
                if k == 'content-type':
                    self._charset = v.split('charset=')[1]
                elif k == 'plural-forms':
                    v = v.split(';')
                    plural = v[1].split('plural=')[1]
                    self.plural = c2py(plural)
        # Plural forms are stored with a NUL-separated msgid; convert both
        # msgids and msgstrs to Unicode using the catalog charset.
        if '\x00' in msg:
            msgid1, msgid2 = msg.split('\x00')
            tmsg = tmsg.split('\x00')
            if self._charset:
                msgid1 = unicode(msgid1, self._charset)
                tmsg = [unicode(x, self._charset) for x in tmsg]
            for i in range(len(tmsg)):
                catalog[(msgid1, i)] = tmsg[i]
        else:
            if self._charset:
                msg = unicode(msg, self._charset)
                tmsg = unicode(tmsg, self._charset)
            catalog[msg] = tmsg
        # Advance to the next entry in the seek tables.
        masteridx += 8
        transidx += 8
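The header being unpacked above is the standard GNU .mo layout: a magic word followed by four 32-bit words (version, message count, and the offsets of the msgid and msgstr index tables). A standalone sketch that reads just that header; the path is a placeholder, and in normal use gettext.GNUTranslations(open(path, 'rb')) runs the full _parse() shown above:

import struct

with open('messages.mo', 'rb') as fp:   # hypothetical catalog path
    buf = fp.read(20)
magic = struct.unpack('<I', buf[:4])[0]
if magic == 0x950412de:        # little endian magic (LE_MAGIC)
    version, msgcount, masteridx, transidx = struct.unpack('<4I', buf[4:20])
elif magic == 0xde120495:      # big endian magic (BE_MAGIC)
    version, msgcount, masteridx, transidx = struct.unpack('>4I', buf[4:20])
else:
    raise IOError(0, 'Bad magic number', 'messages.mo')
print msgcount, 'messages; msgid table at', masteridx, ', msgstr table at', transidx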
'Construct an instance of the class from any iterable input. Must override this method if the class constructor signature does not accept an iterable for an input.'
@classmethod
def _from_iterable(cls, it):
return cls(it)
'Compute the hash value of a set. Note that we don\'t define __hash__: not all sets are hashable. But if you define a hashable set type, its __hash__ should call this function. This must be compatible __eq__. All sets ought to compare equal if they contain the same elements, regardless of how they are implemented, and regardless of the order of the elements; so there\'s not much freedom for __eq__ or __hash__. We match the algorithm used by the built-in frozenset type.'
def _hash(self):
    MAX = sys.maxint
    MASK = 2 * MAX + 1
    n = len(self)
    h = 1927868237 * (n + 1)
    h &= MASK
    for x in self:
        hx = hash(x)
        h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
        h &= MASK
    h = h * 69069 + 907133923
    h &= MASK
    if h > MAX:
        h -= MASK + 1
    if h == -1:
        h = 590923713
    return h
'Add an element.'
@abstractmethod
def add(self, value):
raise NotImplementedError