rem stringlengths 1–322k | add stringlengths 0–2.05M | context stringlengths 4–228k | meta stringlengths 156–215 |
---|---|---|---|
ctype = msg.get_type() if ctype is None: ctype = msg.get_default_type() assert ctype in ('text/plain', 'message/rfc822') | ctype = msg.get_content_type() | def _dispatch(self, msg): # Get the Content-Type: for the message, then try to dispatch to # self._handle_<maintype>_<subtype>(). If there's no handler for the # full MIME type, then dispatch to self._handle_<maintype>(). If # that's missing too, then dispatch to self._writeBody(). ctype = msg.get_type() if ctype is None: # No Content-Type: header so use the default type, which must be # either text/plain or message/rfc822. ctype = msg.get_default_type() assert ctype in ('text/plain', 'message/rfc822') # We do have a Content-Type: header. main, sub = ctype.split('/') specific = UNDERSCORE.join((main, sub)).replace('-', '_') meth = getattr(self, '_handle_' + specific, None) if meth is None: generic = main.replace('-', '_') meth = getattr(self, '_handle_' + generic, None) if meth is None: meth = self._writeBody meth(msg) | a82904d5d7e820835cb235c37fcc446b329e3df0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/a82904d5d7e820835cb235c37fcc446b329e3df0/Generator.py |
if DEBUG: sys.stderr.write("%s<%s> at %s\n" % (" "*depth, name, point)) | dbgmsg("%s<%s> at %s" % (" "*depth, name, point)) | def pushing(name, point, depth): if DEBUG: sys.stderr.write("%s<%s> at %s\n" % (" "*depth, name, point)) | 6d0931003cfff1a870c8f6ddb9287b190f316868 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6d0931003cfff1a870c8f6ddb9287b190f316868/latex2esis.py |
if DEBUG: sys.stderr.write("%s</%s> at %s\n" % (" "*depth, name, point)) | dbgmsg("%s</%s> at %s" % (" "*depth, name, point)) | def popping(name, point, depth): if DEBUG: sys.stderr.write("%s</%s> at %s\n" % (" "*depth, name, point)) | 6d0931003cfff1a870c8f6ddb9287b190f316868 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6d0931003cfff1a870c8f6ddb9287b190f316868/latex2esis.py |
stack = [] line = self.line | def subconvert(self, endchar=None, depth=0): if DEBUG and endchar: self.err_write( "subconvert(%s)\n line = %s\n" % (`endchar`, `line[:20]`)) stack = [] line = self.line while line: if line[0] == endchar and not stack: if DEBUG: self.err_write("subconvert() --> %s\n" % `line[1:21]`) self.line = line return line m = _comment_rx.match(line) if m: text = m.group(1) if text: self.write("(COMMENT\n- %s \n)COMMENT\n-\\n\n" % encode(text)) line = line[m.end():] continue m = _begin_env_rx.match(line) if m: # re-write to use the macro handler line = r"\%s %s" % (m.group(1), line[m.end():]) continue m = _end_env_rx.match(line) if m: # end of environment envname = m.group(1) if envname == "document": # special magic for n in stack[1:]: if n not in self.autoclosing: raise LaTeXFormatError( "open element on stack: " + `n`) # should be more careful, but this is easier to code: stack = [] self.write(")document\n") elif envname == stack[-1]: self.write(")%s\n" % envname) del stack[-1] popping(envname, "a", len(stack) + depth) else: self.err_write("stack: %s\n" % `stack`) raise LaTeXFormatError( "environment close for %s doesn't match" % envname) line = line[m.end():] continue m = _begin_macro_rx.match(line) if m: # start of macro macroname = m.group(1) if macroname == "verbatim": # really magic case! pos = string.find(line, "\\end{verbatim}") text = line[m.end(1):pos] self.write("(verbatim\n") self.write("-%s\n" % encode(text)) self.write(")verbatim\n") line = line[pos + len("\\end{verbatim}"):] continue numbered = 1 opened = 0 if macroname[-1] == "*": macroname = macroname[:-1] numbered = 0 if macroname in self.autoclosing and macroname in stack: while stack[-1] != macroname: top = stack.pop() if top and top not in self.discards: self.write(")%s\n-\\n\n" % top) popping(top, "b", len(stack) + depth) if macroname not in self.discards: self.write("-\\n\n)%s\n-\\n\n" % macroname) popping(macroname, "c", len(stack) + depth - 1) del stack[-1] # if macroname in self.discards: self.push_output(StringIO.StringIO()) else: self.push_output(self.ofp) # params, optional, empty, environ = self.start_macro(macroname) if not numbered: self.write("Anumbered TOKEN no\n") # rip off the macroname if params: if optional and len(params) == 1: line = line[m.end():] else: line = line[m.end(1):] elif empty: line = line[m.end(1):] else: line = line[m.end():] # # Very ugly special case to deal with \item[]. The catch # is that this needs to occur outside the for loop that # handles attribute parsing so we can 'continue' the outer # loop. # if optional and type(params[0]) is type(()): # the attribute name isn't used in this special case pushing(macroname, "a", depth + len(stack)) stack.append(macroname) self.write("(%s\n" % macroname) m = _start_optional_rx.match(line) if m: self.line = line[m.end():] line = self.subconvert("]", depth + len(stack)) line = "}" + line continue # handle attribute mappings here: for attrname in params: if optional: optional = 0 if type(attrname) is type(""): m = _optional_rx.match(line) if m: line = line[m.end():] self.write("A%s TOKEN %s\n" % (attrname, encode(m.group(1)))) elif type(attrname) is type(()): # This is a sub-element; but don't place the # element we found on the stack (\section-like) pushing(macroname, "b", len(stack) + depth) stack.append(macroname) self.write("(%s\n" % macroname) macroname = attrname[0] m = _start_group_rx.match(line) if m: line = line[m.end():] elif type(attrname) is type([]): # A normal subelement. 
attrname = attrname[0] if not opened: opened = 1 self.write("(%s\n" % macroname) pushing(macroname, "c", len(stack) + depth) self.write("(%s\n" % attrname) pushing(attrname, "sub-elem", len(stack) + depth + 1) self.line = skip_white(line)[1:] line = subconvert("}", depth + len(stack) + 2) popping(attrname, "sub-elem", len(stack) + depth + 1) self.write(")%s\n" % attrname) else: m = _parameter_rx.match(line) if not m: raise LaTeXFormatError( "could not extract parameter %s for %s: %s" % (attrname, macroname, `line[:100]`)) value = m.group(1) if _token_rx.match(value): dtype = "TOKEN" else: dtype = "CDATA" self.write("A%s %s %s\n" % (attrname, dtype, encode(value))) line = line[m.end():] if params and type(params[-1]) is type('') \ and (not empty) and not environ: # attempt to strip off next '{' m = _start_group_rx.match(line) if not m: raise LaTeXFormatError( "non-empty element '%s' has no content: %s" % (macroname, line[:12])) line = line[m.end():] if not opened: self.write("(%s\n" % macroname) pushing(macroname, "d", len(stack) + depth) if empty: line = "}" + line stack.append(macroname) self.pop_output() continue if line[0] == endchar and not stack: if DEBUG: self.err_write("subconvert() --> %s\n" % `line[1:21]`) self.line = line[1:] return self.line if line[0] == "}": # end of macro or group macroname = stack[-1] conversion = self.table.get(macroname) if macroname \ and macroname not in self.discards \ and type(conversion) is not type(""): # otherwise, it was just a bare group self.write(")%s\n" % stack[-1]) popping(macroname, "d", len(stack) + depth - 1) del stack[-1] line = line[1:] continue if line[0] == "{": pushing("", "e", len(stack) + depth) stack.append("") line = line[1:] continue if line[0] == "\\" and line[1] in ESCAPED_CHARS: self.write("-%s\n" % encode(line[1])) line = line[2:] continue if line[:2] == r"\\": self.write("(BREAK\n)BREAK\n") line = line[2:] continue m = _text_rx.match(line) if m: text = encode(m.group()) self.write("-%s\n" % text) line = line[m.end():] continue # special case because of \item[] if line[0] == "]": self.write("-]\n") line = line[1:] continue # avoid infinite loops extra = "" if len(line) > 100: extra = "..." raise LaTeXFormatError("could not identify markup: %s%s" % (`line[:100]`, extra)) while stack and stack[-1] in self.autoclosing: self.write("-\\n\n") self.write(")%s\n" % stack[-1]) popping(stack.pop(), "e", len(stack) + depth - 1) if stack: raise LaTeXFormatError("elements remain on stack: " + string.join(stack, ", ")) # otherwise we just ran out of input here... | 6d0931003cfff1a870c8f6ddb9287b190f316868 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6d0931003cfff1a870c8f6ddb9287b190f316868/latex2esis.py |
elif envname == stack[-1]: | elif stack and envname == stack[-1]: | def subconvert(self, endchar=None, depth=0): if DEBUG and endchar: self.err_write( "subconvert(%s)\n line = %s\n" % (`endchar`, `line[:20]`)) stack = [] line = self.line while line: if line[0] == endchar and not stack: if DEBUG: self.err_write("subconvert() --> %s\n" % `line[1:21]`) self.line = line return line m = _comment_rx.match(line) if m: text = m.group(1) if text: self.write("(COMMENT\n- %s \n)COMMENT\n-\\n\n" % encode(text)) line = line[m.end():] continue m = _begin_env_rx.match(line) if m: # re-write to use the macro handler line = r"\%s %s" % (m.group(1), line[m.end():]) continue m = _end_env_rx.match(line) if m: # end of environment envname = m.group(1) if envname == "document": # special magic for n in stack[1:]: if n not in self.autoclosing: raise LaTeXFormatError( "open element on stack: " + `n`) # should be more careful, but this is easier to code: stack = [] self.write(")document\n") elif envname == stack[-1]: self.write(")%s\n" % envname) del stack[-1] popping(envname, "a", len(stack) + depth) else: self.err_write("stack: %s\n" % `stack`) raise LaTeXFormatError( "environment close for %s doesn't match" % envname) line = line[m.end():] continue m = _begin_macro_rx.match(line) if m: # start of macro macroname = m.group(1) if macroname == "verbatim": # really magic case! pos = string.find(line, "\\end{verbatim}") text = line[m.end(1):pos] self.write("(verbatim\n") self.write("-%s\n" % encode(text)) self.write(")verbatim\n") line = line[pos + len("\\end{verbatim}"):] continue numbered = 1 opened = 0 if macroname[-1] == "*": macroname = macroname[:-1] numbered = 0 if macroname in self.autoclosing and macroname in stack: while stack[-1] != macroname: top = stack.pop() if top and top not in self.discards: self.write(")%s\n-\\n\n" % top) popping(top, "b", len(stack) + depth) if macroname not in self.discards: self.write("-\\n\n)%s\n-\\n\n" % macroname) popping(macroname, "c", len(stack) + depth - 1) del stack[-1] # if macroname in self.discards: self.push_output(StringIO.StringIO()) else: self.push_output(self.ofp) # params, optional, empty, environ = self.start_macro(macroname) if not numbered: self.write("Anumbered TOKEN no\n") # rip off the macroname if params: if optional and len(params) == 1: line = line[m.end():] else: line = line[m.end(1):] elif empty: line = line[m.end(1):] else: line = line[m.end():] # # Very ugly special case to deal with \item[]. The catch # is that this needs to occur outside the for loop that # handles attribute parsing so we can 'continue' the outer # loop. # if optional and type(params[0]) is type(()): # the attribute name isn't used in this special case pushing(macroname, "a", depth + len(stack)) stack.append(macroname) self.write("(%s\n" % macroname) m = _start_optional_rx.match(line) if m: self.line = line[m.end():] line = self.subconvert("]", depth + len(stack)) line = "}" + line continue # handle attribute mappings here: for attrname in params: if optional: optional = 0 if type(attrname) is type(""): m = _optional_rx.match(line) if m: line = line[m.end():] self.write("A%s TOKEN %s\n" % (attrname, encode(m.group(1)))) elif type(attrname) is type(()): # This is a sub-element; but don't place the # element we found on the stack (\section-like) pushing(macroname, "b", len(stack) + depth) stack.append(macroname) self.write("(%s\n" % macroname) macroname = attrname[0] m = _start_group_rx.match(line) if m: line = line[m.end():] elif type(attrname) is type([]): # A normal subelement. 
attrname = attrname[0] if not opened: opened = 1 self.write("(%s\n" % macroname) pushing(macroname, "c", len(stack) + depth) self.write("(%s\n" % attrname) pushing(attrname, "sub-elem", len(stack) + depth + 1) self.line = skip_white(line)[1:] line = subconvert("}", depth + len(stack) + 2) popping(attrname, "sub-elem", len(stack) + depth + 1) self.write(")%s\n" % attrname) else: m = _parameter_rx.match(line) if not m: raise LaTeXFormatError( "could not extract parameter %s for %s: %s" % (attrname, macroname, `line[:100]`)) value = m.group(1) if _token_rx.match(value): dtype = "TOKEN" else: dtype = "CDATA" self.write("A%s %s %s\n" % (attrname, dtype, encode(value))) line = line[m.end():] if params and type(params[-1]) is type('') \ and (not empty) and not environ: # attempt to strip off next '{' m = _start_group_rx.match(line) if not m: raise LaTeXFormatError( "non-empty element '%s' has no content: %s" % (macroname, line[:12])) line = line[m.end():] if not opened: self.write("(%s\n" % macroname) pushing(macroname, "d", len(stack) + depth) if empty: line = "}" + line stack.append(macroname) self.pop_output() continue if line[0] == endchar and not stack: if DEBUG: self.err_write("subconvert() --> %s\n" % `line[1:21]`) self.line = line[1:] return self.line if line[0] == "}": # end of macro or group macroname = stack[-1] conversion = self.table.get(macroname) if macroname \ and macroname not in self.discards \ and type(conversion) is not type(""): # otherwise, it was just a bare group self.write(")%s\n" % stack[-1]) popping(macroname, "d", len(stack) + depth - 1) del stack[-1] line = line[1:] continue if line[0] == "{": pushing("", "e", len(stack) + depth) stack.append("") line = line[1:] continue if line[0] == "\\" and line[1] in ESCAPED_CHARS: self.write("-%s\n" % encode(line[1])) line = line[2:] continue if line[:2] == r"\\": self.write("(BREAK\n)BREAK\n") line = line[2:] continue m = _text_rx.match(line) if m: text = encode(m.group()) self.write("-%s\n" % text) line = line[m.end():] continue # special case because of \item[] if line[0] == "]": self.write("-]\n") line = line[1:] continue # avoid infinite loops extra = "" if len(line) > 100: extra = "..." raise LaTeXFormatError("could not identify markup: %s%s" % (`line[:100]`, extra)) while stack and stack[-1] in self.autoclosing: self.write("-\\n\n") self.write(")%s\n" % stack[-1]) popping(stack.pop(), "e", len(stack) + depth - 1) if stack: raise LaTeXFormatError("elements remain on stack: " + string.join(stack, ", ")) # otherwise we just ran out of input here... | 6d0931003cfff1a870c8f6ddb9287b190f316868 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6d0931003cfff1a870c8f6ddb9287b190f316868/latex2esis.py |
def subconvert(self, endchar=None, depth=0): if DEBUG and endchar: self.err_write( "subconvert(%s)\n line = %s\n" % (`endchar`, `line[:20]`)) stack = [] line = self.line while line: if line[0] == endchar and not stack: if DEBUG: self.err_write("subconvert() --> %s\n" % `line[1:21]`) self.line = line return line m = _comment_rx.match(line) if m: text = m.group(1) if text: self.write("(COMMENT\n- %s \n)COMMENT\n-\\n\n" % encode(text)) line = line[m.end():] continue m = _begin_env_rx.match(line) if m: # re-write to use the macro handler line = r"\%s %s" % (m.group(1), line[m.end():]) continue m = _end_env_rx.match(line) if m: # end of environment envname = m.group(1) if envname == "document": # special magic for n in stack[1:]: if n not in self.autoclosing: raise LaTeXFormatError( "open element on stack: " + `n`) # should be more careful, but this is easier to code: stack = [] self.write(")document\n") elif envname == stack[-1]: self.write(")%s\n" % envname) del stack[-1] popping(envname, "a", len(stack) + depth) else: self.err_write("stack: %s\n" % `stack`) raise LaTeXFormatError( "environment close for %s doesn't match" % envname) line = line[m.end():] continue m = _begin_macro_rx.match(line) if m: # start of macro macroname = m.group(1) if macroname == "verbatim": # really magic case! pos = string.find(line, "\\end{verbatim}") text = line[m.end(1):pos] self.write("(verbatim\n") self.write("-%s\n" % encode(text)) self.write(")verbatim\n") line = line[pos + len("\\end{verbatim}"):] continue numbered = 1 opened = 0 if macroname[-1] == "*": macroname = macroname[:-1] numbered = 0 if macroname in self.autoclosing and macroname in stack: while stack[-1] != macroname: top = stack.pop() if top and top not in self.discards: self.write(")%s\n-\\n\n" % top) popping(top, "b", len(stack) + depth) if macroname not in self.discards: self.write("-\\n\n)%s\n-\\n\n" % macroname) popping(macroname, "c", len(stack) + depth - 1) del stack[-1] # if macroname in self.discards: self.push_output(StringIO.StringIO()) else: self.push_output(self.ofp) # params, optional, empty, environ = self.start_macro(macroname) if not numbered: self.write("Anumbered TOKEN no\n") # rip off the macroname if params: if optional and len(params) == 1: line = line[m.end():] else: line = line[m.end(1):] elif empty: line = line[m.end(1):] else: line = line[m.end():] # # Very ugly special case to deal with \item[]. The catch # is that this needs to occur outside the for loop that # handles attribute parsing so we can 'continue' the outer # loop. # if optional and type(params[0]) is type(()): # the attribute name isn't used in this special case pushing(macroname, "a", depth + len(stack)) stack.append(macroname) self.write("(%s\n" % macroname) m = _start_optional_rx.match(line) if m: self.line = line[m.end():] line = self.subconvert("]", depth + len(stack)) line = "}" + line continue # handle attribute mappings here: for attrname in params: if optional: optional = 0 if type(attrname) is type(""): m = _optional_rx.match(line) if m: line = line[m.end():] self.write("A%s TOKEN %s\n" % (attrname, encode(m.group(1)))) elif type(attrname) is type(()): # This is a sub-element; but don't place the # element we found on the stack (\section-like) pushing(macroname, "b", len(stack) + depth) stack.append(macroname) self.write("(%s\n" % macroname) macroname = attrname[0] m = _start_group_rx.match(line) if m: line = line[m.end():] elif type(attrname) is type([]): # A normal subelement. 
attrname = attrname[0] if not opened: opened = 1 self.write("(%s\n" % macroname) pushing(macroname, "c", len(stack) + depth) self.write("(%s\n" % attrname) pushing(attrname, "sub-elem", len(stack) + depth + 1) self.line = skip_white(line)[1:] line = subconvert("}", depth + len(stack) + 2) popping(attrname, "sub-elem", len(stack) + depth + 1) self.write(")%s\n" % attrname) else: m = _parameter_rx.match(line) if not m: raise LaTeXFormatError( "could not extract parameter %s for %s: %s" % (attrname, macroname, `line[:100]`)) value = m.group(1) if _token_rx.match(value): dtype = "TOKEN" else: dtype = "CDATA" self.write("A%s %s %s\n" % (attrname, dtype, encode(value))) line = line[m.end():] if params and type(params[-1]) is type('') \ and (not empty) and not environ: # attempt to strip off next '{' m = _start_group_rx.match(line) if not m: raise LaTeXFormatError( "non-empty element '%s' has no content: %s" % (macroname, line[:12])) line = line[m.end():] if not opened: self.write("(%s\n" % macroname) pushing(macroname, "d", len(stack) + depth) if empty: line = "}" + line stack.append(macroname) self.pop_output() continue if line[0] == endchar and not stack: if DEBUG: self.err_write("subconvert() --> %s\n" % `line[1:21]`) self.line = line[1:] return self.line if line[0] == "}": # end of macro or group macroname = stack[-1] conversion = self.table.get(macroname) if macroname \ and macroname not in self.discards \ and type(conversion) is not type(""): # otherwise, it was just a bare group self.write(")%s\n" % stack[-1]) popping(macroname, "d", len(stack) + depth - 1) del stack[-1] line = line[1:] continue if line[0] == "{": pushing("", "e", len(stack) + depth) stack.append("") line = line[1:] continue if line[0] == "\\" and line[1] in ESCAPED_CHARS: self.write("-%s\n" % encode(line[1])) line = line[2:] continue if line[:2] == r"\\": self.write("(BREAK\n)BREAK\n") line = line[2:] continue m = _text_rx.match(line) if m: text = encode(m.group()) self.write("-%s\n" % text) line = line[m.end():] continue # special case because of \item[] if line[0] == "]": self.write("-]\n") line = line[1:] continue # avoid infinite loops extra = "" if len(line) > 100: extra = "..." raise LaTeXFormatError("could not identify markup: %s%s" % (`line[:100]`, extra)) while stack and stack[-1] in self.autoclosing: self.write("-\\n\n") self.write(")%s\n" % stack[-1]) popping(stack.pop(), "e", len(stack) + depth - 1) if stack: raise LaTeXFormatError("elements remain on stack: " + string.join(stack, ", ")) # otherwise we just ran out of input here... | 6d0931003cfff1a870c8f6ddb9287b190f316868 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6d0931003cfff1a870c8f6ddb9287b190f316868/latex2esis.py |
line = subconvert("}", depth + len(stack) + 2) | line = self.subconvert("}", len(stack) + depth + 1)[1:] dbgmsg("subconvert() ==> " + `line[:20]`) | def subconvert(self, endchar=None, depth=0): if DEBUG and endchar: self.err_write( "subconvert(%s)\n line = %s\n" % (`endchar`, `line[:20]`)) stack = [] line = self.line while line: if line[0] == endchar and not stack: if DEBUG: self.err_write("subconvert() --> %s\n" % `line[1:21]`) self.line = line return line m = _comment_rx.match(line) if m: text = m.group(1) if text: self.write("(COMMENT\n- %s \n)COMMENT\n-\\n\n" % encode(text)) line = line[m.end():] continue m = _begin_env_rx.match(line) if m: # re-write to use the macro handler line = r"\%s %s" % (m.group(1), line[m.end():]) continue m = _end_env_rx.match(line) if m: # end of environment envname = m.group(1) if envname == "document": # special magic for n in stack[1:]: if n not in self.autoclosing: raise LaTeXFormatError( "open element on stack: " + `n`) # should be more careful, but this is easier to code: stack = [] self.write(")document\n") elif envname == stack[-1]: self.write(")%s\n" % envname) del stack[-1] popping(envname, "a", len(stack) + depth) else: self.err_write("stack: %s\n" % `stack`) raise LaTeXFormatError( "environment close for %s doesn't match" % envname) line = line[m.end():] continue m = _begin_macro_rx.match(line) if m: # start of macro macroname = m.group(1) if macroname == "verbatim": # really magic case! pos = string.find(line, "\\end{verbatim}") text = line[m.end(1):pos] self.write("(verbatim\n") self.write("-%s\n" % encode(text)) self.write(")verbatim\n") line = line[pos + len("\\end{verbatim}"):] continue numbered = 1 opened = 0 if macroname[-1] == "*": macroname = macroname[:-1] numbered = 0 if macroname in self.autoclosing and macroname in stack: while stack[-1] != macroname: top = stack.pop() if top and top not in self.discards: self.write(")%s\n-\\n\n" % top) popping(top, "b", len(stack) + depth) if macroname not in self.discards: self.write("-\\n\n)%s\n-\\n\n" % macroname) popping(macroname, "c", len(stack) + depth - 1) del stack[-1] # if macroname in self.discards: self.push_output(StringIO.StringIO()) else: self.push_output(self.ofp) # params, optional, empty, environ = self.start_macro(macroname) if not numbered: self.write("Anumbered TOKEN no\n") # rip off the macroname if params: if optional and len(params) == 1: line = line[m.end():] else: line = line[m.end(1):] elif empty: line = line[m.end(1):] else: line = line[m.end():] # # Very ugly special case to deal with \item[]. The catch # is that this needs to occur outside the for loop that # handles attribute parsing so we can 'continue' the outer # loop. 
# if optional and type(params[0]) is type(()): # the attribute name isn't used in this special case pushing(macroname, "a", depth + len(stack)) stack.append(macroname) self.write("(%s\n" % macroname) m = _start_optional_rx.match(line) if m: self.line = line[m.end():] line = self.subconvert("]", depth + len(stack)) line = "}" + line continue # handle attribute mappings here: for attrname in params: if optional: optional = 0 if type(attrname) is type(""): m = _optional_rx.match(line) if m: line = line[m.end():] self.write("A%s TOKEN %s\n" % (attrname, encode(m.group(1)))) elif type(attrname) is type(()): # This is a sub-element; but don't place the # element we found on the stack (\section-like) pushing(macroname, "b", len(stack) + depth) stack.append(macroname) self.write("(%s\n" % macroname) macroname = attrname[0] m = _start_group_rx.match(line) if m: line = line[m.end():] elif type(attrname) is type([]): # A normal subelement. attrname = attrname[0] if not opened: opened = 1 self.write("(%s\n" % macroname) pushing(macroname, "c", len(stack) + depth) self.write("(%s\n" % attrname) pushing(attrname, "sub-elem", len(stack) + depth + 1) self.line = skip_white(line)[1:] line = subconvert("}", depth + len(stack) + 2) popping(attrname, "sub-elem", len(stack) + depth + 1) self.write(")%s\n" % attrname) else: m = _parameter_rx.match(line) if not m: raise LaTeXFormatError( "could not extract parameter %s for %s: %s" % (attrname, macroname, `line[:100]`)) value = m.group(1) if _token_rx.match(value): dtype = "TOKEN" else: dtype = "CDATA" self.write("A%s %s %s\n" % (attrname, dtype, encode(value))) line = line[m.end():] if params and type(params[-1]) is type('') \ and (not empty) and not environ: # attempt to strip off next '{' m = _start_group_rx.match(line) if not m: raise LaTeXFormatError( "non-empty element '%s' has no content: %s" % (macroname, line[:12])) line = line[m.end():] if not opened: self.write("(%s\n" % macroname) pushing(macroname, "d", len(stack) + depth) if empty: line = "}" + line stack.append(macroname) self.pop_output() continue if line[0] == endchar and not stack: if DEBUG: self.err_write("subconvert() --> %s\n" % `line[1:21]`) self.line = line[1:] return self.line if line[0] == "}": # end of macro or group macroname = stack[-1] conversion = self.table.get(macroname) if macroname \ and macroname not in self.discards \ and type(conversion) is not type(""): # otherwise, it was just a bare group self.write(")%s\n" % stack[-1]) popping(macroname, "d", len(stack) + depth - 1) del stack[-1] line = line[1:] continue if line[0] == "{": pushing("", "e", len(stack) + depth) stack.append("") line = line[1:] continue if line[0] == "\\" and line[1] in ESCAPED_CHARS: self.write("-%s\n" % encode(line[1])) line = line[2:] continue if line[:2] == r"\\": self.write("(BREAK\n)BREAK\n") line = line[2:] continue m = _text_rx.match(line) if m: text = encode(m.group()) self.write("-%s\n" % text) line = line[m.end():] continue # special case because of \item[] if line[0] == "]": self.write("-]\n") line = line[1:] continue # avoid infinite loops extra = "" if len(line) > 100: extra = "..." raise LaTeXFormatError("could not identify markup: %s%s" % (`line[:100]`, extra)) while stack and stack[-1] in self.autoclosing: self.write("-\\n\n") self.write(")%s\n" % stack[-1]) popping(stack.pop(), "e", len(stack) + depth - 1) if stack: raise LaTeXFormatError("elements remain on stack: " + string.join(stack, ", ")) # otherwise we just ran out of input here... 
| 6d0931003cfff1a870c8f6ddb9287b190f316868 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6d0931003cfff1a870c8f6ddb9287b190f316868/latex2esis.py |
print dom | confirm(isinstance(dom,Document)) | def testParseFromFile(): from StringIO import StringIO dom=parse( StringIO( open( tstfile ).read() ) ) print dom | 5bf8a73939a19cf1030bfeb06c3bd7ca7b50c069 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5bf8a73939a19cf1030bfeb06c3bd7ca7b50c069/test_minidom.py |
print Node.allnodes.items()[0:10] | if verbose: print Node.allnodes.items()[0:10] else: print len(Node.allnodes) | def testClonePIDeep(): pass | 5bf8a73939a19cf1030bfeb06c3bd7ca7b50c069 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5bf8a73939a19cf1030bfeb06c3bd7ca7b50c069/test_minidom.py |
cur = (32767 * viewoffset) / (destheight - viewheight) | cur = (32767L * viewoffset) / (destheight - viewheight) | def vscroll(self, value): lineheight = self.ted.WEGetHeight(0, 1) dr = self.ted.WEGetDestRect() vr = self.ted.WEGetViewRect() destheight = dr[3] - dr[1] viewheight = vr[3] - vr[1] viewoffset = maxdelta = vr[1] - dr[1] mindelta = vr[3] - dr[3] if value == "+": delta = lineheight elif value == "-": delta = - lineheight elif value == "++": delta = viewheight - lineheight elif value == "--": delta = lineheight - viewheight else: # in thumb cur = (32767 * viewoffset) / (destheight - viewheight) delta = (cur-value)*(destheight - viewheight)/32767 if abs(delta - viewoffset) <=2: # compensate for irritating rounding error delta = viewoffset delta = min(maxdelta, delta) delta = max(mindelta, delta) self.ted.WEScroll(0, delta) self.updatescrollbars() | ef2f1d088151664c93bdc214ed2e0d023bc91fb4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ef2f1d088151664c93bdc214ed2e0d023bc91fb4/Wtext.py |
def __init__(self, _subtype='mixed', boundary=None, *_subparts, **_params): | def __init__(self, _subtype='mixed', boundary=None, _subparts=None, **_params): | def __init__(self, _subtype='mixed', boundary=None, *_subparts, **_params): """Creates a multipart/* type message. | b8c24f675337403d66d32fc07b1cd53b7a1814f0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/b8c24f675337403d66d32fc07b1cd53b7a1814f0/MIMEMultipart.py |
must be possible to convert this sequence to a list. You can always | must be an iterable object, such as a list. You can always | def __init__(self, _subtype='mixed', boundary=None, *_subparts, **_params): """Creates a multipart/* type message. | b8c24f675337403d66d32fc07b1cd53b7a1814f0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/b8c24f675337403d66d32fc07b1cd53b7a1814f0/MIMEMultipart.py |
self.attach(*list(_subparts)) | for p in _subparts: self.attach(p) | def __init__(self, _subtype='mixed', boundary=None, *_subparts, **_params): """Creates a multipart/* type message. | b8c24f675337403d66d32fc07b1cd53b7a1814f0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/b8c24f675337403d66d32fc07b1cd53b7a1814f0/MIMEMultipart.py |
raise SMTPServerDisconnected | raise SMTPServerDisconnected('Server not connected') | def send(self, str): """Send `str' to the server.""" if self.debuglevel > 0: print 'send:', `str` if self.sock: try: self.sock.send(str) except socket.error: raise SMTPServerDisconnected else: raise SMTPServerDisconnected | ad9efc67a02965623d670328d6a8df26705fa9ad /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ad9efc67a02965623d670328d6a8df26705fa9ad/smtplib.py |
raise SMTPServerDisconnected | raise SMTPServerDisconnected('please run connect() first') | def send(self, str): """Send `str' to the server.""" if self.debuglevel > 0: print 'send:', `str` if self.sock: try: self.sock.send(str) except socket.error: raise SMTPServerDisconnected else: raise SMTPServerDisconnected | ad9efc67a02965623d670328d6a8df26705fa9ad /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ad9efc67a02965623d670328d6a8df26705fa9ad/smtplib.py |
raise SMTPServerDisconnected | raise SMTPServerDisconnected("Server not connected") | def ehlo(self, name=''): """ SMTP 'ehlo' command. Hostname to send for this command defaults to the FQDN of the local host. """ name=string.strip(name) if len(name)==0: name=socket.gethostbyaddr(socket.gethostname())[0] self.putcmd("ehlo",name) (code,msg)=self.getreply() # According to RFC1869 some (badly written) # MTA's will disconnect on an ehlo. Toss an exception if # that happens -ddm if code == -1 and len(msg) == 0: raise SMTPServerDisconnected self.ehlo_resp=msg if code<>250: return code self.does_esmtp=1 #parse the ehlo responce -ddm resp=string.split(self.ehlo_resp,'\n') del resp[0] for each in resp: m=re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*)',each) if m: feature=string.lower(m.group("feature")) params=string.strip(m.string[m.end("feature"):]) self.esmtp_features[feature]=params return code | ad9efc67a02965623d670328d6a8df26705fa9ad /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ad9efc67a02965623d670328d6a8df26705fa9ad/smtplib.py |
raise SMTPSenderRefused | raise SMTPSenderRefused('%s: %s' % (from_addr, resp)) | def sendmail(self, from_addr, to_addrs, msg, mail_options=[], rcpt_options=[]): """This command performs an entire mail transaction. | ad9efc67a02965623d670328d6a8df26705fa9ad /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ad9efc67a02965623d670328d6a8df26705fa9ad/smtplib.py |
raise SMTPRecipientsRefused | raise SMTPRecipientsRefused(string.join( map(lambda x:"%s: %s" % (x[0], x[1][1]), senderrs.items()), '; ')) | def sendmail(self, from_addr, to_addrs, msg, mail_options=[], rcpt_options=[]): """This command performs an entire mail transaction. | ad9efc67a02965623d670328d6a8df26705fa9ad /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ad9efc67a02965623d670328d6a8df26705fa9ad/smtplib.py |
raise SMTPDataError | raise SMTPDataError('data transmission error: %s' % code) | def sendmail(self, from_addr, to_addrs, msg, mail_options=[], rcpt_options=[]): """This command performs an entire mail transaction. | ad9efc67a02965623d670328d6a8df26705fa9ad /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ad9efc67a02965623d670328d6a8df26705fa9ad/smtplib.py |
self.filename = _normpath(filename) | self.orig_filename = filename null_byte = filename.find(chr(0)) if null_byte >= 0: filename = filename[0:null_byte] print "File name %s contains a suspicious null byte!" % filename if os.sep != "/": filename = filename.replace(os.sep, "/") self.filename = filename | def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)): self.filename = _normpath(filename) # Name of the file in the archive self.date_time = date_time # year, month, day, hour, min, sec # Standard values: self.compress_type = ZIP_STORED # Type of compression for the file self.comment = "" # Comment for each file self.extra = "" # ZIP extra data self.create_system = 0 # System which created ZIP archive self.create_version = 20 # Version which created ZIP archive self.extract_version = 20 # Version needed to extract archive self.reserved = 0 # Must be zero self.flag_bits = 0 # ZIP flag bits self.volume = 0 # Volume number of file header self.internal_attr = 0 # Internal attributes self.external_attr = 0 # External file attributes # Other attributes are set by class ZipFile: # header_offset Byte offset to the file header # file_offset Byte offset to the start of the file data # CRC CRC-32 of the uncompressed file # compress_size Size of the compressed file # file_size Size of the uncompressed file | 40584416f7c8f937f364bbbfb279a21318b1157e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/40584416f7c8f937f364bbbfb279a21318b1157e/zipfile.py |
if os.sep != "/": def _normpath(path): return path.replace(os.sep, "/") else: def _normpath(path): return path | def FileHeader(self): """Return the per-file header as a string.""" dt = self.date_time dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) if self.flag_bits & 0x08: # Set these to zero because we write them after the file data CRC = compress_size = file_size = 0 else: CRC = self.CRC compress_size = self.compress_size file_size = self.file_size header = struct.pack(structFileHeader, stringFileHeader, self.extract_version, self.reserved, self.flag_bits, self.compress_type, dostime, dosdate, CRC, compress_size, file_size, len(self.filename), len(self.extra)) return header + self.filename + self.extra | 40584416f7c8f937f364bbbfb279a21318b1157e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/40584416f7c8f937f364bbbfb279a21318b1157e/zipfile.py |
if fname != data.filename: | if fname != data.orig_filename: | def _RealGetContents(self): """Read in the table of contents for the ZIP file.""" fp = self.fp endrec = _EndRecData(fp) if not endrec: raise BadZipfile, "File is not a zip file" if self.debug > 1: print endrec size_cd = endrec[5] # bytes in central directory offset_cd = endrec[6] # offset of central directory self.comment = endrec[8] # archive comment # endrec[9] is the offset of the "End of Central Dir" record x = endrec[9] - size_cd # "concat" is zero, unless zip was concatenated to another file concat = x - offset_cd if self.debug > 2: print "given, inferred, offset", offset_cd, x, concat # self.start_dir: Position of start of central directory self.start_dir = offset_cd + concat fp.seek(self.start_dir, 0) total = 0 while total < size_cd: centdir = fp.read(46) total = total + 46 if centdir[0:4] != stringCentralDir: raise BadZipfile, "Bad magic number for central directory" centdir = struct.unpack(structCentralDir, centdir) if self.debug > 2: print centdir filename = fp.read(centdir[_CD_FILENAME_LENGTH]) # Create ZipInfo instance to store file information x = ZipInfo(filename) x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) total = (total + centdir[_CD_FILENAME_LENGTH] + centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]) x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] + concat # file_offset must be computed below... (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, x.CRC, x.compress_size, x.file_size) = centdir[1:12] x.volume, x.internal_attr, x.external_attr = centdir[15:18] # Convert date/time code to (year, month, day, hour, min, sec) x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) self.filelist.append(x) self.NameToInfo[x.filename] = x if self.debug > 2: print "total", total for data in self.filelist: fp.seek(data.header_offset, 0) fheader = fp.read(30) if fheader[0:4] != stringFileHeader: raise BadZipfile, "Bad magic number for file header" fheader = struct.unpack(structFileHeader, fheader) # file_offset is computed here, since the extra field for # the central directory and for the local file header # refer to different fields, and they can have different # lengths data.file_offset = (data.header_offset + 30 + fheader[_FH_FILENAME_LENGTH] + fheader[_FH_EXTRA_FIELD_LENGTH]) fname = fp.read(fheader[_FH_FILENAME_LENGTH]) if fname != data.filename: raise RuntimeError, \ 'File name in directory "%s" and header "%s" differ.' % ( data.filename, fname) | 40584416f7c8f937f364bbbfb279a21318b1157e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/40584416f7c8f937f364bbbfb279a21318b1157e/zipfile.py |
data.filename, fname) | data.orig_filename, fname) | def _RealGetContents(self): """Read in the table of contents for the ZIP file.""" fp = self.fp endrec = _EndRecData(fp) if not endrec: raise BadZipfile, "File is not a zip file" if self.debug > 1: print endrec size_cd = endrec[5] # bytes in central directory offset_cd = endrec[6] # offset of central directory self.comment = endrec[8] # archive comment # endrec[9] is the offset of the "End of Central Dir" record x = endrec[9] - size_cd # "concat" is zero, unless zip was concatenated to another file concat = x - offset_cd if self.debug > 2: print "given, inferred, offset", offset_cd, x, concat # self.start_dir: Position of start of central directory self.start_dir = offset_cd + concat fp.seek(self.start_dir, 0) total = 0 while total < size_cd: centdir = fp.read(46) total = total + 46 if centdir[0:4] != stringCentralDir: raise BadZipfile, "Bad magic number for central directory" centdir = struct.unpack(structCentralDir, centdir) if self.debug > 2: print centdir filename = fp.read(centdir[_CD_FILENAME_LENGTH]) # Create ZipInfo instance to store file information x = ZipInfo(filename) x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) total = (total + centdir[_CD_FILENAME_LENGTH] + centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]) x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] + concat # file_offset must be computed below... (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, x.CRC, x.compress_size, x.file_size) = centdir[1:12] x.volume, x.internal_attr, x.external_attr = centdir[15:18] # Convert date/time code to (year, month, day, hour, min, sec) x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) self.filelist.append(x) self.NameToInfo[x.filename] = x if self.debug > 2: print "total", total for data in self.filelist: fp.seek(data.header_offset, 0) fheader = fp.read(30) if fheader[0:4] != stringFileHeader: raise BadZipfile, "Bad magic number for file header" fheader = struct.unpack(structFileHeader, fheader) # file_offset is computed here, since the extra field for # the central directory and for the local file header # refer to different fields, and they can have different # lengths data.file_offset = (data.header_offset + 30 + fheader[_FH_FILENAME_LENGTH] + fheader[_FH_EXTRA_FIELD_LENGTH]) fname = fp.read(fheader[_FH_FILENAME_LENGTH]) if fname != data.filename: raise RuntimeError, \ 'File name in directory "%s" and header "%s" differ.' % ( data.filename, fname) | 40584416f7c8f937f364bbbfb279a21318b1157e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/40584416f7c8f937f364bbbfb279a21318b1157e/zipfile.py |
if hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin': | if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin' and os.getenv('USER') != 'root'): | def test_rmtree_errors(self): # filename is guaranteed not to exist filename = tempfile.mktemp() self.assertRaises(OSError, shutil.rmtree, filename) | f5c00815cc7a421de9b5e58a15627db284d54be2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f5c00815cc7a421de9b5e58a15627db284d54be2/test_shutil.py |
self.assertEqual(self.errorState, 2) | self.assertEqual(self.errorState, 2, "Expected call to onerror function did not happen.") | def test_on_error(self): self.errorState = 0 os.mkdir(TESTFN) self.childpath = os.path.join(TESTFN, 'a') f = open(self.childpath, 'w') f.close() old_dir_mode = os.stat(TESTFN).st_mode old_child_mode = os.stat(self.childpath).st_mode # Make unwritable. os.chmod(self.childpath, stat.S_IREAD) os.chmod(TESTFN, stat.S_IREAD) | f5c00815cc7a421de9b5e58a15627db284d54be2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f5c00815cc7a421de9b5e58a15627db284d54be2/test_shutil.py |
lines = linecache.getlines(file, getmodule(object).__dict__) | module = getmodule(object) if module: lines = linecache.getlines(file, module.__dict__) else: lines = linecache.getlines(file) | def findsource(object): """Return the entire source file and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of all the lines in the file and the line number indexes a line in that list. An IOError is raised if the source code cannot be retrieved.""" file = getsourcefile(object) or getfile(object) lines = linecache.getlines(file, getmodule(object).__dict__) if not lines: raise IOError('could not get source code') if ismodule(object): return lines, 0 if isclass(object): name = object.__name__ pat = re.compile(r'^\s*class\s*' + name + r'\b') for i in range(len(lines)): if pat.match(lines[i]): return lines, i else: raise IOError('could not find class definition') if ismethod(object): object = object.im_func if isfunction(object): object = object.func_code if istraceback(object): object = object.tb_frame if isframe(object): object = object.f_code if iscode(object): if not hasattr(object, 'co_firstlineno'): raise IOError('could not find function definition') lnum = object.co_firstlineno - 1 pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)') while lnum > 0: if pat.match(lines[lnum]): break lnum = lnum - 1 return lines, lnum raise IOError('could not find code object') | a750466dd216b301085d93eb2545419f0830eda5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/a750466dd216b301085d93eb2545419f0830eda5/inspect.py |
self.failUnless(3 in p, "proxy didn't support __contains__() properly") | self.failUnless(3 in p, "proxy didn't support __contains__() properly") | def test_basic_proxy(self): o = C() self.check_proxy(o, weakref.proxy(o)) | 6389f7287b33da4e52014e29651372296ea546df /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6389f7287b33da4e52014e29651372296ea546df/test_weakref.py |
self.assert_(len(values) == 0, "itervalues() did not touch all values") | self.assert_(len(values) == 0, "itervalues() did not touch all values") | def check_iters(self, dict): # item iterator: items = dict.items() for item in dict.iteritems(): items.remove(item) self.assert_(len(items) == 0, "iteritems() did not touch all items") | 6389f7287b33da4e52014e29651372296ea546df /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6389f7287b33da4e52014e29651372296ea546df/test_weakref.py |
"""Check that WeakValueDictionary class conforms to the mapping protocol""" | """Check that WeakValueDictionary conforms to the mapping protocol""" | def __eq__(self, other): if mutate: # Side effect that mutates the dict, by removing the # last strong reference to a key. del objs[-1] return self.value == other.value | 6389f7287b33da4e52014e29651372296ea546df /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6389f7287b33da4e52014e29651372296ea546df/test_weakref.py |
"""Check that WeakKeyDictionary class conforms to the mapping protocol""" | """Check that WeakKeyDictionary conforms to the mapping protocol""" | def _reference(self): return self.__ref.copy() | 6389f7287b33da4e52014e29651372296ea546df /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6389f7287b33da4e52014e29651372296ea546df/test_weakref.py |
WeakKeyDictionaryTestCase ) | WeakKeyDictionaryTestCase, ) | def test_main(): test_support.run_unittest( ReferencesTestCase, MappingTestCase, WeakValueDictionaryTestCase, WeakKeyDictionaryTestCase ) | 6389f7287b33da4e52014e29651372296ea546df /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6389f7287b33da4e52014e29651372296ea546df/test_weakref.py |
def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') | 9b7d28c8bb9b080af96c007ceb5f444cb376a5fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/9b7d28c8bb9b080af96c007ceb5f444cb376a5fc/setup.py |
min_db_ver = (3, 2) | min_db_ver = (3, 3) | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') | 9b7d28c8bb9b080af96c007ceb5f444cb376a5fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/9b7d28c8bb9b080af96c007ceb5f444cb376a5fc/setup.py |
ioloop(s, otheraddr) except KeyboardInterrupt: log('client got intr') except error: log('client got error') | try: ioloop(s, otheraddr) except KeyboardInterrupt: log('client got intr') except error: log('client got error') | def client(hostname): print 'client starting' cmd = 'rsh ' + hostname + ' "cd ' + AUDIODIR cmd = cmd + '; DISPLAY=:0; export DISPLAY' cmd = cmd + '; ' + PYTHON + ' intercom.py -r ' for flag in debug: cmd = cmd + flag + ' ' cmd = cmd + gethostname() cmd = cmd + '"' if debug: print cmd pipe = posix.popen(cmd, 'r') ack = 0 nak = 0 while 1: line = pipe.readline() if not line: break sys.stdout.write('remote: ' + line) if line == 'NAK\n': nak = 1 break elif line == 'ACK\n': ack = 1 break if nak: print 'Remote user doesn\'t want to talk to you.' return if not ack: print 'No acknowledgement (remote side crashed?).' return # print 'Ready...' # s = socket(AF_INET, SOCK_DGRAM) s.bind('', PORT2) # otheraddr = gethostbyname(hostname), PORT1 try: ioloop(s, otheraddr) except KeyboardInterrupt: log('client got intr') except error: log('client got error') finally: s.sendto('', otheraddr) log('client finished sending empty packet to server') # log('client exit') print 'Done.' | 3aa3c8066f746b0585f349663e0a17a5166b6fae /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3aa3c8066f746b0585f349663e0a17a5166b6fae/intercom.py |
parser.set_aliases({'license': 'licence'}) | parser.set_aliases({'licence': 'license'}) | def parse_command_line (self): """Parse the setup script's command line, taken from the 'script_args' instance attribute (which defaults to 'sys.argv[1:]' -- see 'setup()' in core.py). This list is first processed for "global options" -- options that set attributes of the Distribution instance. Then, it is alternately scanned for Distutils commands and options for that command. Each new command terminates the options for the previous command. The allowed options for a command are determined by the 'user_options' attribute of the command class -- thus, we have to be able to load command classes in order to parse the command line. Any error in that 'options' attribute raises DistutilsGetoptError; any error on the command-line raises DistutilsArgError. If no Distutils commands were found on the command line, raises DistutilsArgError. Return true if command-line was successfully parsed and we should carry on with executing commands; false if no errors but we shouldn't execute commands (currently, this only happens if user asks for help). """ # # We now have enough information to show the Macintosh dialog that allows # the user to interactively specify the "command line". # if sys.platform == 'mac': import EasyDialogs cmdlist = self.get_command_list() self.script_args = EasyDialogs.GetArgv( self.global_options + self.display_options, cmdlist) | 939d6753f0b9ebc2cf9fde52026c78bbad592934 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/939d6753f0b9ebc2cf9fde52026c78bbad592934/dist.py |
if type(command) is ClassType and issubclass(klass, Command): | if type(command) is ClassType and issubclass(command, Command): | def _show_help (self, parser, global_options=1, display_options=1, commands=[]): """Show help for the setup script command-line in the form of several lists of command-line options. 'parser' should be a FancyGetopt instance; do not expect it to be returned in the same state, as its option table will be reset to make it generate the correct help text. | 939d6753f0b9ebc2cf9fde52026c78bbad592934 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/939d6753f0b9ebc2cf9fde52026c78bbad592934/dist.py |
self.licence = None | self.license = None | def __init__ (self): self.name = None self.version = None self.author = None self.author_email = None self.maintainer = None self.maintainer_email = None self.url = None self.licence = None self.description = None self.long_description = None self.keywords = None self.platforms = None | 939d6753f0b9ebc2cf9fde52026c78bbad592934 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/939d6753f0b9ebc2cf9fde52026c78bbad592934/dist.py |
pkg_info.write('License: %s\n' % self.get_licence() ) | pkg_info.write('License: %s\n' % self.get_license() ) | def write_pkg_info (self, base_dir): """Write the PKG-INFO file into the release tree. """ | 939d6753f0b9ebc2cf9fde52026c78bbad592934 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/939d6753f0b9ebc2cf9fde52026c78bbad592934/dist.py |
def get_licence(self): return self.licence or "UNKNOWN" | def get_license(self): return self.license or "UNKNOWN" get_licence = get_license | def get_licence(self): return self.licence or "UNKNOWN" | 939d6753f0b9ebc2cf9fde52026c78bbad592934 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/939d6753f0b9ebc2cf9fde52026c78bbad592934/dist.py |
'_topdir %s/%s' % (os.getcwd(), self.rpm_base),]) | '_topdir %s' % os.path.abspath(self.rpm_base)]) | def run (self): | 77555032d91b251aac1910641203c8c94dba6622 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/77555032d91b251aac1910641203c8c94dba6622/bdist_rpm.py |
def __imull__(self, n): self.data += n | def __imul__(self, n): self.data *= n | def __imull__(self, n): self.data += n return self | ab22f026a902dffbf1454103964093ab99457e9a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ab22f026a902dffbf1454103964093ab99457e9a/UserString.py |
def valueseq(self, value1, value2): return value1.gr_name==value2.gr_name and \ value1.gr_gid==value2.gr_gid and value1.gr_mem==value2.gr_mem | def valueseq(self, value1, value2): # are two grp tuples equal (don't compare passwords) return value1.gr_name==value2.gr_name and \ value1.gr_gid==value2.gr_gid and value1.gr_mem==value2.gr_mem | 10712bf02edf9dc6453a644a1361efdd2ba7215a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/10712bf02edf9dc6453a644a1361efdd2ba7215a/test_grp.py |
entriesbygid = {} entriesbyname = {} | def test_values(self): entries = grp.getgrall() entriesbygid = {} entriesbyname = {} | 10712bf02edf9dc6453a644a1361efdd2ba7215a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/10712bf02edf9dc6453a644a1361efdd2ba7215a/test_grp.py |
entriesbygid.setdefault(e.gr_gid, []).append(e) entriesbyname.setdefault(e.gr_name, []).append(e) | def test_values(self): entries = grp.getgrall() entriesbygid = {} entriesbyname = {} | 10712bf02edf9dc6453a644a1361efdd2ba7215a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/10712bf02edf9dc6453a644a1361efdd2ba7215a/test_grp.py |
self.assert_(max([self.valueseq(e2, x) \ for x in entriesbygid[e.gr_gid]])) | self.assertEqual(e2.gr_gid, e.gr_gid) | def test_values(self): entries = grp.getgrall() entriesbygid = {} entriesbyname = {} | 10712bf02edf9dc6453a644a1361efdd2ba7215a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/10712bf02edf9dc6453a644a1361efdd2ba7215a/test_grp.py |
self.assert_(max([self.valueseq(e2, x) \ for x in entriesbyname[e.gr_name]])) | self.assertEqual(e2.gr_name, e.gr_name) | def test_values(self): entries = grp.getgrall() entriesbygid = {} entriesbyname = {} | 10712bf02edf9dc6453a644a1361efdd2ba7215a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/10712bf02edf9dc6453a644a1361efdd2ba7215a/test_grp.py |
filename="<testcase>", mode="exec"): | filename="<testcase>", mode="exec", subclass=None): | def _check_error(self, code, errtext, filename="<testcase>", mode="exec"): """Check that compiling code raises SyntaxError with errtext. | 195f2ea10b13d5e12b96c37455a34e60ea202542 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/195f2ea10b13d5e12b96c37455a34e60ea202542/test_syntax.py |
test of the exception raised. | test of the exception raised. If subclass is specified it is the expected subclass of SyntaxError (e.g. IndentationError). | def _check_error(self, code, errtext, filename="<testcase>", mode="exec"): """Check that compiling code raises SyntaxError with errtext. | 195f2ea10b13d5e12b96c37455a34e60ea202542 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/195f2ea10b13d5e12b96c37455a34e60ea202542/test_syntax.py |
Triple = group("'''", '"""', "r'''", 'r"""') | Triple = group("[rR]?'''", '[rR]?"""') | def maybe(*choices): return apply(group, choices) + '?' | d11b888f9fc4baa2e0d763a0360c02bc7403a9ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d11b888f9fc4baa2e0d763a0360c02bc7403a9ca/tokenize.py |
ContStr = group("r?'" + any(r'\\.', r"[^\n'\\]") + group("'", r'\\\r?\n'), 'r?"' + any(r'\\.', r'[^\n"\\]') + group('"', r'\\\r?\n')) | ContStr = group("[rR]?'" + any(r'\\.', r"[^\n'\\]") + group("'", r'\\\r?\n'), '[rR]?"' + any(r'\\.', r'[^\n"\\]') + group('"', r'\\\r?\n')) | def maybe(*choices): return apply(group, choices) + '?' | d11b888f9fc4baa2e0d763a0360c02bc7403a9ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d11b888f9fc4baa2e0d763a0360c02bc7403a9ca/tokenize.py |
endprogs = {"'": re.compile(Single), '"': re.compile(Double), 'r': None, | endprogs = {"'": re.compile(Single), '"': re.compile(Double), | def maybe(*choices): return apply(group, choices) + '?' | d11b888f9fc4baa2e0d763a0360c02bc7403a9ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d11b888f9fc4baa2e0d763a0360c02bc7403a9ca/tokenize.py |
"r'''": single3prog, 'r"""': double3prog} | "r'''": single3prog, 'r"""': double3prog, "R'''": single3prog, 'R"""': double3prog, 'r': None, 'R': None} | def maybe(*choices): return apply(group, choices) + '?' | d11b888f9fc4baa2e0d763a0360c02bc7403a9ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d11b888f9fc4baa2e0d763a0360c02bc7403a9ca/tokenize.py |
elif token in ("'''",'"""',"r'''",'r"""'): | elif token in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""'): | def tokenize(readline, tokeneater=printtoken): lnum = parenlev = continued = 0 namechars, numchars = string.letters + '_', string.digits contstr, needcont = '', 0 indents = [0] while 1: # loop over lines in stream line = readline() lnum = lnum + 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError, ("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) tokeneater(STRING, contstr + line[:end], strstart, (lnum, end), line) contstr, needcont = '', 0 elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': tokeneater(ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), line) contstr = '' continue else: contstr = contstr + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column = column + 1 elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos = pos + 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines tokeneater((NEWLINE, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) tokeneater(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: indents = indents[:-1] tokeneater(DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError, ("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = pseudoprog.match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end token, initial = line[start:end], line[start] if initial in numchars \ or (initial == '.' and token != '.'): # ordinary number tokeneater(NUMBER, token, spos, epos, line) elif initial in '\r\n': tokeneater(NEWLINE, token, spos, epos, line) elif initial == '#': tokeneater(COMMENT, token, spos, epos, line) elif token in ("'''",'"""',"r'''",'r"""'): # triple-quoted endprog = endprogs[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] tokeneater(STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] break elif initial in ("'", '"') or token[:2] in ("r'", 'r"'): if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = endprogs[initial] or endprogs[token[1]] contstr, needcont = line[start:], 1 break else: # ordinary string tokeneater(STRING, token, spos, epos, line) elif initial in namechars: # ordinary name tokeneater(NAME, token, spos, epos, line) elif initial == '\\': # continued stmt continued = 1 else: if initial in '([{': parenlev = parenlev + 1 elif initial in ')]}': parenlev = parenlev - 1 tokeneater(OP, token, spos, epos, line) else: tokeneater(ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos = pos + 1 for indent in indents[1:]: # pop remaining indent levels tokeneater(DEDENT, '', (lnum, 0), (lnum, 0), '') tokeneater(ENDMARKER, '', (lnum, 0), (lnum, 0), '') | d11b888f9fc4baa2e0d763a0360c02bc7403a9ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d11b888f9fc4baa2e0d763a0360c02bc7403a9ca/tokenize.py |
elif initial in ("'", '"') or token[:2] in ("r'", 'r"'): | elif initial in ("'", '"') or \ token[:2] in ("r'", 'r"', "R'", 'R"'): | def tokenize(readline, tokeneater=printtoken): lnum = parenlev = continued = 0 namechars, numchars = string.letters + '_', string.digits contstr, needcont = '', 0 indents = [0] while 1: # loop over lines in stream line = readline() lnum = lnum + 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError, ("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) tokeneater(STRING, contstr + line[:end], strstart, (lnum, end), line) contstr, needcont = '', 0 elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': tokeneater(ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), line) contstr = '' continue else: contstr = contstr + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column = column + 1 elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos = pos + 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines tokeneater((NEWLINE, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) tokeneater(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: indents = indents[:-1] tokeneater(DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError, ("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = pseudoprog.match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end token, initial = line[start:end], line[start] if initial in numchars \ or (initial == '.' and token != '.'): # ordinary number tokeneater(NUMBER, token, spos, epos, line) elif initial in '\r\n': tokeneater(NEWLINE, token, spos, epos, line) elif initial == '#': tokeneater(COMMENT, token, spos, epos, line) elif token in ("'''",'"""',"r'''",'r"""'): # triple-quoted endprog = endprogs[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] tokeneater(STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] break elif initial in ("'", '"') or token[:2] in ("r'", 'r"'): if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = endprogs[initial] or endprogs[token[1]] contstr, needcont = line[start:], 1 break else: # ordinary string tokeneater(STRING, token, spos, epos, line) elif initial in namechars: # ordinary name tokeneater(NAME, token, spos, epos, line) elif initial == '\\': # continued stmt continued = 1 else: if initial in '([{': parenlev = parenlev + 1 elif initial in ')]}': parenlev = parenlev - 1 tokeneater(OP, token, spos, epos, line) else: tokeneater(ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos = pos + 1 for indent in indents[1:]: # pop remaining indent levels tokeneater(DEDENT, '', (lnum, 0), (lnum, 0), '') tokeneater(ENDMARKER, '', (lnum, 0), (lnum, 0), '') | d11b888f9fc4baa2e0d763a0360c02bc7403a9ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d11b888f9fc4baa2e0d763a0360c02bc7403a9ca/tokenize.py |
self.compiler.find_library_file(lib_dirs, 'panel')): | self.compiler.find_library_file(lib_dirs, panel_library)): | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') | 67832e0fdd07421a260e8c424e5015c9bb79b93e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/67832e0fdd07421a260e8c424e5015c9bb79b93e/setup.py |
libraries = ['panel'] + curses_libs) ) | libraries = [panel_library] + curses_libs) ) | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') | 67832e0fdd07421a260e8c424e5015c9bb79b93e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/67832e0fdd07421a260e8c424e5015c9bb79b93e/setup.py |
self._cont_handler = handler.ContentHandler() self._err_handler = handler.ErrorHandler() | self._cont_handler = handler.ContentHandler() self._err_handler = handler.ErrorHandler() | def __init__(self): | 6495c96ce68c1d4088edc1f493b35e11103b0d82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6495c96ce68c1d4088edc1f493b35e11103b0d82/xmlreader.py |
"Parse an XML document from a system identifier or an InputSource." | "Parse an XML document from a system identifier or an InputSource." | def parse(self, source): | 6495c96ce68c1d4088edc1f493b35e11103b0d82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6495c96ce68c1d4088edc1f493b35e11103b0d82/xmlreader.py |
"Register an object to receive basic DTD-related events." self._dtd_handler = handler | "Register an object to receive basic DTD-related events." self._dtd_handler = handler | def setDTDHandler(self, handler): | 6495c96ce68c1d4088edc1f493b35e11103b0d82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6495c96ce68c1d4088edc1f493b35e11103b0d82/xmlreader.py |
"Register an object to resolve external entities." self._ent_handler = resolver | "Register an object to resolve external entities." self._ent_handler = resolver | def setEntityResolver(self, resolver): | 6495c96ce68c1d4088edc1f493b35e11103b0d82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6495c96ce68c1d4088edc1f493b35e11103b0d82/xmlreader.py |
"Register an object to receive error-message events." self._err_handler = handler | "Register an object to receive error-message events." self._err_handler = handler | def setErrorHandler(self, handler): | 6495c96ce68c1d4088edc1f493b35e11103b0d82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6495c96ce68c1d4088edc1f493b35e11103b0d82/xmlreader.py |
"Return the column number where the current event ends." return -1 | "Return the column number where the current event ends." return -1 | def getColumnNumber(self): | 6495c96ce68c1d4088edc1f493b35e11103b0d82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6495c96ce68c1d4088edc1f493b35e11103b0d82/xmlreader.py |
"Return the line number where the current event ends." return -1 | "Return the line number where the current event ends." return -1 | def getLineNumber(self): | 6495c96ce68c1d4088edc1f493b35e11103b0d82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6495c96ce68c1d4088edc1f493b35e11103b0d82/xmlreader.py |
"Return the public identifier for the current event." return None | "Return the public identifier for the current event." return None | def getPublicId(self): | 6495c96ce68c1d4088edc1f493b35e11103b0d82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6495c96ce68c1d4088edc1f493b35e11103b0d82/xmlreader.py |
"Return the system identifier for the current event." return None | "Return the system identifier for the current event." return None | def getSystemId(self): | 6495c96ce68c1d4088edc1f493b35e11103b0d82 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6495c96ce68c1d4088edc1f493b35e11103b0d82/xmlreader.py |
def set_proxy(self, proxy): self.__proxy = proxy self.type, self.__r_type = splittype(self.__proxy) self.host, XXX = splithost(self.__r_type) self.host = unquote(self.host) | def set_proxy(self, host, type): self.host, self.type = host, type | def set_proxy(self, proxy): self.__proxy = proxy # XXX this code is based on urllib, but it doesn't seem # correct. specifically, if the proxy has a port number then # splittype will return the hostname as the type and the port # will be include with everything else self.type, self.__r_type = splittype(self.__proxy) self.host, XXX = splithost(self.__r_type) self.host = unquote(self.host) self.__r_host = self.__original | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
if proto == 'http': dict = self.handle_error[proto] | if proto in ['http', 'https']: dict = self.handle_error['http'] | def error(self, proto, *args): if proto == 'http': # XXX http protocol is special cased dict = self.handle_error[proto] proto = args[2] # YUCK! meth_name = 'http_error_%d' % proto http_err = 1 orig_args = args else: dict = self.handle_error meth_name = proto + '_error' http_err = 0 args = (dict, proto, meth_name) + args result = self._call_chain(*args) if result: return result | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
if req.error_302_dict.has_key(newurl): | if len(error_302_dict)>10 or req.error_302_dict.has_key(newurl): | def http_error_302(self, req, fp, code, msg, headers): if headers.has_key('location'): newurl = headers['location'] elif headers.has_key('uri'): newurl = headers['uri'] else: return nil = fp.read() fp.close() | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
req.set_proxy(proxy) | type, r_type = splittype(proxy) host, XXX = splithost(r_type) if '@' in host: user_pass, host = host.split('@', 1) user_pass = base64.encode_string(unquote(user_passw)).strip() req.addheader('Proxy-Authorization', user_pass) host = unquote(host) req.set_proxy(host, type) | def proxy_open(self, req, proxy, type): orig_type = req.get_type() req.set_proxy(proxy) if orig_type == type: # let other handlers take care of it # XXX this only makes sense if the proxy is before the # other handlers return None else: # need to start over, because the other handlers don't # grok the proxy's URL type return self.parent.open(req) | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
common = os.path.commonprefix((base[1], test[1])) | common = posixpath.commonprefix((base[1], test[1])) | def is_suburi(self, base, test): """Check if test is below base in a URI tree | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
class HTTPBasicAuthHandler(BaseHandler): | class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): def find_user_password(self, realm, authuri): user, password = HTTPPasswordMgr.find_user_password(self,realm,authuri) if user is not None: return user, password return HTTPPasswordMgr.find_user_password(self, None, authuri) class AbstractBasicAuthHandler: | def is_suburi(self, base, test): """Check if test is below base in a URI tree | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
def __init__(self): self.passwd = HTTPPasswordMgr() | def __init__(self, password_mgr=None): if password_mgr is None: password_mgr = HTTPPasswordMgr() self.passwd = password_mgr | def __init__(self): self.passwd = HTTPPasswordMgr() self.add_password = self.passwd.add_password self.__current_realm = None # if __current_realm is not None, then the server must have # refused our name/password and is asking for authorization # again. must be careful to set it to None on successful # return. | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
def http_error_401(self, req, fp, code, msg, headers): authreq = headers.get('www-authenticate', None) | def http_error_auth_reqed(self, authreq, host, req, headers): authreq = headers.get(authreq, None) | def http_error_401(self, req, fp, code, msg, headers): # XXX could be mult. headers authreq = headers.get('www-authenticate', None) if authreq: mo = HTTPBasicAuthHandler.rx.match(authreq) if mo: scheme, realm = mo.groups() if scheme.lower() == 'basic': return self.retry_http_basic_auth(req, realm) | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
mo = HTTPBasicAuthHandler.rx.match(authreq) | mo = AbstractBasicAuthHandler.rx.match(authreq) | def http_error_401(self, req, fp, code, msg, headers): # XXX could be mult. headers authreq = headers.get('www-authenticate', None) if authreq: mo = HTTPBasicAuthHandler.rx.match(authreq) if mo: scheme, realm = mo.groups() if scheme.lower() == 'basic': return self.retry_http_basic_auth(req, realm) | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
return self.retry_http_basic_auth(req, realm) def retry_http_basic_auth(self, req, realm): | return self.retry_http_basic_auth(host, req, realm) def retry_http_basic_auth(self, host, req, realm): | def http_error_401(self, req, fp, code, msg, headers): # XXX could be mult. headers authreq = headers.get('www-authenticate', None) if authreq: mo = HTTPBasicAuthHandler.rx.match(authreq) if mo: scheme, realm = mo.groups() if scheme.lower() == 'basic': return self.retry_http_basic_auth(req, realm) | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
host = req.get_host() | def retry_http_basic_auth(self, req, realm): if self.__current_realm is None: self.__current_realm = realm else: self.__current_realm = realm return None # XXX host isn't really the correct URI? host = req.get_host() user,pw = self.passwd.find_user_password(realm, host) if pw: raw = "%s:%s" % (user, pw) auth = base64.encodestring(raw).strip() req.add_header('Authorization', 'Basic %s' % auth) resp = self.parent.open(req) self.__current_realm = None return resp else: self.__current_realm = None return None | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
req.add_header('Authorization', 'Basic %s' % auth) | req.add_header(self.header, 'Basic %s' % auth) | def retry_http_basic_auth(self, req, realm): if self.__current_realm is None: self.__current_realm = realm else: self.__current_realm = realm return None # XXX host isn't really the correct URI? host = req.get_host() user,pw = self.passwd.find_user_password(realm, host) if pw: raw = "%s:%s" % (user, pw) auth = base64.encodestring(raw).strip() req.add_header('Authorization', 'Basic %s' % auth) resp = self.parent.open(req) self.__current_realm = None return resp else: self.__current_realm = None return None | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
class HTTPDigestAuthHandler(BaseHandler): """An authentication protocol defined by RFC 2069 Digest authentication improves on basic authentication because it does not transmit passwords in the clear. """ def __init__(self): self.passwd = HTTPPasswordMgr() | class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): header = 'Authorization' def http_error_401(self, req, fp, code, msg, headers): host = urlparse.urlparse(req.get_full_url())[1] return self.http_error_auth_reqed('www-authenticate', host, req, headers) class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): header = 'Proxy-Authorization' def http_error_407(self, req, fp, code, msg, headers): host = req.get_host() return self.http_error_auth_reqed('proxy-authenticate', host, req, headers) class AbstractDigestAuthHandler: def __init__(self, passwd=None): if passwd is None: passwd = HTTPPassowrdMgr() self.passwd = passwd | def retry_http_basic_auth(self, req, realm): if self.__current_realm is None: self.__current_realm = realm else: self.__current_realm = realm return None # XXX host isn't really the correct URI? host = req.get_host() user,pw = self.passwd.find_user_password(realm, host) if pw: raw = "%s:%s" % (user, pw) auth = base64.encodestring(raw).strip() req.add_header('Authorization', 'Basic %s' % auth) resp = self.parent.open(req) self.__current_realm = None return resp else: self.__current_realm = None return None | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
def http_error_401(self, req, fp, code, msg, headers): authreq = headers.get('www-authenticate', None) | def http_error_auth_reqed(self, authreq, host, req, headers): authreq = headers.get(self.header, None) | def http_error_401(self, req, fp, code, msg, headers): # XXX could be mult. headers authreq = headers.get('www-authenticate', None) if authreq: kind = authreq.split()[0] if kind == 'Digest': return self.retry_http_digest_auth(req, authreq) | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
req.add_header('Authorization', 'Digest %s' % auth) | req.add_header(self.header, 'Digest %s' % auth) | def retry_http_digest_auth(self, req, auth): token, challenge = auth.split(' ', 1) chal = parse_keqv_list(parse_http_list(challenge)) auth = self.get_authorization(req, chal) if auth: req.add_header('Authorization', 'Digest %s' % auth) resp = self.parent.open(req) self.__current_realm = None return resp | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
class HTTPHandler(BaseHandler): def http_open(self, req): | class AbstractHTTPHandler(BaseHandler): def do_open(self, http_class, req): | def encode_digest(digest): hexrep = [] for c in digest: n = (ord(c) >> 4) & 0xf hexrep.append(hex(n)[-1]) n = ord(c) & 0xf hexrep.append(hex(n)[-1]) return ''.join(hexrep) | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
h = httplib.HTTP(host) | h = http_class(host) | def http_open(self, req): # XXX devise a new mechanism to specify user/password host = req.get_host() if not host: raise URLError('no host given') | f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f7989b7d9a14a1c7a33a2eeb3dcf598d8e672f09/urllib2.py |
elif type(arg) == type(""): | elif isinstance(arg, basestring): | def load_stats(self, arg): if not arg: self.stats = {} elif type(arg) == type(""): f = open(arg, 'rb') self.stats = marshal.load(f) f.close() try: file_stats = os.stat(arg) arg = time.ctime(file_stats.st_mtime) + " " + arg except: # in case this is not unix pass self.files = [ arg ] elif hasattr(arg, 'create_stats'): arg.create_stats() self.stats = arg.stats arg.stats = {} if not self.stats: raise TypeError, "Cannot create or construct a %r object from '%r''" % ( self.__class__, arg) return | cb0b07a2e5cc7d353e2651cc12b6687403947cd0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/cb0b07a2e5cc7d353e2651cc12b6687403947cd0/pstats.py |
for version in ['8.4', '8.3', '8.2', '8.1', '8.0']: | for version in ['8.4', '84', '8.3', '83', '8.2', '82', '8.1', '81', '8.0', '80']: | def detect_tkinter(self, inc_dirs, lib_dirs): # The _tkinter module. # Assume we haven't found any of the libraries or include files tcllib = tklib = tcl_includes = tk_includes = None for version in ['8.4', '8.3', '8.2', '8.1', '8.0']: tklib = self.compiler.find_library_file(lib_dirs, 'tk' + version ) tcllib = self.compiler.find_library_file(lib_dirs, 'tcl' + version ) if tklib and tcllib: # Exit the loop when we've found the Tcl/Tk libraries break | e8590859177fc73fd38a76103065621b58a5069c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/e8590859177fc73fd38a76103065621b58a5069c/setup.py |
libs.append('X11') | if platform != "cygwin": libs.append('X11') | def detect_tkinter(self, inc_dirs, lib_dirs): # The _tkinter module. # Assume we haven't found any of the libraries or include files tcllib = tklib = tcl_includes = tk_includes = None for version in ['8.4', '8.3', '8.2', '8.1', '8.0']: tklib = self.compiler.find_library_file(lib_dirs, 'tk' + version ) tcllib = self.compiler.find_library_file(lib_dirs, 'tcl' + version ) if tklib and tcllib: # Exit the loop when we've found the Tcl/Tk libraries break | e8590859177fc73fd38a76103065621b58a5069c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/e8590859177fc73fd38a76103065621b58a5069c/setup.py |
raise ValueError("truncated header") | raise HeaderError("truncated header") | def frombuf(cls, buf): """Construct a TarInfo object from a 512 byte string buffer. """ if len(buf) != BLOCKSIZE: raise ValueError("truncated header") if buf.count(NUL) == BLOCKSIZE: raise ValueError("empty header") | 6c5bb832f0b5724716184d3dcf33270dab252de7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6c5bb832f0b5724716184d3dcf33270dab252de7/tarfile.py |
raise ValueError("empty header") | raise HeaderError("empty header") try: chksum = nti(buf[148:156]) except ValueError: raise HeaderError("invalid header") if chksum not in calc_chksums(buf): raise HeaderError("bad checksum") | def frombuf(cls, buf): """Construct a TarInfo object from a 512 byte string buffer. """ if len(buf) != BLOCKSIZE: raise ValueError("truncated header") if buf.count(NUL) == BLOCKSIZE: raise ValueError("empty header") | 6c5bb832f0b5724716184d3dcf33270dab252de7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6c5bb832f0b5724716184d3dcf33270dab252de7/tarfile.py |
tarinfo.chksum = nti(buf[148:156]) | tarinfo.chksum = chksum | def frombuf(cls, buf): """Construct a TarInfo object from a 512 byte string buffer. """ if len(buf) != BLOCKSIZE: raise ValueError("truncated header") if buf.count(NUL) == BLOCKSIZE: raise ValueError("empty header") | 6c5bb832f0b5724716184d3dcf33270dab252de7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6c5bb832f0b5724716184d3dcf33270dab252de7/tarfile.py |
if tarinfo.chksum not in calc_chksums(buf): raise ValueError("invalid header") | def frombuf(cls, buf): """Construct a TarInfo object from a 512 byte string buffer. """ if len(buf) != BLOCKSIZE: raise ValueError("truncated header") if buf.count(NUL) == BLOCKSIZE: raise ValueError("empty header") | 6c5bb832f0b5724716184d3dcf33270dab252de7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6c5bb832f0b5724716184d3dcf33270dab252de7/tarfile.py |
except ValueError, e: | except HeaderError, e: | def next(self): """Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m | 6c5bb832f0b5724716184d3dcf33270dab252de7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6c5bb832f0b5724716184d3dcf33270dab252de7/tarfile.py |
self._dbg(2, "0x%X: empty or invalid block: %s" % (self.offset, e)) | self._dbg(2, "0x%X: %s" % (self.offset, e)) | def next(self): """Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m | 6c5bb832f0b5724716184d3dcf33270dab252de7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6c5bb832f0b5724716184d3dcf33270dab252de7/tarfile.py |
raise ReadError("empty, unreadable or compressed " "file: %s" % e) | raise ReadError(str(e)) | def next(self): """Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m | 6c5bb832f0b5724716184d3dcf33270dab252de7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6c5bb832f0b5724716184d3dcf33270dab252de7/tarfile.py |
raise RuntimeError, "no clue how to do this on Mac OS" | if not os.path.isabs(pathname): return os.path.join(new_root, pathname) else: elements = string.split(pathname, ":", 1) pathname = ":" + elements[1] return os.path.join(new_root, pathname) | def change_root (new_root, pathname): """Return 'pathname' with 'new_root' prepended. If 'pathname' is relative, this is equivalent to "os.path.join(new_root,pathname)". Otherwise, it requires making 'pathname' relative and then joining the two, which is tricky on DOS/Windows and Mac OS. """ if os.name == 'posix': if not os.path.isabs (pathname): return os.path.join (new_root, pathname) else: return os.path.join (new_root, pathname[1:]) elif os.name == 'nt': (drive, path) = os.path.splitdrive (pathname) if path[0] == '\\': path = path[1:] return os.path.join (new_root, path) elif os.name == 'mac': raise RuntimeError, "no clue how to do this on Mac OS" else: raise DistutilsPlatformError, \ "nothing known about platform '%s'" % os.name | c29ba1843eb62822592c0dad6b461b233d44f873 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c29ba1843eb62822592c0dad6b461b233d44f873/util.py |
version = "HTTP/0.9" | def handle(self): | 8268bcaa11f9fa84fdd45982a4f47f2530f16c37 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/8268bcaa11f9fa84fdd45982a4f47f2530f16c37/BaseHTTPServer.py |
self.send_error(400, "Bad request syntax (%s)" % `command`) | self.send_error(400, "Bad request syntax (%s)" % `requestline`) | def handle(self): | 8268bcaa11f9fa84fdd45982a4f47f2530f16c37 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/8268bcaa11f9fa84fdd45982a4f47f2530f16c37/BaseHTTPServer.py |
self.send_error(501, "Unsupported method (%s)" % `command`) | self.send_error(501, "Unsupported method (%s)" % `mname`) | def handle(self): | 8268bcaa11f9fa84fdd45982a4f47f2530f16c37 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/8268bcaa11f9fa84fdd45982a4f47f2530f16c37/BaseHTTPServer.py |
slave_fd = _slave_open(slave_name) | slave_fd = slave_open(slave_name) | def openpty(): """openpty() -> (master_fd, slave_fd) Open a pty master/slave pair, using os.openpty() if possible.""" try: return os.openpty() except (AttributeError, OSError): pass master_fd, slave_name = _open_terminal() slave_fd = _slave_open(slave_name) return master_fd, slave_fd | 8386131f9c10e6904fb3b9d68a0ff5a21c347f7e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/8386131f9c10e6904fb3b9d68a0ff5a21c347f7e/pty.py |
del ce | del riscos | def _get_exports_list(module): try: return list(module.__all__) except AttributeError: return [n for n in dir(module) if n[0] != '_'] | cb1a1d944f0eff9e69887f75d4423c4606952c7d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/cb1a1d944f0eff9e69887f75d4423c4606952c7d/os.py |
if flags & select.POLLIN: | if flags & (select.POLLIN | select.POLLPRI): | def readwrite(obj, flags): try: if flags & select.POLLIN: obj.handle_read_event() if flags & select.POLLOUT: obj.handle_write_event() except ExitNow: raise except: obj.handle_error() | 72810e023af81b3e392b36de3d86fc5484252ce0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/72810e023af81b3e392b36de3d86fc5484252ce0/asyncore.py |
self.putsequences() | self.putsequences(sequences) | def removefromallsequences(self, list): if hasattr(self, 'last') and self.last in list: del self.last sequences = self.getsequences() changed = 0 for name, seq in sequences.items(): for n in list: if n in seq: seq.remove(n) changed = 1 if not seq: del sequences[name] if changed: self.putsequences() | 0d3474161359c9f548cecb1550f7393239b81c9a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/0d3474161359c9f548cecb1550f7393239b81c9a/mhlib.py |