Dataset columns: rem (string, 1 to 322k chars), add (string, 0 to 2.05M chars), context (string, 4 to 228k chars), meta (string, 156 to 215 chars).
l.extend(decoding_map_code) if decoding_table_code:
l.extend(decoding_map_code) else:
def getregentry(): return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
88a416f24d864b5c6b03a1e6abf27f80832face9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/88a416f24d864b5c6b03a1e6abf27f80832face9/gencodec.py
path, lastcomp = os.path.split(path) if not path: break removed[0:0] = [lastcomp] path = patharg while 1:
def match(self, patharg): removed = [] # First check the include directory path = patharg while 1: if self.idict.has_key(path): # We know of this path (or initial piece of path) dstpath = self.idict[path] # We do want it distributed. Tack on the tail. while removed: dstpath = os.path.join(dstpath, removed[0]) removed = removed[1:] # Finally, if the resultant string ends in a separator # tack on our input filename if dstpath[-1] == os.sep: dir, file = os.path.split(path) dstpath = os.path.join(dstpath, file) if DEBUG: print 'include', patharg, dstpath return dstpath path, lastcomp = os.path.split(path) if not path: break removed[0:0] = [lastcomp] # Next check the exclude directory path = patharg while 1: if self.edict.has_key(path): if DEBUG: print 'exclude', patharg, path return '' path, lastcomp = os.path.split(path) if not path: break removed[0:0] = [lastcomp] if DEBUG: print 'nomatch', patharg return None
d020fc141228dce96dd05e4c9c5d57a3afa1d3f8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d020fc141228dce96dd05e4c9c5d57a3afa1d3f8/MkDistr.py
class Maildir: def __init__(self, dirname): import string self.dirname = dirname self.boxes = [] newdir = os.path.join(self.dirname, 'new') for file in os.listdir(newdir): if len(string.split(file, '.')) > 2: self.boxes.append(os.path.join(newdir, file)) curdir = os.path.join(self.dirname, 'cur') for file in os.listdir(curdir): if len(string.split(file, '.')) > 2: self.boxes.append(os.path.join(curdir, file)) def next(self): if not self.boxes: return None fn = self.boxes[0] del self.boxes[0] fp = open(os.path.join(self.dirname, fn)) return rfc822.Message(fp)
def next(self): if not self.boxes: return None fn = self.boxes[0] del self.boxes[0] fp = open(os.path.join(self.dirname, fn)) return rfc822.Message(fp)
d5a2ad2a58ecb501d954435050657a744cf298de /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d5a2ad2a58ecb501d954435050657a744cf298de/mailbox.py
for key in 'MAIL', 'LOGNAME', 'USER':
for key in 'MAILDIR', 'MAIL', 'LOGNAME', 'USER':
def _test(): import time import sys import string import os args = sys.argv[1:] if not args: for key in 'MAIL', 'LOGNAME', 'USER': if os.environ.has_key(key): mbox = os.environ[key] break else: print "$MAIL, $LOGNAME nor $USER set -- who are you?" return else: mbox = args[0] if mbox[:1] == '+': mbox = os.environ['HOME'] + '/Mail/' + mbox[1:] elif not '/' in mbox: mbox = '/usr/mail/' + mbox if os.path.isdir(mbox): mb = MHMailbox(mbox) else: fp = open(mbox, 'r') mb = UnixMailbox(fp) msgs = [] while 1: msg = mb.next() if msg is None: break msgs.append(msg) msg.fp = None if len(args) > 1: num = string.atoi(args[1]) print 'Message %d body:'%num msg = msgs[num-1] msg.rewindbody() sys.stdout.write(msg.fp.read()) else: print 'Mailbox',mbox,'has',len(msgs),'messages:' for msg in msgs: f = msg.getheader('from') or "" s = msg.getheader('subject') or "" d = msg.getheader('date') or "" print '%20.20s %18.18s %-30.30s'%(f, d[5:], s)
d5a2ad2a58ecb501d954435050657a744cf298de /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d5a2ad2a58ecb501d954435050657a744cf298de/mailbox.py
mb = MHMailbox(mbox)
if os.path.isdir(os.path.join(mbox, 'cur')): mb = Maildir(mbox) else: mb = MHMailbox(mbox)
def _test(): import time import sys import string import os args = sys.argv[1:] if not args: for key in 'MAIL', 'LOGNAME', 'USER': if os.environ.has_key(key): mbox = os.environ[key] break else: print "$MAIL, $LOGNAME nor $USER set -- who are you?" return else: mbox = args[0] if mbox[:1] == '+': mbox = os.environ['HOME'] + '/Mail/' + mbox[1:] elif not '/' in mbox: mbox = '/usr/mail/' + mbox if os.path.isdir(mbox): mb = MHMailbox(mbox) else: fp = open(mbox, 'r') mb = UnixMailbox(fp) msgs = [] while 1: msg = mb.next() if msg is None: break msgs.append(msg) msg.fp = None if len(args) > 1: num = string.atoi(args[1]) print 'Message %d body:'%num msg = msgs[num-1] msg.rewindbody() sys.stdout.write(msg.fp.read()) else: print 'Mailbox',mbox,'has',len(msgs),'messages:' for msg in msgs: f = msg.getheader('from') or "" s = msg.getheader('subject') or "" d = msg.getheader('date') or "" print '%20.20s %18.18s %-30.30s'%(f, d[5:], s)
d5a2ad2a58ecb501d954435050657a744cf298de /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d5a2ad2a58ecb501d954435050657a744cf298de/mailbox.py
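The rows above add Maildir support to the old mailbox test driver by checking for a 'cur' subdirectory before falling back to MHMailbox. A minimal sketch of the same detection using the modern standard-library mailbox module (the open_mailbox helper is illustrative, not part of the original file):

import os
import mailbox

def open_mailbox(path):
    """Open path as a Maildir if it has the cur/new layout, else as MH or mbox."""
    if os.path.isdir(path):
        # A Maildir is distinguished from an MH folder by its 'cur' subdirectory.
        if os.path.isdir(os.path.join(path, 'cur')):
            return mailbox.Maildir(path, factory=None)
        return mailbox.MH(path)
    return mailbox.mbox(path)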
new = re.pcre_expand(m, repl)
new = self._expand(m, repl)
def replace_all(self, event=None): prog = self.engine.getprog() if not prog: return repl = self.replvar.get() text = self.text res = self.engine.search_text(text, prog) if not res: text.bell() return text.tag_remove("sel", "1.0", "end") text.tag_remove("hit", "1.0", "end") line = res[0] col = res[1].start() if self.engine.iswrap(): line = 1 col = 0 ok = 1 first = last = None # XXX ought to replace circular instead of top-to-bottom when wrapping text.undo_block_start() while 1: res = self.engine.search_forward(text, prog, line, col, 0, ok) if not res: break line, m = res chars = text.get("%d.0" % line, "%d.0" % (line+1)) orig = m.group() new = re.pcre_expand(m, repl) i, j = m.span() first = "%d.%d" % (line, i) last = "%d.%d" % (line, j) if new == orig: text.mark_set("insert", last) else: text.mark_set("insert", first) if first != last: text.delete(first, last) if new: text.insert(first, new) col = i + len(new) ok = 0 text.undo_block_stop() if first and last: self.show_hit(first, last) self.close()
c9498f0486963c94bf28b5367d8f2fea563d199d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c9498f0486963c94bf28b5367d8f2fea563d199d/ReplaceDialog.py
new = re.pcre_expand(m, self.replvar.get())
new = self._expand(m, self.replvar.get())
def do_replace(self): prog = self.engine.getprog() if not prog: return 0 text = self.text try: first = pos = text.index("sel.first") last = text.index("sel.last") except TclError: pos = None if not pos: first = last = pos = text.index("insert") line, col = SearchEngine.get_line_col(pos) chars = text.get("%d.0" % line, "%d.0" % (line+1)) m = prog.match(chars, col) if not prog: return 0 new = re.pcre_expand(m, self.replvar.get()) text.mark_set("insert", first) text.undo_block_start() if m.group(): text.delete(first, last) if new: text.insert(first, new) text.undo_block_stop() self.show_hit(first, text.index("insert")) self.ok = 0 return 1
c9498f0486963c94bf28b5367d8f2fea563d199d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c9498f0486963c94bf28b5367d8f2fea563d199d/ReplaceDialog.py
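The two ReplaceDialog rows above drop re.pcre_expand (apparently from the older pcre-based re implementation) in favour of a dialog method. In the current re module the equivalent operation is Match.expand, which substitutes group backreferences into a template; a small illustration:

import re

m = re.search(r"(\w+)\s+(\w+)", "hello world")
# Match.expand() fills \1, \2, ... (and \g<name>) from the match's groups.
print(m.expand(r"\2 \1"))   # -> "world hello"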
self.enter() self.dispatch(t.body) self.leave()
self.write(")") self.enter() self.dispatch(t.body) self.leave() def _For(self, t): self.fill("for ") self.dispatch(t.target) self.write(" in ") self.dispatch(t.iter) self.enter() self.dispatch(t.body) self.leave() if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave
self.fill("def "+t.name + "(")
652266b90a3285ed5c943454dddd1ea5476a9f35 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/652266b90a3285ed5c943454dddd1ea5476a9f35/unparse.py
def _For(self, t): self.fill("for ") self.dispatch(t.target) self.write(" in ") self.dispatch(t.iter)
def _While(self, t): self.fill("while ") self.dispatch(t.test)
def _For(self, t): self.fill("for ") self.dispatch(t.target) self.write(" in ") self.dispatch(t.iter) self.enter() self.dispatch(t.body) self.leave() if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave
652266b90a3285ed5c943454dddd1ea5476a9f35 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/652266b90a3285ed5c943454dddd1ea5476a9f35/unparse.py
"RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", "FloorDiv":"//"}
"LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", "FloorDiv":"//", "Pow": "**"}
def _UnaryOp(self, t): self.write(self.unop[t.op.__class__.__name__]) self.write("(") self.dispatch(t.operand) self.write(")")
652266b90a3285ed5c943454dddd1ea5476a9f35 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/652266b90a3285ed5c943454dddd1ea5476a9f35/unparse.py
self.dispatch(t.stararg)
self.dispatch(t.starargs)
def _Call(self, t): self.dispatch(t.func) self.write("(") comma = False for e in t.args: if comma: self.write(", ") else: comma = True self.dispatch(e) for e in t.keywords: if comma: self.write(", ") else: comma = True self.dispatch(e) if t.starargs: if comma: self.write(", ") else: comma = True self.write("*") self.dispatch(t.stararg) if t.kwargs: if comma: self.write(", ") else: comma = True self.write("**") self.dispatch(t.stararg) self.write(")")
652266b90a3285ed5c943454dddd1ea5476a9f35 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/652266b90a3285ed5c943454dddd1ea5476a9f35/unparse.py
self.dispatch(t.stararg)
self.dispatch(t.kwargs)
def _Call(self, t): self.dispatch(t.func) self.write("(") comma = False for e in t.args: if comma: self.write(", ") else: comma = True self.dispatch(e) for e in t.keywords: if comma: self.write(", ") else: comma = True self.dispatch(e) if t.starargs: if comma: self.write(", ") else: comma = True self.write("*") self.dispatch(t.stararg) if t.kwargs: if comma: self.write(", ") else: comma = True self.write("**") self.dispatch(t.stararg) self.write(")")
652266b90a3285ed5c943454dddd1ea5476a9f35 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/652266b90a3285ed5c943454dddd1ea5476a9f35/unparse.py
self.write("**"+self.kwarg) self.write(")") def roundtrip(filename):
self.write("**"+t.kwarg) def _keyword(self, t): self.write(t.arg) self.write("=") self.dispatch(t.value) def _Lambda(self, t): self.write("lambda ") self.dispatch(t.args) self.write(": ") self.dispatch(t.body) def roundtrip(filename, output=sys.stdout):
def _arguments(self, t): first = True nonDef = len(t.args)-len(t.defaults) for a in t.args[0:nonDef]: if first:first = False else: self.write(", ") self.dispatch(a) for a,d in zip(t.args[nonDef:], t.defaults): if first:first = False else: self.write(", ") self.dispatch(a), self.write("=") self.dispatch(d) if t.vararg: if first:first = False else: self.write(", ") self.write("*"+t.vararg) if t.kwarg: if first:first = False else: self.write(", ") self.write("**"+self.kwarg) self.write(")")
652266b90a3285ed5c943454dddd1ea5476a9f35 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/652266b90a3285ed5c943454dddd1ea5476a9f35/unparse.py
Unparser(tree)
Unparser(tree, output) def testdir(a): try: names = [n for n in os.listdir(a) if n.endswith('.py')] except OSError: print >> sys.stderr, "Directory not readable: %s" % a else: for n in names: fullname = os.path.join(a, n) if os.path.isfile(fullname): output = cStringIO.StringIO() print 'Testing %s' % fullname try: roundtrip(fullname, output) except Exception, e: print ' Failed to compile, exception is %s' % repr(e) elif os.path.isdir(fullname): testdir(fullname) def main(args): if args[0] == '--testdir': for a in args[1:]: testdir(a) else: for a in args: roundtrip(a)
def roundtrip(filename): source = open(filename).read() tree = compile(source, filename, "exec", 0x400) Unparser(tree)
652266b90a3285ed5c943454dddd1ea5476a9f35 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/652266b90a3285ed5c943454dddd1ea5476a9f35/unparse.py
roundtrip(sys.argv[1])
main(sys.argv[1:])
def roundtrip(filename): source = open(filename).read() tree = compile(source, filename, "exec", 0x400) Unparser(tree)
652266b90a3285ed5c943454dddd1ea5476a9f35 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/652266b90a3285ed5c943454dddd1ea5476a9f35/unparse.py
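The unparse.py rows above thread an output stream through roundtrip() and add a --testdir driver; roundtrip() itself compiles a file with flag 0x400 (the AST-only compile flag of that era) and hands the tree to Unparser. The same roundtrip idea can be sketched with today's ast module (ast.unparse needs Python 3.9+); this is an illustration of the concept, not the original script:

import ast

def roundtrip(filename):
    # Parse the file into an AST and regenerate equivalent source from it.
    with open(filename) as f:
        tree = ast.parse(f.read(), filename=filename)
    return ast.unparse(tree)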
def __init__(self):
_locator = None document = None def __init__(self, documentFactory=None): self.documentFactory = documentFactory
def __init__(self): self.firstEvent = [None, None] self.lastEvent = self.firstEvent self._ns_contexts = [{}] # contains uri -> prefix dicts self._current_context = self._ns_contexts[-1]
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
def setDocumentLocator(self, locator): pass
def setDocumentLocator(self, locator): self._locator = locator
def setDocumentLocator(self, locator): pass
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
self._current_context[uri] = prefix
self._current_context[uri] = prefix or ''
def startPrefixMapping(self, prefix, uri): self._ns_contexts.append(self._current_context.copy()) self._current_context[uri] = prefix
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
del self._ns_contexts[-1]
self._current_context = self._ns_contexts.pop()
def endPrefixMapping(self, prefix): del self._ns_contexts[-1]
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
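The endPrefixMapping fix above restores the saved namespace context by popping it back into _current_context, where the removed line merely deleted the top of the stack. A minimal sketch of that push/copy/pop pattern outside the SAX handler (class and method names are illustrative):

class NSContexts:
    """Stack of uri -> prefix mappings mirroring SAX prefix-mapping events."""
    def __init__(self):
        self._stack = [{}]
        self.current = self._stack[-1]

    def start_prefix_mapping(self, prefix, uri):
        self._stack.append(self.current.copy())   # snapshot the outer context
        self.current[uri] = prefix or ''

    def end_prefix_mapping(self, prefix):
        # Restore the snapshot; the buggy version popped the stack but left
        # self.current pointing at the inner, already-modified mapping.
        self.current = self._stack.pop()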
uri,localname = name
uri, localname = name
def startElementNS(self, name, tagName , attrs): uri,localname = name if uri: # When using namespaces, the reader may or may not # provide us with the original name. If not, create # *a* valid tagName from the current context. if tagName is None: tagName = self._current_context[uri] + ":" + localname node = self.document.createElementNS(uri, tagName) else: # When the tagname is not prefixed, it just appears as # localname node = self.document.createElement(localname)
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
parent = self.curNode node.parentNode = parent
node.parentNode = self.curNode
def startElementNS(self, name, tagName , attrs): uri,localname = name if uri: # When using namespaces, the reader may or may not # provide us with the original name. If not, create # *a* valid tagName from the current context. if tagName is None: tagName = self._current_context[uri] + ":" + localname node = self.document.createElementNS(uri, tagName) else: # When the tagname is not prefixed, it just appears as # localname node = self.document.createElement(localname)
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
self.curNode = node.parentNode
self.curNode = self.curNode.parentNode
def endElementNS(self, name, tagName): node = self.curNode self.lastEvent[1] = [(END_ELEMENT, node), None] self.lastEvent = self.lastEvent[1] #self.events.append((END_ELEMENT, node)) self.curNode = node.parentNode
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
parent = self.curNode node.parentNode = parent
node.parentNode = self.curNode
def startElement(self, name, attrs): node = self.document.createElement(name)
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
node = self.document.createTextNode(chars[start:start + length])
node = self.document.createTextNode(chars)
def ignorableWhitespace(self, chars): node = self.document.createTextNode(chars[start:start + length]) parent = self.curNode node.parentNode = parent self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None] self.lastEvent = self.lastEvent[1] #self.events.append((IGNORABLE_WHITESPACE, node))
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
node = self.curNode = self.document = minidom.Document() node.parentNode = None
publicId = systemId = None if self._locator: publicId = self._locator.getPublicId() systemId = self._locator.getSystemId() if self.documentFactory is None: import xml.dom.minidom self.documentFactory = xml.dom.minidom.Document.implementation node = self.documentFactory.createDocument(None, publicId, systemId) self.curNode = self.document = node
def startDocument(self): node = self.curNode = self.document = minidom.Document() node.parentNode = None self.lastEvent[1] = [(START_DOCUMENT, node), None] self.lastEvent = self.lastEvent[1] #self.events.append((START_DOCUMENT, node))
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
assert not self.curNode.parentNode for node in self.curNode.childNodes: if node.nodeType == node.ELEMENT_NODE: self.document.documentElement = node
assert self.curNode.parentNode is None, \ "not all elements have been properly closed" assert self.curNode.documentElement is not None, \ "document does not contain a root element" node = self.curNode.documentElement
def endDocument(self): assert not self.curNode.parentNode for node in self.curNode.childNodes: if node.nodeType == node.ELEMENT_NODE: self.document.documentElement = node #if not self.document.documentElement: # raise Error, "No document element"
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
self.parser.setFeature(xml.sax.handler.feature_namespaces,1)
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
def reset(self): self.pulldom = PullDOM() # This content handler relies on namespace support self.parser.setFeature(xml.sax.handler.feature_namespaces,1) self.parser.setContentHandler(self.pulldom)
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
buf=self.stream.read(self.bufsize)
buf = self.stream.read(self.bufsize)
def getEvent(self): if not self.pulldom.firstEvent[1]: self.pulldom.lastEvent = self.pulldom.firstEvent while not self.pulldom.firstEvent[1]: buf=self.stream.read(self.bufsize) if not buf: #FIXME: why doesn't Expat close work? #self.parser.close() return None self.parser.feed(buf) rc = self.pulldom.firstEvent[1][0] self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1] return rc
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
def parse(stream_or_string, parser=None, bufsize=default_bufsize): if type(stream_or_string) is type(""):
def parse(stream_or_string, parser=None, bufsize=None): if bufsize is None: bufsize = default_bufsize if type(stream_or_string) in [type(""), type(u"")]:
def parse(stream_or_string, parser=None, bufsize=default_bufsize): if type(stream_or_string) is type(""): stream = open(stream_or_string) else: stream = stream_or_string if not parser: parser = xml.sax.make_parser() return DOMEventStream(stream, parser, bufsize)
500fca110f005eb10fd581019263b28b27fed1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/500fca110f005eb10fd581019263b28b27fed1ca/pulldom.py
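The parse() row above replaces a default evaluated at import time (bufsize=default_bufsize) with bufsize=None resolved inside the function, so later changes to the module-level default are honoured, and it also accepts unicode paths. A generic sketch of the late-binding-default pattern (names are illustrative):

DEFAULT_BUFSIZE = 2 ** 14   # module-level setting callers may change at runtime

def read_chunks(stream, bufsize=None):
    # Resolve the default at call time so reassigning DEFAULT_BUFSIZE takes effect.
    if bufsize is None:
        bufsize = DEFAULT_BUFSIZE
    while True:
        chunk = stream.read(bufsize)
        if not chunk:
            break
        yield chunk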
def run (self):
fed466f66accd5e175a8ff6c489589b6c5ec887b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/fed466f66accd5e175a8ff6c489589b6c5ec887b/bdist_rpm.py
srpms = glob.glob(os.path.join(rpm_dir['SRPMS'], "*.rpm")) assert len(srpms) == 1, \ "unexpected number of SRPM files found: %s" % srpms dist_file = ('bdist_rpm', 'any', self._dist_path(srpms[0])) self.distribution.dist_files.append(dist_file) self.move_file(srpms[0], self.dist_dir)
srpm = os.path.join(rpm_dir['SRPMS'], source_rpm) assert(os.path.exists(srpm)) self.move_file(srpm, self.dist_dir)
def run (self):
fed466f66accd5e175a8ff6c489589b6c5ec887b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/fed466f66accd5e175a8ff6c489589b6c5ec887b/bdist_rpm.py
rpms = glob.glob(os.path.join(rpm_dir['RPMS'], "*/*.rpm")) debuginfo = glob.glob(os.path.join(rpm_dir['RPMS'], "*/*debuginfo*.rpm")) if debuginfo: rpms.remove(debuginfo[0]) assert len(rpms) == 1, \ "unexpected number of RPM files found: %s" % rpms dist_file = ('bdist_rpm', get_python_version(), self._dist_path(rpms[0])) self.distribution.dist_files.append(dist_file) self.move_file(rpms[0], self.dist_dir) if debuginfo: dist_file = ('bdist_rpm', get_python_version(), self._dist_path(debuginfo[0])) self.move_file(debuginfo[0], self.dist_dir)
for rpm in binary_rpms: rpm = os.path.join(rpm_dir['RPMS'], rpm) if os.path.exists(rpm): self.move_file(rpm, self.dist_dir)
def run (self):
fed466f66accd5e175a8ff6c489589b6c5ec887b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/fed466f66accd5e175a8ff6c489589b6c5ec887b/bdist_rpm.py
if headers.has_key('www-authenticate'): stuff = headers['www-authenticate'] import re match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) if match: scheme, realm = match.groups() if scheme.lower() == 'basic': name = 'retry_' + self.type + '_basic_auth' if data is None: return getattr(self,name)(url, realm) else: return getattr(self,name)(url, realm, data)
if not headers.has_key('www-authenticate'): URLopener.http_error_default(self, url, fp, errmsg, headers) stuff = headers['www-authenticate'] import re match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) if not match: URLopener.http_error_default(self, url, fp, errcode, errmsg, headers) scheme, realm = match.groups() if scheme.lower() != 'basic': URLopener.http_error_default(self, url, fp, errcode, errmsg, headers) name = 'retry_' + self.type + '_basic_auth' if data is None: return getattr(self,name)(url, realm) else: return getattr(self,name)(url, realm, data)
def http_error_401(self, url, fp, errcode, errmsg, headers, data=None): """Error 401 -- authentication required. See this URL for a description of the basic authentication scheme: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt""" if headers.has_key('www-authenticate'): stuff = headers['www-authenticate'] import re match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) if match: scheme, realm = match.groups() if scheme.lower() == 'basic': name = 'retry_' + self.type + '_basic_auth' if data is None: return getattr(self,name)(url, realm) else: return getattr(self,name)(url, realm, data)
0ef205f9c5e4a66f4da9e2208d192b9e4407eae6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/0ef205f9c5e4a66f4da9e2208d192b9e4407eae6/urllib.py
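The urllib row above restructures http_error_401 into early exits, but the header handling stays the same: take the WWW-Authenticate value and pull the scheme and realm out with a regex. A standalone illustration using the same pattern as the code above:

import re

header = 'Basic realm="staging"'
match = re.match(r'[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', header)
if match:
    scheme, realm = match.groups()
    print(scheme.lower(), realm)   # -> basic staging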
preprocessor=cc + " -E",
preprocessor=cpp,
def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. Mainly needed on Unix, so we can plug in the information that varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": (cc, opt, ccshared, ldshared, so_ext) = \ get_config_vars('CC', 'OPT', 'CCSHARED', 'LDSHARED', 'SO') cc_cmd = cc + ' ' + opt compiler.set_executables( preprocessor=cc + " -E", # not always! compiler=cc_cmd, compiler_so=cc_cmd + ' ' + ccshared, linker_so=ldshared, linker_exe=cc) compiler.shared_lib_extension = so_ext
3d1f13f7aa37750db04f89c5b85fdd3f125033a3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3d1f13f7aa37750db04f89c5b85fdd3f125033a3/sysconfig.py
if self.tkconsole.closing: return
def poll_subprocess(self): clt = self.rpcclt if clt is None: return try: response = clt.pollresponse(self.active_seq, wait=0.05) except (EOFError, IOError, KeyboardInterrupt): # lost connection or subprocess terminated itself, restart # [the KBI is from rpc.SocketIO.handle_EOF()] if self.tkconsole.closing: return response = None self.restart_subprocess() self.tkconsole.endexecuting() if response: self.tkconsole.resetoutput() self.active_seq = None how, what = response console = self.tkconsole.console if how == "OK": if what is not None: print >>console, `what` elif how == "EXCEPTION": if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"): self.remote_stack_viewer() elif how == "ERROR": errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n" print >>sys.__stderr__, errmsg, what print >>console, errmsg, what # we received a response to the currently active seq number: self.tkconsole.endexecuting() # Reschedule myself in 50 ms self.tkconsole.text.after(50, self.poll_subprocess)
c972a8080ead593831900b1033e83bae7763ce08 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c972a8080ead593831900b1033e83bae7763ce08/PyShell.py
self.tkconsole.text.after(50, self.poll_subprocess)
if not self.tkconsole.closing: self.tkconsole.text.after(self.tkconsole.pollinterval, self.poll_subprocess)
def poll_subprocess(self): clt = self.rpcclt if clt is None: return try: response = clt.pollresponse(self.active_seq, wait=0.05) except (EOFError, IOError, KeyboardInterrupt): # lost connection or subprocess terminated itself, restart # [the KBI is from rpc.SocketIO.handle_EOF()] if self.tkconsole.closing: return response = None self.restart_subprocess() self.tkconsole.endexecuting() if response: self.tkconsole.resetoutput() self.active_seq = None how, what = response console = self.tkconsole.console if how == "OK": if what is not None: print >>console, `what` elif how == "EXCEPTION": if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"): self.remote_stack_viewer() elif how == "ERROR": errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n" print >>sys.__stderr__, errmsg, what print >>console, errmsg, what # we received a response to the currently active seq number: self.tkconsole.endexecuting() # Reschedule myself in 50 ms self.tkconsole.text.after(50, self.poll_subprocess)
c972a8080ead593831900b1033e83bae7763ce08 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c972a8080ead593831900b1033e83bae7763ce08/PyShell.py
return EditorWindow.close(self)
self.closing = True self.text.after(2 * self.pollinterval, self.close2) def close2(self): return EditorWindow.close(self)
def close(self): "Extend EditorWindow.close()" if self.executing: response = tkMessageBox.askokcancel( "Kill?", "The program is still running!\n Do you want to kill it?", default="ok", parent=self.text) if response == False: return "cancel" # interrupt the subprocess self.canceled = True if use_subprocess: self.interp.interrupt_subprocess() return "cancel" else: return EditorWindow.close(self)
c972a8080ead593831900b1033e83bae7763ce08 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c972a8080ead593831900b1033e83bae7763ce08/PyShell.py
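The three PyShell rows above add a closing flag so poll_subprocess stops rescheduling itself once the window is shutting down, and close() defers the real teardown by two poll intervals. A minimal sketch of that self-rescheduling after() pattern with a shutdown flag, assuming a Tk display is available (widget and interval names are illustrative):

import tkinter as tk

class Poller:
    def __init__(self, root, interval_ms=50):
        self.root = root
        self.interval_ms = interval_ms
        self.closing = False
        self.poll()

    def poll(self):
        if self.closing:
            return                        # stop rescheduling once shutdown begins
        # ... poll the subprocess / queue here ...
        self.root.after(self.interval_ms, self.poll)

    def close(self):
        self.closing = True
        # give an in-flight callback time to see the flag before tearing down
        self.root.after(2 * self.interval_ms, self.root.destroy)

root = tk.Tk()
poller = Poller(root)
root.after(500, poller.close)   # shut down after half a second for the demo
root.mainloop()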
'\(
'\(
def processor(): """ Returns the (true) processor name, e.g. 'amdk6' An empty string is returned if the value cannot be determined. Note that many platforms do not provide this information or simply return the same value as for machine(), e.g. NetBSD does this. """ return uname()[5]
5b95895db3adb860691f98579d478f9281596f88 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5b95895db3adb860691f98579d478f9281596f88/platform.py
buildno = int(buildno)
def _sys_version(): """ Returns a parsed version of Python's sys.version as tuple (version, buildno, builddate, compiler) referring to the Python version, build number, build date/time as string and the compiler identification string. Note that unlike the Python sys.version, the returned value for the Python version will always include the patchlevel (it defaults to '.0'). """ global _sys_version_cache if _sys_version_cache is not None: return _sys_version_cache version, buildno, builddate, buildtime, compiler = \ _sys_version_parser.match(sys.version).groups() buildno = int(buildno) builddate = builddate + ' ' + buildtime l = string.split(version, '.') if len(l) == 2: l.append('0') version = string.join(l, '.') _sys_version_cache = (version, buildno, builddate, compiler) return _sys_version_cache
5b95895db3adb860691f98579d478f9281596f88 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5b95895db3adb860691f98579d478f9281596f88/platform.py
def _parsegen(self): # Create a new message and start by parsing headers. self._new_message() headers = [] # Collect the headers, searching for a line that doesn't match the RFC # 2822 header or continuation pattern (including an empty line). for line in self._input: if line is NeedMoreData: yield NeedMoreData continue if not headerRE.match(line): # If we saw the RFC defined header/body separator # (i.e. newline), just throw it away. Otherwise the line is # part of the body so push it back. if not NLCRE.match(line): self._input.unreadline(line) break headers.append(line) # Done with the headers, so parse them and figure out what we're # supposed to see in the body of the message. self._parse_headers(headers) # Headers-only parsing is a backwards compatibility hack, which was # necessary in the older parser, which could throw errors. All # remaining lines in the input are thrown into the message body. if self._headersonly: lines = [] while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue if line == '': break lines.append(line) self._cur.set_payload(EMPTYSTRING.join(lines)) return if self._cur.get_content_type() == 'message/delivery-status': # message/delivery-status contains blocks of headers separated by # a blank line. We'll represent each header block as a separate # nested message object, but the processing is a bit different # than standard message/* types because there is no body for the # nested messages. A blank line separates the subparts. while True: self._input.push_eof_matcher(NLCRE.match) for retval in self._parsegen(): if retval is NeedMoreData: yield NeedMoreData continue break msg = self._pop_message() # We need to pop the EOF matcher in order to tell if we're at # the end of the current file, not the end of the last block # of message headers. self._input.pop_eof_matcher() # The input stream must be sitting at the newline or at the # EOF. We want to see if we're at the end of this subpart, so # first consume the blank line, then test the next line to see # if we're at this subpart's EOF. line = self._input.readline() line = self._input.readline() if line == '': break # Not at EOF so this is a line we're going to need. self._input.unreadline(line) return if self._cur.get_content_maintype() == 'message': # The message claims to be a message/* type, then what follows is # another RFC 2822 message. for retval in self._parsegen(): if retval is NeedMoreData: yield NeedMoreData continue break self._pop_message() return if self._cur.get_content_maintype() == 'multipart': boundary = self._cur.get_boundary() if boundary is None: # The message /claims/ to be a multipart but it has not # defined a boundary. That's a problem which we'll handle by # reading everything until the EOF and marking the message as # defective. self._cur.defects.append(Errors.NoBoundaryInMultipartDefect()) lines = [] for line in self._input: if line is NeedMoreData: yield NeedMoreData continue lines.append(line) self._cur.set_payload(EMPTYSTRING.join(lines)) return # Create a line match predicate which matches the inter-part # boundary as well as the end-of-multipart boundary. Don't push # this onto the input stream until we've scanned past the # preamble. 
separator = '--' + boundary boundaryre = re.compile( '(?P<sep>' + re.escape(separator) + r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)$') capturing_preamble = True preamble = [] linesep = False while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue if line == '': break mo = boundaryre.match(line) if mo: # If we're looking at the end boundary, we're done with # this multipart. If there was a newline at the end of # the closing boundary, then we need to initialize the # epilogue with the empty string (see below). if mo.group('end'): linesep = mo.group('linesep') break # We saw an inter-part boundary. Were we in the preamble? if capturing_preamble: if preamble: # According to RFC 2046, the last newline belongs # to the boundary. lastline = preamble[-1] eolmo = NLCRE_eol.search(lastline) if eolmo: preamble[-1] = lastline[:-len(eolmo.group(0))] self._cur.preamble = EMPTYSTRING.join(preamble) #import pdb ; pdb.set_trace() # See SF bug #1030941 capturing_preamble = False self._input.unreadline(line) continue # We saw a boundary separating two parts. Consume any # multiple boundary lines that may be following. Our # interpretation of RFC 2046 BNF grammar does not produce # body parts within such double boundaries. while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue mo = boundaryre.match(line) if not mo: self._input.unreadline(line) break # Recurse to parse this subpart; the input stream points # at the subpart's first line. self._input.push_eof_matcher(boundaryre.match) for retval in self._parsegen(): if retval is NeedMoreData: yield NeedMoreData continue break # Because of RFC 2046, the newline preceding the boundary # separator actually belongs to the boundary, not the # previous subpart's payload (or epilogue if the previous # part is a multipart). if self._last.get_content_maintype() == 'multipart': epilogue = self._last.epilogue if epilogue == '': self._last.epilogue = None elif epilogue is not None: mo = NLCRE_eol.search(epilogue) if mo: end = len(mo.group(0)) self._last.epilogue = epilogue[:-end] else: payload = self._last.get_payload() if isinstance(payload, basestring): mo = NLCRE_eol.search(payload) if mo: payload = payload[:-len(mo.group(0))] self._last.set_payload(payload) self._input.pop_eof_matcher() self._pop_message() # Set the multipart up for newline cleansing, which will # happen if we're in a nested multipart. self._last = self._cur else: # I think we must be in the preamble assert capturing_preamble preamble.append(line) # We've seen either the EOF or the end boundary. If we're still # capturing the preamble, we never saw the start boundary. Note # that as a defect and store the captured text as the payload. # Otherwise everything from here to the EOF is epilogue. if capturing_preamble: self._cur.defects.append(Errors.StartBoundaryNotFoundDefect()) self._cur.set_payload(EMPTYSTRING.join(preamble)) return # If the end boundary ended in a newline, we'll need to make sure # the epilogue isn't None if linesep: epilogue = [''] else: epilogue = [] for line in self._input: if line is NeedMoreData: yield NeedMoreData continue epilogue.append(line) # Any CRLF at the front of the epilogue is not technically part of # the epilogue. Also, watch out for an empty string epilogue, # which means a single newline. 
if epilogue: firstline = epilogue[0] bolmo = NLCRE_bol.match(firstline) if bolmo: epilogue[0] = firstline[len(bolmo.group(0)):] self._cur.epilogue = EMPTYSTRING.join(epilogue) return # Otherwise, it's some non-multipart type, so the entire rest of the # file contents becomes the payload. lines = [] for line in self._input: if line is NeedMoreData: yield NeedMoreData continue lines.append(line) self._cur.set_payload(EMPTYSTRING.join(lines))
3d852a6e3e65adac018ab2edf8b5a23d10ba5acb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3d852a6e3e65adac018ab2edf8b5a23d10ba5acb/FeedParser.py
def _parsegen(self): # Create a new message and start by parsing headers. self._new_message() headers = [] # Collect the headers, searching for a line that doesn't match the RFC # 2822 header or continuation pattern (including an empty line). for line in self._input: if line is NeedMoreData: yield NeedMoreData continue if not headerRE.match(line): # If we saw the RFC defined header/body separator # (i.e. newline), just throw it away. Otherwise the line is # part of the body so push it back. if not NLCRE.match(line): self._input.unreadline(line) break headers.append(line) # Done with the headers, so parse them and figure out what we're # supposed to see in the body of the message. self._parse_headers(headers) # Headers-only parsing is a backwards compatibility hack, which was # necessary in the older parser, which could throw errors. All # remaining lines in the input are thrown into the message body. if self._headersonly: lines = [] while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue if line == '': break lines.append(line) self._cur.set_payload(EMPTYSTRING.join(lines)) return if self._cur.get_content_type() == 'message/delivery-status': # message/delivery-status contains blocks of headers separated by # a blank line. We'll represent each header block as a separate # nested message object, but the processing is a bit different # than standard message/* types because there is no body for the # nested messages. A blank line separates the subparts. while True: self._input.push_eof_matcher(NLCRE.match) for retval in self._parsegen(): if retval is NeedMoreData: yield NeedMoreData continue break msg = self._pop_message() # We need to pop the EOF matcher in order to tell if we're at # the end of the current file, not the end of the last block # of message headers. self._input.pop_eof_matcher() # The input stream must be sitting at the newline or at the # EOF. We want to see if we're at the end of this subpart, so # first consume the blank line, then test the next line to see # if we're at this subpart's EOF. line = self._input.readline() line = self._input.readline() if line == '': break # Not at EOF so this is a line we're going to need. self._input.unreadline(line) return if self._cur.get_content_maintype() == 'message': # The message claims to be a message/* type, then what follows is # another RFC 2822 message. for retval in self._parsegen(): if retval is NeedMoreData: yield NeedMoreData continue break self._pop_message() return if self._cur.get_content_maintype() == 'multipart': boundary = self._cur.get_boundary() if boundary is None: # The message /claims/ to be a multipart but it has not # defined a boundary. That's a problem which we'll handle by # reading everything until the EOF and marking the message as # defective. self._cur.defects.append(Errors.NoBoundaryInMultipartDefect()) lines = [] for line in self._input: if line is NeedMoreData: yield NeedMoreData continue lines.append(line) self._cur.set_payload(EMPTYSTRING.join(lines)) return # Create a line match predicate which matches the inter-part # boundary as well as the end-of-multipart boundary. Don't push # this onto the input stream until we've scanned past the # preamble. 
separator = '--' + boundary boundaryre = re.compile( '(?P<sep>' + re.escape(separator) + r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)$') capturing_preamble = True preamble = [] linesep = False while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue if line == '': break mo = boundaryre.match(line) if mo: # If we're looking at the end boundary, we're done with # this multipart. If there was a newline at the end of # the closing boundary, then we need to initialize the # epilogue with the empty string (see below). if mo.group('end'): linesep = mo.group('linesep') break # We saw an inter-part boundary. Were we in the preamble? if capturing_preamble: if preamble: # According to RFC 2046, the last newline belongs # to the boundary. lastline = preamble[-1] eolmo = NLCRE_eol.search(lastline) if eolmo: preamble[-1] = lastline[:-len(eolmo.group(0))] self._cur.preamble = EMPTYSTRING.join(preamble) #import pdb ; pdb.set_trace() # See SF bug #1030941 capturing_preamble = False self._input.unreadline(line) continue # We saw a boundary separating two parts. Consume any # multiple boundary lines that may be following. Our # interpretation of RFC 2046 BNF grammar does not produce # body parts within such double boundaries. while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue mo = boundaryre.match(line) if not mo: self._input.unreadline(line) break # Recurse to parse this subpart; the input stream points # at the subpart's first line. self._input.push_eof_matcher(boundaryre.match) for retval in self._parsegen(): if retval is NeedMoreData: yield NeedMoreData continue break # Because of RFC 2046, the newline preceding the boundary # separator actually belongs to the boundary, not the # previous subpart's payload (or epilogue if the previous # part is a multipart). if self._last.get_content_maintype() == 'multipart': epilogue = self._last.epilogue if epilogue == '': self._last.epilogue = None elif epilogue is not None: mo = NLCRE_eol.search(epilogue) if mo: end = len(mo.group(0)) self._last.epilogue = epilogue[:-end] else: payload = self._last.get_payload() if isinstance(payload, basestring): mo = NLCRE_eol.search(payload) if mo: payload = payload[:-len(mo.group(0))] self._last.set_payload(payload) self._input.pop_eof_matcher() self._pop_message() # Set the multipart up for newline cleansing, which will # happen if we're in a nested multipart. self._last = self._cur else: # I think we must be in the preamble assert capturing_preamble preamble.append(line) # We've seen either the EOF or the end boundary. If we're still # capturing the preamble, we never saw the start boundary. Note # that as a defect and store the captured text as the payload. # Otherwise everything from here to the EOF is epilogue. if capturing_preamble: self._cur.defects.append(Errors.StartBoundaryNotFoundDefect()) self._cur.set_payload(EMPTYSTRING.join(preamble)) return # If the end boundary ended in a newline, we'll need to make sure # the epilogue isn't None if linesep: epilogue = [''] else: epilogue = [] for line in self._input: if line is NeedMoreData: yield NeedMoreData continue epilogue.append(line) # Any CRLF at the front of the epilogue is not technically part of # the epilogue. Also, watch out for an empty string epilogue, # which means a single newline. 
if epilogue: firstline = epilogue[0] bolmo = NLCRE_bol.match(firstline) if bolmo: epilogue[0] = firstline[len(bolmo.group(0)):] self._cur.epilogue = EMPTYSTRING.join(epilogue) return # Otherwise, it's some non-multipart type, so the entire rest of the # file contents becomes the payload. lines = [] for line in self._input: if line is NeedMoreData: yield NeedMoreData continue lines.append(line) self._cur.set_payload(EMPTYSTRING.join(lines))
3d852a6e3e65adac018ab2edf8b5a23d10ba5acb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3d852a6e3e65adac018ab2edf8b5a23d10ba5acb/FeedParser.py
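The FeedParser context above is organised around a generator, _parsegen, that yields a NeedMoreData sentinel whenever the buffered input runs out, so the caller can feed() more text and resume where parsing stopped. A toy sketch of that push-parser shape (the sentinel and class names mirror the idea only, not the email package API):

NeedMoreData = object()   # sentinel yielded when the buffer has no complete line

class LineFeedParser:
    """Collects complete lines pushed in arbitrary chunks via feed()."""
    def __init__(self):
        self._buffer = ''
        self.lines = []
        self._gen = self._parsegen()
        next(self._gen)            # prime the generator up to its first yield

    def feed(self, data):
        self._buffer += data
        next(self._gen)            # let the generator consume what it can

    def _parsegen(self):
        while True:
            while '\n' not in self._buffer:
                yield NeedMoreData           # suspend until feed() supplies more
            line, self._buffer = self._buffer.split('\n', 1)
            self.lines.append(line)

p = LineFeedParser()
p.feed('hello wo')
p.feed('rld\nsecond line\npartial')
print(p.lines)   # -> ['hello world', 'second line']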
if self.distribution.has_ext_modules():
if not self.skip_build and self.distribution.has_ext_modules():
def finalize_options (self): if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'wininst') if not self.target_version: self.target_version = "" if self.distribution.has_ext_modules(): short_version = get_python_version() if self.target_version and self.target_version != short_version: raise DistutilsOptionError, \ "target version can only be" + short_version self.target_version = short_version
c6bfd80fd5f7756209c6fb82867007f2922cc589 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c6bfd80fd5f7756209c6fb82867007f2922cc589/bdist_wininst.py
"target version can only be" + short_version
"target version can only be %s, or the '--skip_build'" \ " option must be specified" % (short_version,)
def finalize_options (self): if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'wininst') if not self.target_version: self.target_version = "" if self.distribution.has_ext_modules(): short_version = get_python_version() if self.target_version and self.target_version != short_version: raise DistutilsOptionError, \ "target version can only be" + short_version self.target_version = short_version
c6bfd80fd5f7756209c6fb82867007f2922cc589 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c6bfd80fd5f7756209c6fb82867007f2922cc589/bdist_wininst.py
build_info = "Build %s with distutils-%s" % \
build_info = "Built %s with distutils-%s" % \
def get_inidata (self): # Return data describing the installation.
c6bfd80fd5f7756209c6fb82867007f2922cc589 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c6bfd80fd5f7756209c6fb82867007f2922cc589/bdist_wininst.py
`fileName`+' .')
`fn`+' .', parent=self)
def ViewFile(self, viewTitle, viewFile, encoding=None): fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), viewFile) if encoding: import codecs try: textFile = codecs.open(fn, 'r') except IOError: tkMessageBox.showerror(title='File Load Error', message='Unable to load file '+ `fileName`+' .') return else: data = textFile.read() else: data = None textView.TextViewer(self, viewTitle, fn, data=data)
576aa62d6226aa45103eb6de962516a995360113 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/576aa62d6226aa45103eb6de962516a995360113/aboutDialog.py
topdir = os.getcwd() + 'build/rpm'
def run (self):
eab27e36d935b246a5c2dcf496d3e5fabe1f3204 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/eab27e36d935b246a5c2dcf496d3e5fabe1f3204/bdist_rpm.py
'_topdir ' + os.getcwd() + '/build/rpm',])
'_topdir %s/%s' % (os.getcwd(), rpm_base),])
def run (self):
eab27e36d935b246a5c2dcf496d3e5fabe1f3204 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/eab27e36d935b246a5c2dcf496d3e5fabe1f3204/bdist_rpm.py
chars = self.text.get("1.0", "end-1c")
chars = str(self.text.get("1.0", "end-1c"))
def writefile(self, filename): self.fixlastline() try: f = open(filename, "w") chars = self.text.get("1.0", "end-1c") f.write(chars) f.close() ## print "saved to", `filename` return 1 except IOError, msg: tkMessageBox.showerror("I/O Error", str(msg), master=self.text) return 0
7b324717f75a4ebf3f6741d127c92ddf0a464946 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/7b324717f75a4ebf3f6741d127c92ddf0a464946/IOBinding.py
def __init__(self, get, set=None):
def __init__(self, get, set=None, delete=None):
def __init__(self, get, set=None): self.__get = get self.__set = set
550fa08c46613e82db470dc3549ebda141904676 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/550fa08c46613e82db470dc3549ebda141904676/test_descr.py
x = computed_attribute(__get_x, __set_x)
def __delete_x(self): del self.__x x = computed_attribute(__get_x, __set_x, __delete_x)
def __set_x(self, x): self.__x = x
550fa08c46613e82db470dc3549ebda141904676 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/550fa08c46613e82db470dc3549ebda141904676/test_descr.py
C.x.__delete__(a) verify(not hasattr(a, "x"))
def delx(self): del self.__x
550fa08c46613e82db470dc3549ebda141904676 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/550fa08c46613e82db470dc3549ebda141904676/test_descr.py
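The test_descr rows above give computed_attribute a delete callback and then exercise C.x.__delete__(a). The three-way descriptor protocol behind that can be sketched directly (a minimal reimplementation for illustration, not the test's own class):

class ComputedAttribute:
    """Descriptor wiring an attribute to get/set/delete callbacks, much like property()."""
    def __init__(self, fget, fset=None, fdel=None):
        self.fget, self.fset, self.fdel = fget, fset, fdel

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return self.fget(obj)

    def __set__(self, obj, value):
        if self.fset is None:
            raise AttributeError("can't set attribute")
        self.fset(obj, value)

    def __delete__(self, obj):
        if self.fdel is None:
            raise AttributeError("can't delete attribute")
        self.fdel(obj)

class C:
    def _get_x(self):    return self._x
    def _set_x(self, x): self._x = x
    def _del_x(self):    del self._x
    x = ComputedAttribute(_get_x, _set_x, _del_x)

a = C()
a.x = 12
assert a.x == 12
del a.x
assert not hasattr(a, 'x')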
__call__ = run
def __call__(self, *args, **kwds): return self.run(*args, **kwds)
def run(self, result=None): if result is None: result = self.defaultTestResult() result.startTest(self) testMethod = getattr(self, self.__testMethodName) try: try: self.setUp() except KeyboardInterrupt: raise except: result.addError(self, self.__exc_info()) return
75609c01adc8a9bfa1010c2c90088d91f72b6979 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/75609c01adc8a9bfa1010c2c90088d91f72b6979/unittest.py
return self(result) def __call__(self, result):
def run(self, result): return self(result)
75609c01adc8a9bfa1010c2c90088d91f72b6979 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/75609c01adc8a9bfa1010c2c90088d91f72b6979/unittest.py
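The unittest rows above replace the alias __call__ = run with a __call__ that forwards to self.run(...). The difference shows up in subclasses that override run: the alias stays bound to the base implementation, while delegation follows the override. A small sketch of the distinction (class names are illustrative):

class AliasBase:
    def run(self):
        return "base run"
    __call__ = run            # frozen to AliasBase.run at class-creation time

class AliasChild(AliasBase):
    def run(self):
        return "child run"

class DelegatingBase:
    def run(self):
        return "base run"
    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)   # resolved through self, so overrides win

class DelegatingChild(DelegatingBase):
    def run(self):
        return "child run"

print(AliasChild()())        # -> base run   (override ignored)
print(DelegatingChild()())   # -> child run  (override honoured)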
print "usage:", sys.argv[0], "file ..."
print "usage:", sys.argv[0], "[-t tabwidth] file ..."
def main(): tabsize = 8 try: opts, args = getopt.getopt(sys.argv[1:], "t:") if not args: raise getopt.error, "At least one file argument required" except getopt.error, msg: print msg print "usage:", sys.argv[0], "file ..." return for file in args: process(file, tabsize)
6ad57e179390a3aef9909c5a8753284a0d84ad24 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6ad57e179390a3aef9909c5a8753284a0d84ad24/untabify.py
vout.setpf(1, -2))
vout.setpf((1, -2))
def record(v, info, filename, audiofilename, mono, grey, greybits, \ monotreshold, fields, preallocspace): import thread format, x, y, qsize, rate = info fps = 59.64 # Fields per second # XXX (Strange: need fps of Indigo monitor, not of PAL or NTSC!) tpf = 1000.0 / fps # Time per field in msec if filename: vout = VFile.VoutFile().init(filename) if mono: format = 'mono' elif grey and greybits == 8: format = 'grey' elif grey: format = 'grey'+`abs(greybits)` else: format = 'rgb8' vout.setformat(format) vout.setsize(x, y) if fields: vout.setpf(1, -2)) vout.writeheader() if preallocspace: print 'Preallocating space...' vout.prealloc(preallocspace) print 'done.' MAXSIZE = 20 # XXX should be a user option import Queue queue = Queue.Queue().init(MAXSIZE) done = thread.allocate_lock() done.acquire_lock() convertor = None if grey: if greybits == 2: convertor = imageop.grey2grey2 elif greybits == 4: convertor = imageop.grey2grey4 elif greybits == -2: convertor = imageop.dither2grey2 thread.start_new_thread(saveframes, \ (vout, queue, done, mono, monotreshold, convertor)) if audiofilename: audiodone = thread.allocate_lock() audiodone.acquire_lock() audiostop = [] initaudio(audiofilename, audiostop, audiodone) gl.wintitle('(rec) ' + filename) lastid = 0 t0 = time.millitimer() count = 0 ids = [] v.InitContinuousCapture(info) while not gl.qtest(): try: cd, id = v.GetCaptureData() except sv.error: #time.millisleep(10) # XXX is this necessary? sgi.nap(1) # XXX Try by Jack continue ids.append(id) id = id + 2*rate
d2942a048ba07e4c2288b2002ea095c6a8d774cd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d2942a048ba07e4c2288b2002ea095c6a8d774cd/Vrec.py
return _float(s)
return _float(s)
def atof(s): """atof(s) -> float Return the floating point number represented by the string s. """ if type(s) == _StringType: return _float(s) else: raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
def atof(s): """atof(s) -> float Return the floating point number represented by the string s. """ if type(s) == _StringType: return _float(s) else: raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
s = args[0]
s = args[0]
def atoi(*args): """atoi(s [,base]) -> int Return the integer represented by the string s in the given base, which defaults to 10. The string s must consist of one or more digits, possibly preceded by a sign. If base is 0, it is chosen from the leading characters of s, 0 for octal, 0x or 0X for hexadecimal. If base is 16, a preceding 0x or 0X is accepted. """ try: s = args[0] except IndexError: raise TypeError('function requires at least 1 argument: %d given' % len(args)) # Don't catch type error resulting from too many arguments to int(). The # error message isn't compatible but the error type is, and this function # is complicated enough already. if type(s) == _StringType: return _apply(_int, args) else: raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
raise TypeError('function requires at least 1 argument: %d given' % len(args))
raise TypeError('function requires at least 1 argument: %d given' % len(args))
def atoi(*args): """atoi(s [,base]) -> int Return the integer represented by the string s in the given base, which defaults to 10. The string s must consist of one or more digits, possibly preceded by a sign. If base is 0, it is chosen from the leading characters of s, 0 for octal, 0x or 0X for hexadecimal. If base is 16, a preceding 0x or 0X is accepted. """ try: s = args[0] except IndexError: raise TypeError('function requires at least 1 argument: %d given' % len(args)) # Don't catch type error resulting from too many arguments to int(). The # error message isn't compatible but the error type is, and this function # is complicated enough already. if type(s) == _StringType: return _apply(_int, args) else: raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
return _apply(_int, args)
return _apply(_int, args)
def atoi(*args): """atoi(s [,base]) -> int Return the integer represented by the string s in the given base, which defaults to 10. The string s must consist of one or more digits, possibly preceded by a sign. If base is 0, it is chosen from the leading characters of s, 0 for octal, 0x or 0X for hexadecimal. If base is 16, a preceding 0x or 0X is accepted. """ try: s = args[0] except IndexError: raise TypeError('function requires at least 1 argument: %d given' % len(args)) # Don't catch type error resulting from too many arguments to int(). The # error message isn't compatible but the error type is, and this function # is complicated enough already. if type(s) == _StringType: return _apply(_int, args) else: raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
def atoi(*args): """atoi(s [,base]) -> int Return the integer represented by the string s in the given base, which defaults to 10. The string s must consist of one or more digits, possibly preceded by a sign. If base is 0, it is chosen from the leading characters of s, 0 for octal, 0x or 0X for hexadecimal. If base is 16, a preceding 0x or 0X is accepted. """ try: s = args[0] except IndexError: raise TypeError('function requires at least 1 argument: %d given' % len(args)) # Don't catch type error resulting from too many arguments to int(). The # error message isn't compatible but the error type is, and this function # is complicated enough already. if type(s) == _StringType: return _apply(_int, args) else: raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
s = args[0]
s = args[0]
def atol(*args): """atol(s [,base]) -> long Return the long integer represented by the string s in the given base, which defaults to 10. The string s must consist of one or more digits, possibly preceded by a sign. If base is 0, it is chosen from the leading characters of s, 0 for octal, 0x or 0X for hexadecimal. If base is 16, a preceding 0x or 0X is accepted. A trailing L or l is not accepted, unless base is 0. """ try: s = args[0] except IndexError: raise TypeError('function requires at least 1 argument: %d given' % len(args)) # Don't catch type error resulting from too many arguments to long(). The # error message isn't compatible but the error type is, and this function # is complicated enough already. if type(s) == _StringType: return _apply(_long, args) else: raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
raise TypeError('function requires at least 1 argument: %d given' % len(args))
raise TypeError('function requires at least 1 argument: %d given' % len(args))
def atol(*args): """atol(s [,base]) -> long Return the long integer represented by the string s in the given base, which defaults to 10. The string s must consist of one or more digits, possibly preceded by a sign. If base is 0, it is chosen from the leading characters of s, 0 for octal, 0x or 0X for hexadecimal. If base is 16, a preceding 0x or 0X is accepted. A trailing L or l is not accepted, unless base is 0. """ try: s = args[0] except IndexError: raise TypeError('function requires at least 1 argument: %d given' % len(args)) # Don't catch type error resulting from too many arguments to long(). The # error message isn't compatible but the error type is, and this function # is complicated enough already. if type(s) == _StringType: return _apply(_long, args) else: raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
return _apply(_long, args)
return _apply(_long, args)
def atol(*args): """atol(s [,base]) -> long Return the long integer represented by the string s in the given base, which defaults to 10. The string s must consist of one or more digits, possibly preceded by a sign. If base is 0, it is chosen from the leading characters of s, 0 for octal, 0x or 0X for hexadecimal. If base is 16, a preceding 0x or 0X is accepted. A trailing L or l is not accepted, unless base is 0. """ try: s = args[0] except IndexError: raise TypeError('function requires at least 1 argument: %d given' % len(args)) # Don't catch type error resulting from too many arguments to long(). The # error message isn't compatible but the error type is, and this function # is complicated enough already. if type(s) == _StringType: return _apply(_long, args) else: raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
def atol(*args): """atol(s [,base]) -> long Return the long integer represented by the string s in the given base, which defaults to 10. The string s must consist of one or more digits, possibly preceded by a sign. If base is 0, it is chosen from the leading characters of s, 0 for octal, 0x or 0X for hexadecimal. If base is 16, a preceding 0x or 0X is accepted. A trailing L or l is not accepted, unless base is 0. """ try: s = args[0] except IndexError: raise TypeError('function requires at least 1 argument: %d given' % len(args)) # Don't catch type error resulting from too many arguments to long(). The # error message isn't compatible but the error type is, and this function # is complicated enough already. if type(s) == _StringType: return _apply(_long, args) else: raise TypeError('argument 1: expected string, %s found' % type(s).__name__)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
half = half+1
half = half+1
def center(s, width): """center(s, width) -> string Return a center version of s, in a field of the specified width. padded with spaces as needed. The string is never truncated. """ n = width - len(s) if n <= 0: return s half = n/2 if n%2 and width%2: # This ensures that center(center(s, i), j) = center(s, j) half = half+1 return ' '*half + s + ' '*(n-half)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
sign, s = s[0], s[1:]
sign, s = s[0], s[1:]
def zfill(x, width): """zfill(x, width) -> string Pad a numeric string x with zeros on the left, to fill a field of the specified width. The string x is never truncated. """ if type(x) == type(''): s = x else: s = `x` n = len(s) if n >= width: return s sign = '' if s[0] in ('-', '+'): sign, s = s[0], s[1:] return sign + '0'*(width-n) + s
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
if c == '\t': c = ' '*(tabsize - len(line) % tabsize) line = line + c if c == '\n': res = res + line line = ''
if c == '\t': c = ' '*(tabsize - len(line) % tabsize) line = line + c if c == '\n': res = res + line line = ''
def expandtabs(s, tabsize=8): """expandtabs(s [,tabsize]) -> string Return a copy of the string s with all tab characters replaced by the appropriate number of spaces, depending on the current column, and the tabsize (default 8). """ res = line = '' for c in s: if c == '\t': c = ' '*(tabsize - len(line) % tabsize) line = line + c if c == '\n': res = res + line line = '' return res + line
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
raise ValueError, "maketrans arguments must have same length"
raise ValueError, "maketrans arguments must have same length"
def maketrans(fromstr, tostr): """maketrans(frm, to) -> string Return a translation table (a string of 256 bytes long) suitable for use in string.translate. The strings frm and to must be of the same length. """ if len(fromstr) != len(tostr): raise ValueError, "maketrans arguments must have same length" global _idmapL if not _idmapL: _idmapL = map(None, _idmap) L = _idmapL[:] fromstr = map(ord, fromstr) for i in range(len(fromstr)): L[fromstr[i]] = tostr[i] return joinfields(L, "")
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
_idmapL = map(None, _idmap)
_idmapL = map(None, _idmap)
def maketrans(fromstr, tostr): """maketrans(frm, to) -> string Return a translation table (a string of 256 bytes long) suitable for use in string.translate. The strings frm and to must be of the same length. """ if len(fromstr) != len(tostr): raise ValueError, "maketrans arguments must have same length" global _idmapL if not _idmapL: _idmapL = map(None, _idmap) L = _idmapL[:] fromstr = map(ord, fromstr) for i in range(len(fromstr)): L[fromstr[i]] = tostr[i] return joinfields(L, "")
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
L[fromstr[i]] = tostr[i]
L[fromstr[i]] = tostr[i]
def maketrans(fromstr, tostr): """maketrans(frm, to) -> string Return a translation table (a string of 256 bytes long) suitable for use in string.translate. The strings frm and to must be of the same length. """ if len(fromstr) != len(tostr): raise ValueError, "maketrans arguments must have same length" global _idmapL if not _idmapL: _idmapL = map(None, _idmap) L = _idmapL[:] fromstr = map(ord, fromstr) for i in range(len(fromstr)): L[fromstr[i]] = tostr[i] return joinfields(L, "")
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
pass
pass
def replace(s, old, new, maxsplit=0): """replace (str, old, new[, maxsplit]) -> string Return a copy of string str with all occurrences of substring old replaced by new. If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced. """ return s.replace(old, new, maxsplit)
5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/5c6813c5ba3e5df4b25e1aa4be5b9be13f19b6c8/string.py
pdict: dictionary containing other parameters of conten-type header
pdict: dictionary containing other parameters of content-type header
def parse_multipart(fp, pdict): """Parse multipart input. Arguments: fp : input file pdict: dictionary containing other parameters of conten-type header Returns a dictionary just like parse_qs(): keys are the field names, each value is a list of values for that field. This is easy to use but not much good if you are expecting megabytes to be uploaded -- in that case, use the FieldStorage class instead which is much more flexible. Note that content-type is the raw, unparsed contents of the content-type header. XXX This does not parse nested multipart parts -- use FieldStorage for that. XXX This should really be subsumed by FieldStorage altogether -- no point in having two implementations of the same parsing algorithm. """ boundary = "" if 'boundary' in pdict: boundary = pdict['boundary'] if not valid_boundary(boundary): raise ValueError, ('Invalid boundary in multipart form: %r' % (boundary,)) nextpart = "--" + boundary lastpart = "--" + boundary + "--" partdict = {} terminator = "" while terminator != lastpart: bytes = -1 data = None if terminator: # At start of next part. Read headers first. headers = mimetools.Message(fp) clength = headers.getheader('content-length') if clength: try: bytes = int(clength) except ValueError: pass if bytes > 0: if maxlen and bytes > maxlen: raise ValueError, 'Maximum content length exceeded' data = fp.read(bytes) else: data = "" # Read lines until end of part. lines = [] while 1: line = fp.readline() if not line: terminator = lastpart # End outer loop break if line[:2] == "--": terminator = line.strip() if terminator in (nextpart, lastpart): break lines.append(line) # Done with part. if data is None: continue if bytes < 0: if lines: # Strip final line terminator line = lines[-1] if line[-2:] == "\r\n": line = line[:-2] elif line[-1:] == "\n": line = line[:-1] lines[-1] = line data = "".join(lines) line = headers['content-disposition'] if not line: continue key, params = parse_header(line) if key != 'form-data': continue if 'name' in params: name = params['name'] else: continue if name in partdict: partdict[name].append(data) else: partdict[name] = [data] return partdict
a0a9591c71bb1d37bac83989c354bd85e6b9802e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/a0a9591c71bb1d37bac83989c354bd85e6b9802e/cgi.py
headers = mimetools.Message(fp)
headers = _header_parser.parse(fp)
def parse_multipart(fp, pdict): """Parse multipart input. Arguments: fp : input file pdict: dictionary containing other parameters of conten-type header Returns a dictionary just like parse_qs(): keys are the field names, each value is a list of values for that field. This is easy to use but not much good if you are expecting megabytes to be uploaded -- in that case, use the FieldStorage class instead which is much more flexible. Note that content-type is the raw, unparsed contents of the content-type header. XXX This does not parse nested multipart parts -- use FieldStorage for that. XXX This should really be subsumed by FieldStorage altogether -- no point in having two implementations of the same parsing algorithm. """ boundary = "" if 'boundary' in pdict: boundary = pdict['boundary'] if not valid_boundary(boundary): raise ValueError, ('Invalid boundary in multipart form: %r' % (boundary,)) nextpart = "--" + boundary lastpart = "--" + boundary + "--" partdict = {} terminator = "" while terminator != lastpart: bytes = -1 data = None if terminator: # At start of next part. Read headers first. headers = mimetools.Message(fp) clength = headers.getheader('content-length') if clength: try: bytes = int(clength) except ValueError: pass if bytes > 0: if maxlen and bytes > maxlen: raise ValueError, 'Maximum content length exceeded' data = fp.read(bytes) else: data = "" # Read lines until end of part. lines = [] while 1: line = fp.readline() if not line: terminator = lastpart # End outer loop break if line[:2] == "--": terminator = line.strip() if terminator in (nextpart, lastpart): break lines.append(line) # Done with part. if data is None: continue if bytes < 0: if lines: # Strip final line terminator line = lines[-1] if line[-2:] == "\r\n": line = line[:-2] elif line[-1:] == "\n": line = line[:-1] lines[-1] = line data = "".join(lines) line = headers['content-disposition'] if not line: continue key, params = parse_header(line) if key != 'form-data': continue if 'name' in params: name = params['name'] else: continue if name in partdict: partdict[name].append(data) else: partdict[name] = [data] return partdict
a0a9591c71bb1d37bac83989c354bd85e6b9802e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/a0a9591c71bb1d37bac83989c354bd85e6b9802e/cgi.py
headers: a dictionary(-like) object (sometimes rfc822.Message or a subclass thereof) containing *all* headers
headers: a dictionary(-like) object (sometimes email.Message.Message or a subclass thereof) containing *all* headers
def __repr__(self): """Return printable representation.""" return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
a0a9591c71bb1d37bac83989c354bd85e6b9802e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/a0a9591c71bb1d37bac83989c354bd85e6b9802e/cgi.py
headers = rfc822.Message(self.fp)
headers = _header_parser.parse(self.fp)
def read_multi(self, environ, keep_blank_values, strict_parsing): """Internal: read a part that is itself multipart.""" ib = self.innerboundary if not valid_boundary(ib): raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,) self.list = [] klass = self.FieldStorageClass or self.__class__ part = klass(self.fp, {}, ib, environ, keep_blank_values, strict_parsing) # Throw first part away while not part.done: headers = rfc822.Message(self.fp) part = klass(self.fp, headers, ib, environ, keep_blank_values, strict_parsing) self.list.append(part) self.skip_lines()
a0a9591c71bb1d37bac83989c354bd85e6b9802e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/a0a9591c71bb1d37bac83989c354bd85e6b9802e/cgi.py
if type(f) == type(''):
if isinstance(f, basestring):
def __init__(self, f): self._i_opened_the_file = None if type(f) == type(''): f = __builtin__.open(f, 'rb') self._i_opened_the_file = f # else, assume it is an open file object already self.initfp(f)
d3ffc432efd1910be08c1b82016d6725877d5910 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d3ffc432efd1910be08c1b82016d6725877d5910/wave.py
if type(f) == type(''):
if isinstance(f, basestring):
def __init__(self, f): self._i_opened_the_file = None if type(f) == type(''): f = __builtin__.open(f, 'wb') self._i_opened_the_file = f self.initfp(f)
d3ffc432efd1910be08c1b82016d6725877d5910 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d3ffc432efd1910be08c1b82016d6725877d5910/wave.py
else: MacOS.HandleEvent(event)
return MacOS.HandleEvent(event)
def lowlevelhandler(self, event): what, message, when, where, modifiers = event h, v = where if what == kHighLevelEvent: msg = "High Level Event: %s %s" % \ (`code(message)`, `code(h | (v<<16))`) try: AE.AEProcessAppleEvent(event) except AE.Error, err: print 'AE error: ', err print 'in', msg traceback.print_exc() return elif what == keyDown: c = chr(message & charCodeMask) if modifiers & cmdKey: if c == '.': raise KeyboardInterrupt, "Command-period" if c == 'q': self.quitting = 1 elif what == mouseDown: partcode, window = Win.FindWindow(where) if partcode == inMenuBar: result = Menu.MenuSelect(where) id = (result>>16) & 0xffff # Hi word item = result & 0xffff # Lo word if id == self.appleid: if item == 1: EasyDialogs.Message(self.getabouttext()) elif item > 1: name = self.applemenu.GetMenuItemText(item) Menu.OpenDeskAcc(name) elif id == self.quitid and item == 1: self.quitting = 1 Menu.HiliteMenu(0) else: # Anything not handled is passed to Python/SIOUX MacOS.HandleEvent(event)
9c6b085affd204d7edff36c550899a6936240cd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/9c6b085affd204d7edff36c550899a6936240cd3/MiniAEFrame.py
args = getnewargs()
args = getnewargs()
def save_newobj(self, obj): # Save a new-style class instance, using protocol 2. # XXX This is still experimental. assert self.proto >= 2 # This only works for protocol 2 t = type(obj) getnewargs = getattr(obj, "__getnewargs__", None) if getnewargs: args = getnewargs() # This better not reference obj else: args = ()
d6a31636da7400ceecfd43ac27181aa672eab3e8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d6a31636da7400ceecfd43ac27181aa672eab3e8/pickle.py
in setRollover().
in doRollover().
def emit(self, record): """ Emit a record.
6451a5e038a86cd88ded99e25d7c49870adb33ed /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6451a5e038a86cd88ded99e25d7c49870adb33ed/handlers.py
value = _EmptyClass() value.__class__ = klass else:
try: value = _EmptyClass() value.__class__ = klass instantiated = 1 except RuntimeError: pass if not instantiated:
def load_inst(self): k = self.marker() args = tuple(self.stack[k+1:]) del self.stack[k:] module = self.readline()[:-1] name = self.readline()[:-1] klass = self.find_class(module, name)
ff8449ba8552a8a0a047b906f2e91300f0b79a26 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ff8449ba8552a8a0a047b906f2e91300f0b79a26/pickle.py
value = _EmptyClass() value.__class__ = klass else:
try: value = _EmptyClass() value.__class__ = klass instantiated = 1 except RuntimeError: pass if not instantiated:
def load_obj(self): stack = self.stack k = self.marker() klass = stack[k + 1] del stack[k + 1] args = tuple(stack[k + 1:]) del stack[k:]
ff8449ba8552a8a0a047b906f2e91300f0b79a26 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ff8449ba8552a8a0a047b906f2e91300f0b79a26/pickle.py
inst.__dict__.update(value)
try: inst.__dict__.update(value) except RuntimeError: for k, v in value.items(): setattr(inst, k, v)
def load_build(self): stack = self.stack value = stack[-1] del stack[-1] inst = stack[-1] try: setstate = inst.__setstate__ except AttributeError: inst.__dict__.update(value) else: setstate(value)
ff8449ba8552a8a0a047b906f2e91300f0b79a26 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ff8449ba8552a8a0a047b906f2e91300f0b79a26/pickle.py
__starttag_text = None
def parse_pi(self, i): rawdata = self.rawdata if rawdata[i:i+2] != '<?': self.error('unexpected call to parse_pi()') match = piclose.search(rawdata, i+2) if not match: return -1 j = match.start(0) self.handle_pi(rawdata[i+2: j]) j = match.end(0) return j-i
d81dad1baddf4309ff591d85c266eb50f50464e8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d81dad1baddf4309ff591d85c266eb50f50464e8/sgmllib.py
def __delitem__(self, key): del self.data[key.upper()]
try: unsetenv except NameError: def __delitem__(self, key): del self.data[key.upper()] else: def __delitem__(self, key): unsetenv(key) del self.data[key.upper()]
def __delitem__(self, key): del self.data[key.upper()]
56de816c527fa4459fcadb6fd1ce352f72524560 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/56de816c527fa4459fcadb6fd1ce352f72524560/os.py
st2 = parser.sequence2ast(t)
try: st2 = parser.sequence2ast(t) except parser.ParserError: print "Failing syntax tree:" pprint.pprint(t) raise
def roundtrip(f, s): st1 = f(s) t = st1.totuple() st2 = parser.sequence2ast(t)
524d823772ea71560f25a69eb45d0a9b95d9d732 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/524d823772ea71560f25a69eb45d0a9b95d9d732/test_parser.py
buffering. Sublcasses should however, if possible, try to
buffering. Subclasses should however, if possible, try to
def readline(self, size=None):
cc9b5a69ccf48034aea311439713c904f9aa479c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/cc9b5a69ccf48034aea311439713c904f9aa479c/codecs.py
was specified. Thisis done to avoid data loss due to encodings
was specified. This is done to avoid data loss due to encodings
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1): """ Open an encoded file using the given mode and return a wrapped version providing transparent encoding/decoding. Note: The wrapped version will only accept the object format defined by the codecs, i.e. Unicode objects for most builtin codecs. Output is also codec dependent and will usually by Unicode as well. Files are always opened in binary mode, even if no binary mode was specified. Thisis done to avoid data loss due to encodings using 8-bit values. The default file mode is 'rb' meaning to open the file in binary read mode. encoding specifies the encoding which is to be used for the the file. errors may be given to define the error handling. It defaults to 'strict' which causes ValueErrors to be raised in case an encoding error occurs. buffering has the same meaning as for the builtin open() API. It defaults to line buffered. The returned wrapped file object provides an extra attribute .encoding which allows querying the used encoding. This attribute is only available if an encoding was specified as parameter. """ if encoding is not None and \ 'b' not in mode: # Force opening of the file in binary mode mode = mode + 'b' file = __builtin__.open(filename, mode, buffering) if encoding is None: return file (e, d, sr, sw) = lookup(encoding) srw = StreamReaderWriter(file, sr, sw, errors) # Add attributes to simplify introspection srw.encoding = encoding return srw
cc9b5a69ccf48034aea311439713c904f9aa479c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/cc9b5a69ccf48034aea311439713c904f9aa479c/codecs.py
the file.
file.
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1): """ Open an encoded file using the given mode and return a wrapped version providing transparent encoding/decoding. Note: The wrapped version will only accept the object format defined by the codecs, i.e. Unicode objects for most builtin codecs. Output is also codec dependent and will usually by Unicode as well. Files are always opened in binary mode, even if no binary mode was specified. Thisis done to avoid data loss due to encodings using 8-bit values. The default file mode is 'rb' meaning to open the file in binary read mode. encoding specifies the encoding which is to be used for the the file. errors may be given to define the error handling. It defaults to 'strict' which causes ValueErrors to be raised in case an encoding error occurs. buffering has the same meaning as for the builtin open() API. It defaults to line buffered. The returned wrapped file object provides an extra attribute .encoding which allows querying the used encoding. This attribute is only available if an encoding was specified as parameter. """ if encoding is not None and \ 'b' not in mode: # Force opening of the file in binary mode mode = mode + 'b' file = __builtin__.open(filename, mode, buffering) if encoding is None: return file (e, d, sr, sw) = lookup(encoding) srw = StreamReaderWriter(file, sr, sw, errors) # Add attributes to simplify introspection srw.encoding = encoding return srw
cc9b5a69ccf48034aea311439713c904f9aa479c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/cc9b5a69ccf48034aea311439713c904f9aa479c/codecs.py
If a target mapping in the decoding map occurrs multiple
If a target mapping in the decoding map occurs multiple
def make_encoding_map(decoding_map): """ Creates an encoding map from a decoding map. If a target mapping in the decoding map occurrs multiple times, then that target is mapped to None (undefined mapping), causing an exception when encountered by the charmap codec during translation. One example where this happens is cp875.py which decodes multiple character to \u001a. """ m = {} for k,v in decoding_map.items(): if not v in m: m[v] = k else: m[v] = None return m
cc9b5a69ccf48034aea311439713c904f9aa479c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/cc9b5a69ccf48034aea311439713c904f9aa479c/codecs.py
FileHandler.names = (socket.gethostbyname('localhost'), socket.gethostbyname(socket.gethostname()))
try: FileHandler.names = (socket.gethostbyname('localhost'), socket.gethostbyname(socket.gethostname())) except socket.gaierror: FileHandler.names = (socket.gethostbyname('localhost'),)
def get_names(self): if FileHandler.names is None: FileHandler.names = (socket.gethostbyname('localhost'), socket.gethostbyname(socket.gethostname())) return FileHandler.names
9d52b453744fb559e9fd8733b7e5bfd32d459992 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/9d52b453744fb559e9fd8733b7e5bfd32d459992/urllib2.py
data = s[:n]
def pack_fstring(self, n, s): if n < 0: raise ValueError, 'fstring size must be nonnegative' n = ((n+3)/4)*4 data = s[:n] data = data + (n - len(data)) * '\0' self.__buf.write(data)
cd9eb8e24ccef50162bc70cad052ca96ee084eff /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/cd9eb8e24ccef50162bc70cad052ca96ee084eff/xdrlib.py
if e.errno == errno.EAGAIN:
if e.errno in (errno.EAGAIN, errno.EACCES):
def _lock_file(f, dotlock=True): """Lock file f using lockf and dot locking.""" dotlock_done = False try: if fcntl: try: fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError, e: if e.errno == errno.EAGAIN: raise ExternalClashError('lockf: lock unavailable: %s' % f.name) else: raise if dotlock: try: pre_lock = _create_temporary(f.name + '.lock') pre_lock.close() except IOError, e: if e.errno == errno.EACCES: return # Without write access, just skip dotlocking. else: raise try: if hasattr(os, 'link'): os.link(pre_lock.name, f.name + '.lock') dotlock_done = True os.unlink(pre_lock.name) else: os.rename(pre_lock.name, f.name + '.lock') dotlock_done = True except OSError, e: if e.errno == errno.EEXIST: os.remove(pre_lock.name) raise ExternalClashError('dot lock unavailable: %s' % f.name) else: raise except: if fcntl: fcntl.lockf(f, fcntl.LOCK_UN) if dotlock_done: os.remove(f.name + '.lock') raise
63bbfc83aa083c2083e0f12f476f381820f468dc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/63bbfc83aa083c2083e0f12f476f381820f468dc/mailbox.py
if os.name == "posix": check_environ() sys_dir = os.path.dirname(sys.modules['distutils'].__file__) sys_file = os.path.join(sys_dir, "pydistutils.cfg") if os.path.isfile(sys_file): files.append(sys_file) user_file = os.path.join(os.environ.get('HOME'), ".pydistutils.cfg")
check_environ() if os.name=='posix': sys_dir = os.path.dirname(sys.modules['distutils'].__file__) user_filename = ".pydistutils.cfg" else: sys_dir = sysconfig.PREFIX user_filename = "pydistutils.cfg" sys_file = os.path.join(sys_dir, "pydistutils.cfg") if os.path.isfile(sys_file): files.append(sys_file) if os.environ.has_key('HOME'): user_file = os.path.join(os.environ.get('HOME'), user_filename)
def find_config_files (self): """Find as many configuration files as should be processed for this platform, and return a list of filenames in the order in which they should be parsed. The filenames returned are guaranteed to exist (modulo nasty race conditions).
af67b6ca97843c23f9b8ed5b29f76f6b3291658b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/af67b6ca97843c23f9b8ed5b29f76f6b3291658b/dist.py
else: sys_file = os.path.join (sysconfig.PREFIX, "pydistutils.cfg") if os.path.isfile(sys_file): files.append(sys_file)
def find_config_files (self): """Find as many configuration files as should be processed for this platform, and return a list of filenames in the order in which they should be parsed. The filenames returned are guaranteed to exist (modulo nasty race conditions).
af67b6ca97843c23f9b8ed5b29f76f6b3291658b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/af67b6ca97843c23f9b8ed5b29f76f6b3291658b/dist.py
self.assertRaises(AttributeError, f, LenOnly(), 10)
self.assertRaises(TypeError, f, LenOnly(), 10)
def test_len_only(self): for f in (bisect_left, bisect_right, insort_left, insort_right): self.assertRaises(AttributeError, f, LenOnly(), 10)
29b8a0c66142e6f370bc3da427b5771117972990 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/29b8a0c66142e6f370bc3da427b5771117972990/test_bisect.py
self.assertRaises(AttributeError, f, GetOnly(), 10)
self.assertRaises(TypeError, f, GetOnly(), 10)
def test_get_only(self): for f in (bisect_left, bisect_right, insort_left, insort_right): self.assertRaises(AttributeError, f, GetOnly(), 10)
29b8a0c66142e6f370bc3da427b5771117972990 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/29b8a0c66142e6f370bc3da427b5771117972990/test_bisect.py
data = self.sslobj.read(size) while len(data) < size: data += self.sslobj.read(size-len(data)) return data
chunks = [] read = 0 while read < size: data = self.sslobj.read(size-read) read += len(data) chunks.append(data) return ''.join(chunks)
def read(self, size): """Read 'size' bytes from remote.""" # sslobj.read() sometimes returns < size bytes data = self.sslobj.read(size) while len(data) < size: data += self.sslobj.read(size-len(data))
0b1e6c5aa3592c6043eae0935534b913c6005989 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/0b1e6c5aa3592c6043eae0935534b913c6005989/imaplib.py
line = ""
line = []
def readline(self): """Read line from remote.""" # NB: socket.ssl needs a "readline" method, or perhaps a "makefile" method. line = "" while 1: char = self.sslobj.read(1) line += char if char == "\n": return line
0b1e6c5aa3592c6043eae0935534b913c6005989 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/0b1e6c5aa3592c6043eae0935534b913c6005989/imaplib.py