Dataset columns (string fields and their length ranges):

rem: lengths 1 to 322k
add: lengths 0 to 2.05M
context: lengths 4 to 228k
meta: lengths 156 to 215
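For orientation, here is a minimal sketch of how rows with these four string fields might be read and iterated. The JSON Lines file name and the on-disk layout are assumptions made for the example, not something stated by the schema above; only the field names (rem, add, context, meta) come from it.

```python
import json

# Hypothetical file name; each line is assumed to carry one row with the
# four string fields listed in the schema above.
with open("code_changes.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        removed = row["rem"]       # code removed by the change
        added = row["add"]         # code added by the change
        context = row["context"]   # enclosing function or class, flattened
        commit, path = row["meta"].split(" ", 1)  # "<sha> <source file path>"
        print(commit[:8], path.strip())
```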
if verbose: print 'unpack tuple wrong size' try: a, b = t raise TestFailed except ValueError: pass
Now for some failures
def __getitem__(self, i): if i >= 0 and i < 3: return i raise IndexError
989a9d01aa1cbad29606c3b177b5c247a75fefc0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/989a9d01aa1cbad29606c3b177b5c247a75fefc0/test_unpack.py
if verbose: print 'unpack list wrong size' try: a, b = l raise TestFailed except ValueError: pass
Unpacking non-sequence
def __getitem__(self, i): if i >= 0 and i < 3: return i raise IndexError
989a9d01aa1cbad29606c3b177b5c247a75fefc0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/989a9d01aa1cbad29606c3b177b5c247a75fefc0/test_unpack.py
if verbose: print 'unpack sequence too short' try: a, b, c, d = Seq() raise TestFailed except ValueError: pass
Unpacking tuple of wrong size
def __getitem__(self, i): if i >= 0 and i < 3: return i raise IndexError
989a9d01aa1cbad29606c3b177b5c247a75fefc0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/989a9d01aa1cbad29606c3b177b5c247a75fefc0/test_unpack.py
if verbose: print 'unpack sequence too long' try: a, b = Seq() raise TestFailed except ValueError: pass
Unpacking tuple of wrong size
def __getitem__(self, i): if i >= 0 and i < 3: return i raise IndexError
989a9d01aa1cbad29606c3b177b5c247a75fefc0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/989a9d01aa1cbad29606c3b177b5c247a75fefc0/test_unpack.py
class BozoError(Exception): pass
Unpacking sequence too short
def __getitem__(self, i): if i >= 0 and i < 3: return i raise IndexError
989a9d01aa1cbad29606c3b177b5c247a75fefc0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/989a9d01aa1cbad29606c3b177b5c247a75fefc0/test_unpack.py
class BadSeq: def __getitem__(self, i): if i >= 0 and i < 3: return i elif i == 3: raise BozoError else: raise IndexError
>>> a, b, c, d = Seq() Traceback (most recent call last): ... ValueError: need more than 3 values to unpack
def __getitem__(self, i): if i >= 0 and i < 3: return i raise IndexError
989a9d01aa1cbad29606c3b177b5c247a75fefc0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/989a9d01aa1cbad29606c3b177b5c247a75fefc0/test_unpack.py
if verbose: print 'unpack sequence too long, wrong error' try: a, b, c, d, e = BadSeq() raise TestFailed except BozoError: pass
>>> a, b = Seq() Traceback (most recent call last): ... ValueError: too many values to unpack
def __getitem__(self, i): if i >= 0 and i < 3: return i elif i == 3: raise BozoError else: raise IndexError
989a9d01aa1cbad29606c3b177b5c247a75fefc0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/989a9d01aa1cbad29606c3b177b5c247a75fefc0/test_unpack.py
if verbose: print 'unpack sequence too short, wrong error' try: a, b, c = BadSeq() raise TestFailed except BozoError: pass
Unpacking a sequence where the test for too long raises a different kind of error >>> class BozoError(Exception): ... pass ... >>> class BadSeq: ... def __getitem__(self, i): ... if i >= 0 and i < 3: ... return i ... elif i == 3: ... raise BozoError ... else: ... raise IndexError ... Trigger code while not expecting an IndexError (unpack sequence too long, wrong error) >>> a, b, c, d, e = BadSeq() Traceback (most recent call last): ... BozoError Trigger code while expecting an IndexError (unpack sequence too short, wrong error) >>> a, b, c = BadSeq() Traceback (most recent call last): ... BozoError """ __test__ = {'doctests' : doctests} def test_main(verbose=False): import sys from test import test_support from test import test_unpack test_support.run_doctest(test_unpack, verbose) if __name__ == "__main__": test_main(verbose=True)
def __getitem__(self, i): if i >= 0 and i < 3: return i elif i == 3: raise BozoError else: raise IndexError
989a9d01aa1cbad29606c3b177b5c247a75fefc0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/989a9d01aa1cbad29606c3b177b5c247a75fefc0/test_unpack.py
return copy.copy(self)
data = self.data try: self.data = {} c = copy.copy(self) finally: self.data = data c.update(self) return c
def copy(self): if self.__class__ is UserDict: return UserDict(self.data) import copy return copy.copy(self)
a62ee412b2fc9ff1119a8d68522ff3d552458004 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/a62ee412b2fc9ff1119a8d68522ff3d552458004/UserDict.py
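The row above reworks UserDict.copy so that subclass instances are copied through copy.copy with the data mapping temporarily detached, which preserves extra instance attributes as well as the mapping contents. A small demonstration of that behaviour, using the Python 3 home of UserDict (collections); the TaggedDict class is invented for the example.

```python
from collections import UserDict

class TaggedDict(UserDict):
    # A subclass with an extra attribute, to show that copy() keeps it.
    def __init__(self, tag, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.tag = tag

d = TaggedDict("v1", {"a": 1})
c = d.copy()
# The copy keeps the subclass, its extra attribute, and the mapping contents.
assert isinstance(c, TaggedDict) and c.tag == "v1" and c["a"] == 1
```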
eq(value, 'text/plain; charset=us-ascii; boundary="BOUNDARY"')
eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
def test_set_boundary(self): eq = self.assertEqual # This one has no existing boundary parameter, but the Content-Type: # header appears fifth. msg = self._msgobj('msg_01.txt') msg.set_boundary('BOUNDARY') header, value = msg.items()[4] eq(header.lower(), 'content-type') eq(value, 'text/plain; charset=us-ascii; boundary="BOUNDARY"') # This one has a Content-Type: header, with a boundary, stuck in the # middle of its headers. Make sure the order is preserved; it should # be fifth. msg = self._msgobj('msg_04.txt') msg.set_boundary('BOUNDARY') header, value = msg.items()[4] eq(header.lower(), 'content-type') eq(value, 'multipart/mixed; boundary="BOUNDARY"') # And this one has no Content-Type: header at all. msg = self._msgobj('msg_03.txt') self.assertRaises(Errors.HeaderParseError, msg.set_boundary, 'BOUNDARY')
0a6216307db56ee14f9dd07082379cd6a062f135 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/0a6216307db56ee14f9dd07082379cd6a062f135/test_email.py
self._writeBody(msg) else: specific = UNDERSCORE.join(ctype.split('/')).replace('-', '_') meth = getattr(self, '_handle_' + specific, None)
ctype = msg.get_default_type() assert ctype in ('text/plain', 'message/rfc822') main, sub = ctype.split('/') specific = UNDERSCORE.join((main, sub)).replace('-', '_') meth = getattr(self, '_handle_' + specific, None) if meth is None: generic = main.replace('-', '_') meth = getattr(self, '_handle_' + generic, None)
def _dispatch(self, msg): # Get the Content-Type: for the message, then try to dispatch to # self._handle_maintype_subtype(). If there's no handler for the full # MIME type, then dispatch to self._handle_maintype(). If that's # missing too, then dispatch to self._writeBody(). ctype = msg.get_type() if ctype is None: # No Content-Type: header so try the default handler self._writeBody(msg) else: # We do have a Content-Type: header. specific = UNDERSCORE.join(ctype.split('/')).replace('-', '_') meth = getattr(self, '_handle_' + specific, None) if meth is None: generic = msg.get_main_type().replace('-', '_') meth = getattr(self, '_handle_' + generic, None) if meth is None: meth = self._writeBody meth(msg)
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
generic = msg.get_main_type().replace('-', '_') meth = getattr(self, '_handle_' + generic, None) if meth is None: meth = self._writeBody meth(msg)
meth = self._writeBody meth(msg)
def _dispatch(self, msg): # Get the Content-Type: for the message, then try to dispatch to # self._handle_maintype_subtype(). If there's no handler for the full # MIME type, then dispatch to self._handle_maintype(). If that's # missing too, then dispatch to self._writeBody(). ctype = msg.get_type() if ctype is None: # No Content-Type: header so try the default handler self._writeBody(msg) else: # We do have a Content-Type: header. specific = UNDERSCORE.join(ctype.split('/')).replace('-', '_') meth = getattr(self, '_handle_' + specific, None) if meth is None: generic = msg.get_main_type().replace('-', '_') meth = getattr(self, '_handle_' + generic, None) if meth is None: meth = self._writeBody meth(msg)
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
def _handle_multipart(self, msg, isdigest=0):
def _handle_multipart(self, msg):
def _handle_multipart(self, msg, isdigest=0): # The trick here is to write out each part separately, merge them all # together, and then make sure that the boundary we've chosen isn't # present in the payload. msgtexts = [] subparts = msg.get_payload() if subparts is None: # Nothing has every been attached boundary = msg.get_boundary(failobj=_make_boundary()) print >> self._fp, '--' + boundary print >> self._fp, '\n' print >> self._fp, '--' + boundary + '--' return elif not isinstance(subparts, ListType): # Scalar payload subparts = [subparts] for part in subparts: s = StringIO() g = self.__class__(s, self._mangle_from_, self.__maxheaderlen) g.flatten(part, unixfrom=0) msgtexts.append(s.getvalue()) # Now make sure the boundary we've selected doesn't appear in any of # the message texts. alltext = NL.join(msgtexts) # BAW: What about boundaries that are wrapped in double-quotes? boundary = msg.get_boundary(failobj=_make_boundary(alltext)) # If we had to calculate a new boundary because the body text # contained that string, set the new boundary. We don't do it # unconditionally because, while set_boundary() preserves order, it # doesn't preserve newlines/continuations in headers. This is no big # deal in practice, but turns out to be inconvenient for the unittest # suite. if msg.get_boundary() <> boundary: msg.set_boundary(boundary) # Write out any preamble if msg.preamble is not None: self._fp.write(msg.preamble) # First boundary is a bit different; it doesn't have a leading extra # newline. print >> self._fp, '--' + boundary if isdigest: print >> self._fp # Join and write the individual parts joiner = '\n--' + boundary + '\n' if isdigest: # multipart/digest types effectively add an extra newline between # the boundary and the body part. joiner += '\n' self._fp.write(joiner.join(msgtexts)) print >> self._fp, '\n--' + boundary + '--', # Write out any epilogue if msg.epilogue is not None: if not msg.epilogue.startswith('\n'): print >> self._fp self._fp.write(msg.epilogue)
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
def _handle_multipart(self, msg, isdigest=0): # The trick here is to write out each part separately, merge them all # together, and then make sure that the boundary we've chosen isn't # present in the payload. msgtexts = [] subparts = msg.get_payload() if subparts is None: # Nothing has every been attached boundary = msg.get_boundary(failobj=_make_boundary()) print >> self._fp, '--' + boundary print >> self._fp, '\n' print >> self._fp, '--' + boundary + '--' return elif not isinstance(subparts, ListType): # Scalar payload subparts = [subparts] for part in subparts: s = StringIO() g = self.__class__(s, self._mangle_from_, self.__maxheaderlen) g.flatten(part, unixfrom=0) msgtexts.append(s.getvalue()) # Now make sure the boundary we've selected doesn't appear in any of # the message texts. alltext = NL.join(msgtexts) # BAW: What about boundaries that are wrapped in double-quotes? boundary = msg.get_boundary(failobj=_make_boundary(alltext)) # If we had to calculate a new boundary because the body text # contained that string, set the new boundary. We don't do it # unconditionally because, while set_boundary() preserves order, it # doesn't preserve newlines/continuations in headers. This is no big # deal in practice, but turns out to be inconvenient for the unittest # suite. if msg.get_boundary() <> boundary: msg.set_boundary(boundary) # Write out any preamble if msg.preamble is not None: self._fp.write(msg.preamble) # First boundary is a bit different; it doesn't have a leading extra # newline. print >> self._fp, '--' + boundary if isdigest: print >> self._fp # Join and write the individual parts joiner = '\n--' + boundary + '\n' if isdigest: # multipart/digest types effectively add an extra newline between # the boundary and the body part. joiner += '\n' self._fp.write(joiner.join(msgtexts)) print >> self._fp, '\n--' + boundary + '--', # Write out any epilogue if msg.epilogue is not None: if not msg.epilogue.startswith('\n'): print >> self._fp self._fp.write(msg.epilogue)
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
g = self.__class__(s, self._mangle_from_, self.__maxheaderlen)
g = self.clone(s)
def _handle_multipart(self, msg, isdigest=0): # The trick here is to write out each part separately, merge them all # together, and then make sure that the boundary we've chosen isn't # present in the payload. msgtexts = [] subparts = msg.get_payload() if subparts is None: # Nothing has every been attached boundary = msg.get_boundary(failobj=_make_boundary()) print >> self._fp, '--' + boundary print >> self._fp, '\n' print >> self._fp, '--' + boundary + '--' return elif not isinstance(subparts, ListType): # Scalar payload subparts = [subparts] for part in subparts: s = StringIO() g = self.__class__(s, self._mangle_from_, self.__maxheaderlen) g.flatten(part, unixfrom=0) msgtexts.append(s.getvalue()) # Now make sure the boundary we've selected doesn't appear in any of # the message texts. alltext = NL.join(msgtexts) # BAW: What about boundaries that are wrapped in double-quotes? boundary = msg.get_boundary(failobj=_make_boundary(alltext)) # If we had to calculate a new boundary because the body text # contained that string, set the new boundary. We don't do it # unconditionally because, while set_boundary() preserves order, it # doesn't preserve newlines/continuations in headers. This is no big # deal in practice, but turns out to be inconvenient for the unittest # suite. if msg.get_boundary() <> boundary: msg.set_boundary(boundary) # Write out any preamble if msg.preamble is not None: self._fp.write(msg.preamble) # First boundary is a bit different; it doesn't have a leading extra # newline. print >> self._fp, '--' + boundary if isdigest: print >> self._fp # Join and write the individual parts joiner = '\n--' + boundary + '\n' if isdigest: # multipart/digest types effectively add an extra newline between # the boundary and the body part. joiner += '\n' self._fp.write(joiner.join(msgtexts)) print >> self._fp, '\n--' + boundary + '--', # Write out any epilogue if msg.epilogue is not None: if not msg.epilogue.startswith('\n'): print >> self._fp self._fp.write(msg.epilogue)
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
if isdigest: print >> self._fp
def _handle_multipart(self, msg, isdigest=0): # The trick here is to write out each part separately, merge them all # together, and then make sure that the boundary we've chosen isn't # present in the payload. msgtexts = [] subparts = msg.get_payload() if subparts is None: # Nothing has every been attached boundary = msg.get_boundary(failobj=_make_boundary()) print >> self._fp, '--' + boundary print >> self._fp, '\n' print >> self._fp, '--' + boundary + '--' return elif not isinstance(subparts, ListType): # Scalar payload subparts = [subparts] for part in subparts: s = StringIO() g = self.__class__(s, self._mangle_from_, self.__maxheaderlen) g.flatten(part, unixfrom=0) msgtexts.append(s.getvalue()) # Now make sure the boundary we've selected doesn't appear in any of # the message texts. alltext = NL.join(msgtexts) # BAW: What about boundaries that are wrapped in double-quotes? boundary = msg.get_boundary(failobj=_make_boundary(alltext)) # If we had to calculate a new boundary because the body text # contained that string, set the new boundary. We don't do it # unconditionally because, while set_boundary() preserves order, it # doesn't preserve newlines/continuations in headers. This is no big # deal in practice, but turns out to be inconvenient for the unittest # suite. if msg.get_boundary() <> boundary: msg.set_boundary(boundary) # Write out any preamble if msg.preamble is not None: self._fp.write(msg.preamble) # First boundary is a bit different; it doesn't have a leading extra # newline. print >> self._fp, '--' + boundary if isdigest: print >> self._fp # Join and write the individual parts joiner = '\n--' + boundary + '\n' if isdigest: # multipart/digest types effectively add an extra newline between # the boundary and the body part. joiner += '\n' self._fp.write(joiner.join(msgtexts)) print >> self._fp, '\n--' + boundary + '--', # Write out any epilogue if msg.epilogue is not None: if not msg.epilogue.startswith('\n'): print >> self._fp self._fp.write(msg.epilogue)
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
if isdigest: joiner += '\n'
def _handle_multipart(self, msg, isdigest=0): # The trick here is to write out each part separately, merge them all # together, and then make sure that the boundary we've chosen isn't # present in the payload. msgtexts = [] subparts = msg.get_payload() if subparts is None: # Nothing has every been attached boundary = msg.get_boundary(failobj=_make_boundary()) print >> self._fp, '--' + boundary print >> self._fp, '\n' print >> self._fp, '--' + boundary + '--' return elif not isinstance(subparts, ListType): # Scalar payload subparts = [subparts] for part in subparts: s = StringIO() g = self.__class__(s, self._mangle_from_, self.__maxheaderlen) g.flatten(part, unixfrom=0) msgtexts.append(s.getvalue()) # Now make sure the boundary we've selected doesn't appear in any of # the message texts. alltext = NL.join(msgtexts) # BAW: What about boundaries that are wrapped in double-quotes? boundary = msg.get_boundary(failobj=_make_boundary(alltext)) # If we had to calculate a new boundary because the body text # contained that string, set the new boundary. We don't do it # unconditionally because, while set_boundary() preserves order, it # doesn't preserve newlines/continuations in headers. This is no big # deal in practice, but turns out to be inconvenient for the unittest # suite. if msg.get_boundary() <> boundary: msg.set_boundary(boundary) # Write out any preamble if msg.preamble is not None: self._fp.write(msg.preamble) # First boundary is a bit different; it doesn't have a leading extra # newline. print >> self._fp, '--' + boundary if isdigest: print >> self._fp # Join and write the individual parts joiner = '\n--' + boundary + '\n' if isdigest: # multipart/digest types effectively add an extra newline between # the boundary and the body part. joiner += '\n' self._fp.write(joiner.join(msgtexts)) print >> self._fp, '\n--' + boundary + '--', # Write out any epilogue if msg.epilogue is not None: if not msg.epilogue.startswith('\n'): print >> self._fp self._fp.write(msg.epilogue)
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
def _handle_multipart_digest(self, msg): self._handle_multipart(msg, isdigest=1)
def _handle_multipart_digest(self, msg): self._handle_multipart(msg, isdigest=1)
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
g = self.__class__(s, self._mangle_from_, self.__maxheaderlen)
g = self.clone(s)
def _handle_message_delivery_status(self, msg): # We can't just write the headers directly to self's file object # because this will leave an extra newline between the last header # block and the boundary. Sigh. blocks = [] for part in msg.get_payload(): s = StringIO() g = self.__class__(s, self._mangle_from_, self.__maxheaderlen) g.flatten(part, unixfrom=0) text = s.getvalue() lines = text.split('\n') # Strip off the unnecessary trailing empty line if lines and lines[-1] == '': blocks.append(NL.join(lines[:-1])) else: blocks.append(text) # Now join all the blocks with an empty line. This has the lovely # effect of separating each block with an empty line, but not adding # an extra one after the last one. self._fp.write(NL.join(blocks))
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
g = self.__class__(s, self._mangle_from_, self.__maxheaderlen)
g = self.clone(s)
def _handle_message(self, msg): s = StringIO() g = self.__class__(s, self._mangle_from_, self.__maxheaderlen) # The payload of a message/rfc822 part should be a multipart sequence # of length 1. The zeroth element of the list should be the Message # object for the subpart.Extract that object, stringify it, and write # that out. g.flatten(msg.get_payload(0), unixfrom=0) self._fp.write(s.getvalue())
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
def _handle_message(self, msg): s = StringIO() g = self.__class__(s, self._mangle_from_, self.__maxheaderlen) # The payload of a message/rfc822 part should be a multipart sequence # of length 1. The zeroth element of the list should be the Message # object for the subpart.Extract that object, stringify it, and write # that out. g.flatten(msg.get_payload(0), unixfrom=0) self._fp.write(s.getvalue())
ee084c72c49e60af052e272006f74cb07fc13eb2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ee084c72c49e60af052e272006f74cb07fc13eb2/Generator.py
if high_bits == 0 or high_bits == -1:
if high_bits == 0 or high_bits == -1:
def save_int(self, object, pack=struct.pack): if self.bin: # If the int is small enough to fit in a signed 4-byte 2's-comp # format, we can store it more efficiently than the general # case. # First one- and two-byte unsigned ints: if object >= 0: if object <= 0xff: self.write(BININT1 + chr(object)) return if object <= 0xffff: self.write(BININT2 + chr(object&0xff) + chr(object>>8)) return # Next check for 4-byte signed ints: high_bits = object >> 31 # note that Python shift sign-extends if high_bits == 0 or high_bits == -1: # All high bits are copies of bit 2**31, so the value # fits in a 4-byte signed int. self.write(BININT + pack("<i", object)) return # Text pickle, or int too big to fit in signed 4-byte format. self.write(INT + `object` + '\n')
23c9702db5ea907d4debcffac999edb2c394f65b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/23c9702db5ea907d4debcffac999edb2c394f65b/pickle.py
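The context above decides whether an int fits pickle's 4-byte signed BININT format by checking that everything above bit 31 is a copy of the sign bit. A standalone restatement of that range check, for illustration only:

```python
def fits_signed_int32(n):
    # Shifting out the low 31 bits leaves only sign-bit copies (0 or -1)
    # when the value fits in a signed 32-bit field.
    high_bits = n >> 31
    return high_bits in (0, -1)

assert fits_signed_int32(2**31 - 1) and fits_signed_int32(-2**31)
assert not fits_signed_int32(2**31) and not fits_signed_int32(-(2**31) - 1)
```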
"can't pickle %s objects" % `t.__name__`
"can't pickle %s object: %s" % (`t.__name__`, `object`)
def save(self, object, pers_save = 0): memo = self.memo
fa32bde378c31ea1efccd329cf46872a844250d3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/fa32bde378c31ea1efccd329cf46872a844250d3/pickle.py
SIGNATURE='MOSS'
def openfile(self, path, activate = 1): if activate: self.activate() self.OpenURL("file:///" + string.join(string.split(path,':'), '/'))
3aef7b8906f19285954580a0ad6a66ca9e0ec783 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3aef7b8906f19285954580a0ad6a66ca9e0ec783/PyDocSearch.py
lowertext = string.lower(text)
lowertext = text.lower()
def sucktitle(path): f = open(path) text = f.read(1024) # assume the title is in the first 1024 bytes f.close() lowertext = string.lower(text) matcher = _titlepat.search(lowertext) if matcher: return matcher.group(1) return path
3aef7b8906f19285954580a0ad6a66ca9e0ec783 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3aef7b8906f19285954580a0ad6a66ca9e0ec783/PyDocSearch.py
self.w.results = TwoLineList((-1, -1, 1, -14), nicehits, self.listhit)
self.w.results = W.TwoLineList((-1, -1, 1, -14), nicehits, self.listhit)
def __init__(self, hits): global _resultscounter hits = map(lambda (path, hits): (sucktitle(path), path, hits), hits) hits.sort() self.hits = hits nicehits = map( lambda (title, path, hits): title + '\r' + string.join( map(lambda (c, p): "%s (%d)" % (p, c), hits), ', '), hits) nicehits.sort() self.w = W.Window((440, 300), "Search results %d" % _resultscounter, minsize = (200, 100)) self.w.results = TwoLineList((-1, -1, 1, -14), nicehits, self.listhit) self.w.open() self.w.bind('return', self.listhit) self.w.bind('enter', self.listhit) _resultscounter = _resultscounter + 1 self.browser = None
3aef7b8906f19285954580a0ad6a66ca9e0ec783 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3aef7b8906f19285954580a0ad6a66ca9e0ec783/PyDocSearch.py
self.browser = None
def __init__(self, hits): global _resultscounter hits = map(lambda (path, hits): (sucktitle(path), path, hits), hits) hits.sort() self.hits = hits nicehits = map( lambda (title, path, hits): title + '\r' + string.join( map(lambda (c, p): "%s (%d)" % (p, c), hits), ', '), hits) nicehits.sort() self.w = W.Window((440, 300), "Search results %d" % _resultscounter, minsize = (200, 100)) self.w.results = TwoLineList((-1, -1, 1, -14), nicehits, self.listhit) self.w.open() self.w.bind('return', self.listhit) self.w.bind('enter', self.listhit) _resultscounter = _resultscounter + 1 self.browser = None
3aef7b8906f19285954580a0ad6a66ca9e0ec783 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3aef7b8906f19285954580a0ad6a66ca9e0ec783/PyDocSearch.py
if self.browser is None: self.browser = WebBrowser(SIGNATURE, start = 1) self.browser.openfile(self.hits[i][1])
path = self.hits[i][1] url = "file://" + "/".join(path.split(":")) webbrowser.open(url)
def listhit(self, isdbl = 1): if isdbl: for i in self.w.results.getselection(): if self.browser is None: self.browser = WebBrowser(SIGNATURE, start = 1) self.browser.openfile(self.hits[i][1])
3aef7b8906f19285954580a0ad6a66ca9e0ec783 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3aef7b8906f19285954580a0ad6a66ca9e0ec783/PyDocSearch.py
self.w.searching = W.TextBox((4, 4, -4, 16), "DevDev:PyPyDoc 1.5.1:ext:parseTupleAndKeywords.html")
self.w.searching = W.TextBox((4, 4, -4, 16), "")
def __init__(self): self.w = W.Dialog((440, 64), "Searching\xc9") self.w.searching = W.TextBox((4, 4, -4, 16), "DevDev:PyPyDoc 1.5.1:ext:parseTupleAndKeywords.html") self.w.hits = W.TextBox((4, 24, -4, 16), "Hits: 0") self.w.canceltip = W.TextBox((4, 44, -4, 16), "Type cmd-period (.) to cancel.") self.w.open()
3aef7b8906f19285954580a0ad6a66ca9e0ec783 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3aef7b8906f19285954580a0ad6a66ca9e0ec783/PyDocSearch.py
self.w.setdocfolderbutton = W.Button((10, -30, 80, 16), "Set doc folder", self.setdocpath)
self.w.setdocfolderbutton = W.Button((10, -30, 100, 16), "Set doc folder", self.setdocpath)
def __init__(self): prefs = MacPrefs.GetPrefs(W.getapplication().preffilepath) try: (docpath, kind, case, word, tut, lib, ref, ext, api) = prefs.docsearchengine except: (docpath, kind, case, word, tut, lib, ref, ext, api) = prefs.docsearchengine = \ ("", 0, 0, 0, 1, 1, 0, 0, 0) if docpath and not verifydocpath(docpath): docpath = "" self.w = W.Window((400, 200), "Search the Python Documentation") self.w.searchtext = W.EditText((10, 10, -100, 20), callback = self.checkbuttons) self.w.searchbutton = W.Button((-90, 12, 80, 16), "Search", self.search) buttons = [] gutter = 10 width = 130 bookstart = width + 2 * gutter self.w.phraseradio = W.RadioButton((10, 38, width, 16), "As a phrase", buttons) self.w.allwordsradio = W.RadioButton((10, 58, width, 16), "All words", buttons) self.w.anywordsradio = W.RadioButton((10, 78, width, 16), "Any word", buttons) self.w.casesens = W.CheckBox((10, 98, width, 16), "Case sensitive") self.w.wholewords = W.CheckBox((10, 118, width, 16), "Whole words") self.w.tutorial = W.CheckBox((bookstart, 38, -10, 16), "Tutorial") self.w.library = W.CheckBox((bookstart, 58, -10, 16), "Library reference") self.w.langueref = W.CheckBox((bookstart, 78, -10, 16), "Lanuage reference manual") self.w.extending = W.CheckBox((bookstart, 98, -10, 16), "Extending & embedding") self.w.api = W.CheckBox((bookstart, 118, -10, 16), "C/C++ API") self.w.setdocfolderbutton = W.Button((10, -30, 80, 16), "Set doc folder", self.setdocpath) if docpath: self.w.setdefaultbutton(self.w.searchbutton) else: self.w.setdefaultbutton(self.w.setdocfolderbutton) self.docpath = docpath if not docpath: docpath = "(please select the Python html documentation folder)" self.w.docfolder = W.TextBox((100, -28, -10, 16), docpath) [self.w.phraseradio, self.w.allwordsradio, self.w.anywordsradio][kind].set(1) self.w.casesens.set(case) self.w.wholewords.set(word) self.w.tutorial.set(tut) self.w.library.set(lib) self.w.langueref.set(ref) self.w.extending.set(ext) self.w.api.set(api) self.w.open() self.w.wholewords.enable(0) self.w.bind('<close>', self.close) self.w.searchbutton.enable(0)
3aef7b8906f19285954580a0ad6a66ca9e0ec783 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3aef7b8906f19285954580a0ad6a66ca9e0ec783/PyDocSearch.py
self.w.docfolder = W.TextBox((100, -28, -10, 16), docpath)
self.w.docfolder = W.TextBox((120, -28, -10, 16), docpath)
def __init__(self): prefs = MacPrefs.GetPrefs(W.getapplication().preffilepath) try: (docpath, kind, case, word, tut, lib, ref, ext, api) = prefs.docsearchengine except: (docpath, kind, case, word, tut, lib, ref, ext, api) = prefs.docsearchengine = \ ("", 0, 0, 0, 1, 1, 0, 0, 0) if docpath and not verifydocpath(docpath): docpath = "" self.w = W.Window((400, 200), "Search the Python Documentation") self.w.searchtext = W.EditText((10, 10, -100, 20), callback = self.checkbuttons) self.w.searchbutton = W.Button((-90, 12, 80, 16), "Search", self.search) buttons = [] gutter = 10 width = 130 bookstart = width + 2 * gutter self.w.phraseradio = W.RadioButton((10, 38, width, 16), "As a phrase", buttons) self.w.allwordsradio = W.RadioButton((10, 58, width, 16), "All words", buttons) self.w.anywordsradio = W.RadioButton((10, 78, width, 16), "Any word", buttons) self.w.casesens = W.CheckBox((10, 98, width, 16), "Case sensitive") self.w.wholewords = W.CheckBox((10, 118, width, 16), "Whole words") self.w.tutorial = W.CheckBox((bookstart, 38, -10, 16), "Tutorial") self.w.library = W.CheckBox((bookstart, 58, -10, 16), "Library reference") self.w.langueref = W.CheckBox((bookstart, 78, -10, 16), "Lanuage reference manual") self.w.extending = W.CheckBox((bookstart, 98, -10, 16), "Extending & embedding") self.w.api = W.CheckBox((bookstart, 118, -10, 16), "C/C++ API") self.w.setdocfolderbutton = W.Button((10, -30, 80, 16), "Set doc folder", self.setdocpath) if docpath: self.w.setdefaultbutton(self.w.searchbutton) else: self.w.setdefaultbutton(self.w.setdocfolderbutton) self.docpath = docpath if not docpath: docpath = "(please select the Python html documentation folder)" self.w.docfolder = W.TextBox((100, -28, -10, 16), docpath) [self.w.phraseradio, self.w.allwordsradio, self.w.anywordsradio][kind].set(1) self.w.casesens.set(case) self.w.wholewords.set(word) self.w.tutorial.set(tut) self.w.library.set(lib) self.w.langueref.set(ref) self.w.extending.set(ext) self.w.api.set(api) self.w.open() self.w.wholewords.enable(0) self.w.bind('<close>', self.close) self.w.searchbutton.enable(0)
3aef7b8906f19285954580a0ad6a66ca9e0ec783 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3aef7b8906f19285954580a0ad6a66ca9e0ec783/PyDocSearch.py
def search(self): hits = dosearch(self.docpath, self.w.searchtext.get(), self.getsettings()) if hits: Results(hits) elif hasattr(MacOS, 'SysBeep'): MacOS.SysBeep(0) #import PyBrowser #PyBrowser.Browser(hits)
3aef7b8906f19285954580a0ad6a66ca9e0ec783 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3aef7b8906f19285954580a0ad6a66ca9e0ec783/PyDocSearch.py
d = new.code(3, 3, 3, 3, codestr, (), (), (), "<string>", "<name>", 1, "", (), ())
def f(a): pass c = f.func_code argcount = c.co_argcount nlocals = c.co_nlocals stacksize = c.co_stacksize flags = c.co_flags codestring = c.co_code constants = c.co_consts names = c.co_names varnames = c.co_varnames filename = c.co_filename name = c.co_name firstlineno = c.co_firstlineno lnotab = c.co_lnotab freevars = c.co_freevars cellvars = c.co_cellvars d = new.code(argcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars)
def test_closure(func, closure, exc): try: new.function(func.func_code, {}, "", None, closure) except exc: pass else: print "corrupt closure accepted"
d51ef676f001b1909129483367ebf01b5c0ec36e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d51ef676f001b1909129483367ebf01b5c0ec36e/test_new.py
d = new.code(3, 3, 3, 3, codestr, (), (), (), "<string>", "<name>", 1, "")
d = new.code(argcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename, name, firstlineno, lnotab) try: d = new.code(-argcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename, name, firstlineno, lnotab) except ValueError: pass else: raise TestFailed, "negative co_argcount didn't trigger an exception" try: d = new.code(argcount, -nlocals, stacksize, flags, codestring, constants, names, varnames, filename, name, firstlineno, lnotab) except ValueError: pass else: raise TestFailed, "negative co_nlocals didn't trigger an exception" try: d = new.code(argcount, nlocals, stacksize, flags, codestring, constants, (5,), varnames, filename, name, firstlineno, lnotab) except TypeError: pass else: raise TestFailed, "non-string co_name didn't trigger an exception" class S(str): pass t = (S("ab"),) d = new.code(argcount, nlocals, stacksize, flags, codestring, constants, t, varnames, filename, name, firstlineno, lnotab) verify(type(t[0]) is S, "eek, tuple changed under us!")
def test_closure(func, closure, exc): try: new.function(func.func_code, {}, "", None, closure) except exc: pass else: print "corrupt closure accepted"
d51ef676f001b1909129483367ebf01b5c0ec36e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/d51ef676f001b1909129483367ebf01b5c0ec36e/test_new.py
if v == "1": g[n] = 1 else: g[n] = v
try: v = string.atoi(v) except ValueError: pass g[n] = v
undef_rx = re.compile("/[*] #undef ([A-Z][A-Z0-9_]+) [*]/\n")
26f241773de7e539dd0c9a75c468038fd0e9b83d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/26f241773de7e539dd0c9a75c468038fd0e9b83d/sysconfig.py
done[name] = value
try: value = string.atoi(value) except ValueError: pass done[name] = string.strip(value)
undef_rx = re.compile("/[*] #undef ([A-Z][A-Z0-9_]+) [*]/\n")
26f241773de7e539dd0c9a75c468038fd0e9b83d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/26f241773de7e539dd0c9a75c468038fd0e9b83d/sysconfig.py
done[name] = value
try: value = string.atoi(value) except ValueError: pass done[name] = string.strip(value)
undef_rx = re.compile("/[*] #undef ([A-Z][A-Z0-9_]+) [*]/\n")
26f241773de7e539dd0c9a75c468038fd0e9b83d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/26f241773de7e539dd0c9a75c468038fd0e9b83d/sysconfig.py
raise UnimplementedError
raise NotImplementedError
def process_message(self, peer, mailfrom, rcpttos, data): """Override this abstract method to handle messages from the client.
baf578f69af565369958dbd8d1982da04a1862b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/baf578f69af565369958dbd8d1982da04a1862b5/smtpd.py
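The row above swaps a misspelled UnimplementedError for NotImplementedError in smtpd's abstract process_message hook. As a rough illustration of that hook being overridden rather than left to raise, a sketch using the smtpd module named in the meta path (since removed from newer Python releases); the address and port are arbitrary.

```python
import asyncore
import smtpd

class LoggingSMTPServer(smtpd.SMTPServer):
    # Override the abstract hook instead of letting the base class raise
    # NotImplementedError; **kwargs absorbs extra keyword arguments passed
    # by newer Python versions.
    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
        print("peer=%s from=%s rcpt=%s size=%d" % (peer, mailfrom, rcpttos, len(data)))

if __name__ == "__main__":
    LoggingSMTPServer(("127.0.0.1", 8025), None)  # arbitrary local endpoint
    asyncore.loop()
```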
The optional second argument can specify an alternative default."""
The optional second argument can specify an alternate default."""
def getenv(key, default=None): """Get an environment variable, return None if it doesn't exist. The optional second argument can specify an alternative default.""" return environ.get(key, default)
6f59ab0abfe92f46b5b0c6b3e2db3be366c52ec3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6f59ab0abfe92f46b5b0c6b3e2db3be366c52ec3/os.py
assert mode[:1] in ("b", "t")
def popen2(cmd, mode="t", bufsize=-1): assert mode[:1] in ("b", "t") import popen2 stdout, stdin = popen2.popen2(cmd, bufsize) return stdin, stdout
6f59ab0abfe92f46b5b0c6b3e2db3be366c52ec3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6f59ab0abfe92f46b5b0c6b3e2db3be366c52ec3/os.py
if status != 100:
if status != CONTINUE:
def begin(self): if self.msg is not None: # we've already started reading the response return
420d6640e8689ee2e06c8407789613f6cd085c16 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/420d6640e8689ee2e06c8407789613f6cd085c16/httplib.py
if (status == 204 or status == 304 or
if (status == NO_CONTENT or status == NOT_MODIFIED or
def begin(self): if self.msg is not None: # we've already started reading the response return
420d6640e8689ee2e06c8407789613f6cd085c16 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/420d6640e8689ee2e06c8407789613f6cd085c16/httplib.py
return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite, colnames_type_suite, adaptation_suite, date_suite))
return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite, colnames_type_suite, adaptation_suite, bin_suite, date_suite))
def suite(): sqlite_type_suite = unittest.makeSuite(SqliteTypeTests, "Check") decltypes_type_suite = unittest.makeSuite(DeclTypesTests, "Check") colnames_type_suite = unittest.makeSuite(ColNamesTests, "Check") adaptation_suite = unittest.makeSuite(ObjectAdaptationTests, "Check") date_suite = unittest.makeSuite(DateTimeTests, "Check") return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite, colnames_type_suite, adaptation_suite, date_suite))
02280dd3902b818c8900074365b1af907f33356c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/02280dd3902b818c8900074365b1af907f33356c/types.py
parts = parts[1:]
parts = parts[1:]
def loadTestsFromName(self, name, module=None): """Return a suite of all tests cases given a string specifier.
38c65d291cc30cfc884c22b37fff913cb5df6d34 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/38c65d291cc30cfc884c22b37fff913cb5df6d34/unittest.py
filter = callable
filter = template
def _subn(pattern, template, string, count=0): # internal: pattern.subn implementation hook if callable(template): filter = callable else: # FIXME: prepare template def filter(match, template=template): return _expand(match, template) n = i = 0 s = [] append = s.append c = pattern.cursor(string) while not count or n < count: m = c.search() if not m: break j = m.start() if j > i: append(string[i:j]) append(filter(m)) i = m.end() n = n + 1 if i < len(string): append(string[i:]) return string[:0].join(s), n
717e89d3eac2787c3c93edf08ce7d1af4e3b95a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/717e89d3eac2787c3c93edf08ce7d1af4e3b95a7/sre.py
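The row above fixes _subn's callable-template branch, which had assigned the builtin callable instead of the template function itself. For illustration, the public behaviour that branch supports: passing a callable replacement to re.sub, which receives each match object and returns the substitution text.

```python
import re

def upper_word(match):
    # The callable replacement gets a match object and returns the text
    # to substitute for it.
    return match.group(0).upper()

print(re.sub(r"\b\w+\b", upper_word, "fix the filter"))  # FIX THE FILTER
```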
def __init__(self, root=None, font=None, name=None, **options):
def __init__(self, root=None, font=None, name=None, exists=False, **options):
def __init__(self, root=None, font=None, name=None, **options): if not root: root = Tkinter._default_root if font: # get actual settings corresponding to the given font font = root.tk.splitlist(root.tk.call("font", "actual", font)) else: font = self._set(options) if not name: name = "font" + str(id(self)) self.name = name root.tk.call("font", "create", name, *font) # backlinks! self._root = root self._split = root.tk.splitlist self._call = root.tk.call
05b523f61891071632227fc5212c1db4f69d5611 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/05b523f61891071632227fc5212c1db4f69d5611/tkFont.py
root.tk.call("font", "create", name, *font)
if exists: self.delete_font = False if self.name not in root.tk.call("font", "names"): raise Tkinter._tkinter.TclError, "named font %s does not already exist" % (self.name,) if font: print "font=%r" % font root.tk.call("font", "configure", self.name, *font) else: root.tk.call("font", "create", self.name, *font) self.delete_font = True
def __init__(self, root=None, font=None, name=None, **options): if not root: root = Tkinter._default_root if font: # get actual settings corresponding to the given font font = root.tk.splitlist(root.tk.call("font", "actual", font)) else: font = self._set(options) if not name: name = "font" + str(id(self)) self.name = name root.tk.call("font", "create", name, *font) # backlinks! self._root = root self._split = root.tk.splitlist self._call = root.tk.call
05b523f61891071632227fc5212c1db4f69d5611 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/05b523f61891071632227fc5212c1db4f69d5611/tkFont.py
self._call("font", "delete", self.name)
if self.delete_font: self._call("font", "delete", self.name)
def __del__(self): try: self._call("font", "delete", self.name) except (AttributeError, Tkinter.TclError): pass
05b523f61891071632227fc5212c1db4f69d5611 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/05b523f61891071632227fc5212c1db4f69d5611/tkFont.py
def __del__(self): try: self._call("font", "delete", self.name) except (AttributeError, Tkinter.TclError): pass
05b523f61891071632227fc5212c1db4f69d5611 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/05b523f61891071632227fc5212c1db4f69d5611/tkFont.py
for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS):
for base in (HKEY_CLASSES_ROOT, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER, HKEY_USERS):
def get_devstudio_versions (): """Get list of devstudio versions from the Windows registry. Return a list of strings containing version numbers; the list will be empty if we were unable to access the registry (eg. couldn't import a registry-access module) or the appropriate registry keys weren't found.""" if not _can_read_reg: return [] K = 'Software\\Microsoft\\Devstudio' L = [] for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS): try: k = _RegOpenKeyEx(base,K) i = 0 while 1: try: p = _RegEnumKey(k,i) if p[0] in '123456789' and p not in L: L.append(p) except _RegError: break i = i + 1 except _RegError: pass L.sort() L.reverse() return L
2f60430e0909b2e5da8b0cca0f12721fa270d324 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2f60430e0909b2e5da8b0cca0f12721fa270d324/msvccompiler.py
k = _RegOpenKeyEx(base,K)
k = RegOpenKeyEx(base,K)
def get_devstudio_versions (): """Get list of devstudio versions from the Windows registry. Return a list of strings containing version numbers; the list will be empty if we were unable to access the registry (eg. couldn't import a registry-access module) or the appropriate registry keys weren't found.""" if not _can_read_reg: return [] K = 'Software\\Microsoft\\Devstudio' L = [] for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS): try: k = _RegOpenKeyEx(base,K) i = 0 while 1: try: p = _RegEnumKey(k,i) if p[0] in '123456789' and p not in L: L.append(p) except _RegError: break i = i + 1 except _RegError: pass L.sort() L.reverse() return L
2f60430e0909b2e5da8b0cca0f12721fa270d324 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2f60430e0909b2e5da8b0cca0f12721fa270d324/msvccompiler.py
p = _RegEnumKey(k,i)
p = RegEnumKey(k,i)
def get_devstudio_versions (): """Get list of devstudio versions from the Windows registry. Return a list of strings containing version numbers; the list will be empty if we were unable to access the registry (eg. couldn't import a registry-access module) or the appropriate registry keys weren't found.""" if not _can_read_reg: return [] K = 'Software\\Microsoft\\Devstudio' L = [] for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS): try: k = _RegOpenKeyEx(base,K) i = 0 while 1: try: p = _RegEnumKey(k,i) if p[0] in '123456789' and p not in L: L.append(p) except _RegError: break i = i + 1 except _RegError: pass L.sort() L.reverse() return L
2f60430e0909b2e5da8b0cca0f12721fa270d324 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2f60430e0909b2e5da8b0cca0f12721fa270d324/msvccompiler.py
except _RegError:
except RegError:
def get_devstudio_versions (): """Get list of devstudio versions from the Windows registry. Return a list of strings containing version numbers; the list will be empty if we were unable to access the registry (eg. couldn't import a registry-access module) or the appropriate registry keys weren't found.""" if not _can_read_reg: return [] K = 'Software\\Microsoft\\Devstudio' L = [] for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS): try: k = _RegOpenKeyEx(base,K) i = 0 while 1: try: p = _RegEnumKey(k,i) if p[0] in '123456789' and p not in L: L.append(p) except _RegError: break i = i + 1 except _RegError: pass L.sort() L.reverse() return L
2f60430e0909b2e5da8b0cca0f12721fa270d324 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2f60430e0909b2e5da8b0cca0f12721fa270d324/msvccompiler.py
except _RegError:
except RegError:
def get_devstudio_versions (): """Get list of devstudio versions from the Windows registry. Return a list of strings containing version numbers; the list will be empty if we were unable to access the registry (eg. couldn't import a registry-access module) or the appropriate registry keys weren't found.""" if not _can_read_reg: return [] K = 'Software\\Microsoft\\Devstudio' L = [] for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS): try: k = _RegOpenKeyEx(base,K) i = 0 while 1: try: p = _RegEnumKey(k,i) if p[0] in '123456789' and p not in L: L.append(p) except _RegError: break i = i + 1 except _RegError: pass L.sort() L.reverse() return L
2f60430e0909b2e5da8b0cca0f12721fa270d324 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2f60430e0909b2e5da8b0cca0f12721fa270d324/msvccompiler.py
for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS):
for base in (HKEY_CLASSES_ROOT, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER, HKEY_USERS):
def get_msvc_paths (path, version='6.0', platform='x86'): """Get a list of devstudio directories (include, lib or path). Return a list of strings; will be empty list if unable to access the registry or appropriate registry keys not found.""" if not _can_read_reg: return [] L = [] if path=='lib': path= 'Library' path = string.upper(path + ' Dirs') K = ('Software\\Microsoft\\Devstudio\\%s\\' + 'Build System\\Components\\Platforms\\Win32 (%s)\\Directories') % \ (version,platform) for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS): try: k = _RegOpenKeyEx(base,K) i = 0 while 1: try: (p,v,t) = _RegEnumValue(k,i) if string.upper(p) == path: V = string.split(v,';') for v in V: if v == '' or v in L: continue L.append(v) break i = i + 1 except _RegError: break except _RegError: pass return L
2f60430e0909b2e5da8b0cca0f12721fa270d324 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2f60430e0909b2e5da8b0cca0f12721fa270d324/msvccompiler.py
k = _RegOpenKeyEx(base,K)
k = RegOpenKeyEx(base,K)
def get_msvc_paths (path, version='6.0', platform='x86'): """Get a list of devstudio directories (include, lib or path). Return a list of strings; will be empty list if unable to access the registry or appropriate registry keys not found.""" if not _can_read_reg: return [] L = [] if path=='lib': path= 'Library' path = string.upper(path + ' Dirs') K = ('Software\\Microsoft\\Devstudio\\%s\\' + 'Build System\\Components\\Platforms\\Win32 (%s)\\Directories') % \ (version,platform) for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS): try: k = _RegOpenKeyEx(base,K) i = 0 while 1: try: (p,v,t) = _RegEnumValue(k,i) if string.upper(p) == path: V = string.split(v,';') for v in V: if v == '' or v in L: continue L.append(v) break i = i + 1 except _RegError: break except _RegError: pass return L
2f60430e0909b2e5da8b0cca0f12721fa270d324 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2f60430e0909b2e5da8b0cca0f12721fa270d324/msvccompiler.py
(p,v,t) = _RegEnumValue(k,i)
(p,v,t) = RegEnumValue(k,i)
def get_msvc_paths (path, version='6.0', platform='x86'): """Get a list of devstudio directories (include, lib or path). Return a list of strings; will be empty list if unable to access the registry or appropriate registry keys not found.""" if not _can_read_reg: return [] L = [] if path=='lib': path= 'Library' path = string.upper(path + ' Dirs') K = ('Software\\Microsoft\\Devstudio\\%s\\' + 'Build System\\Components\\Platforms\\Win32 (%s)\\Directories') % \ (version,platform) for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS): try: k = _RegOpenKeyEx(base,K) i = 0 while 1: try: (p,v,t) = _RegEnumValue(k,i) if string.upper(p) == path: V = string.split(v,';') for v in V: if v == '' or v in L: continue L.append(v) break i = i + 1 except _RegError: break except _RegError: pass return L
2f60430e0909b2e5da8b0cca0f12721fa270d324 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2f60430e0909b2e5da8b0cca0f12721fa270d324/msvccompiler.py
except _RegError:
except RegError:
def get_msvc_paths (path, version='6.0', platform='x86'): """Get a list of devstudio directories (include, lib or path). Return a list of strings; will be empty list if unable to access the registry or appropriate registry keys not found.""" if not _can_read_reg: return [] L = [] if path=='lib': path= 'Library' path = string.upper(path + ' Dirs') K = ('Software\\Microsoft\\Devstudio\\%s\\' + 'Build System\\Components\\Platforms\\Win32 (%s)\\Directories') % \ (version,platform) for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS): try: k = _RegOpenKeyEx(base,K) i = 0 while 1: try: (p,v,t) = _RegEnumValue(k,i) if string.upper(p) == path: V = string.split(v,';') for v in V: if v == '' or v in L: continue L.append(v) break i = i + 1 except _RegError: break except _RegError: pass return L
2f60430e0909b2e5da8b0cca0f12721fa270d324 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2f60430e0909b2e5da8b0cca0f12721fa270d324/msvccompiler.py
except _RegError:
except RegError:
def get_msvc_paths (path, version='6.0', platform='x86'): """Get a list of devstudio directories (include, lib or path). Return a list of strings; will be empty list if unable to access the registry or appropriate registry keys not found.""" if not _can_read_reg: return [] L = [] if path=='lib': path= 'Library' path = string.upper(path + ' Dirs') K = ('Software\\Microsoft\\Devstudio\\%s\\' + 'Build System\\Components\\Platforms\\Win32 (%s)\\Directories') % \ (version,platform) for base in (_HKEY_CLASSES_ROOT, _HKEY_LOCAL_MACHINE, _HKEY_CURRENT_USER, _HKEY_USERS): try: k = _RegOpenKeyEx(base,K) i = 0 while 1: try: (p,v,t) = _RegEnumValue(k,i) if string.upper(p) == path: V = string.split(v,';') for v in V: if v == '' or v in L: continue L.append(v) break i = i + 1 except _RegError: break except _RegError: pass return L
2f60430e0909b2e5da8b0cca0f12721fa270d324 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2f60430e0909b2e5da8b0cca0f12721fa270d324/msvccompiler.py
verify(2*L(3) == 6) verify(L(3)*2 == 6) verify(L(3)*L(2) == 6)
def mysetattr(self, name, value): if name == "spam": raise AttributeError return object.__setattr__(self, name, value)
476196616568024647bcd2d7b2019c527ef99c55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/476196616568024647bcd2d7b2019c527ef99c55/test_descr.py
for k in value.keys():
for k, v in value.items():
def dump_struct(self, value, write, escape=escape): i = id(value) if self.memo.has_key(i): raise TypeError, "cannot marshal recursive dictionaries" self.memo[i] = None dump = self.__dump write("<value><struct>\n") for k in value.keys(): write("<member>\n") if type(k) is not StringType: raise TypeError, "dictionary key must be string" write("<name>%s</name>\n" % escape(k)) dump(value[k], write) write("</member>\n") write("</struct></value>\n") del self.memo[i]
289e2e99acdecc64f67e7986e1e05594b344ba18 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/289e2e99acdecc64f67e7986e1e05594b344ba18/xmlrpclib.py
raise TypeError, "dictionary key must be string"
if unicode and type(k) is UnicodeType: k = k.encode(self.encoding) else: raise TypeError, "dictionary key must be string"
def dump_struct(self, value, write, escape=escape): i = id(value) if self.memo.has_key(i): raise TypeError, "cannot marshal recursive dictionaries" self.memo[i] = None dump = self.__dump write("<value><struct>\n") for k in value.keys(): write("<member>\n") if type(k) is not StringType: raise TypeError, "dictionary key must be string" write("<name>%s</name>\n" % escape(k)) dump(value[k], write) write("</member>\n") write("</struct></value>\n") del self.memo[i]
289e2e99acdecc64f67e7986e1e05594b344ba18 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/289e2e99acdecc64f67e7986e1e05594b344ba18/xmlrpclib.py
dump(value[k], write)
dump(v, write)
def dump_struct(self, value, write, escape=escape): i = id(value) if self.memo.has_key(i): raise TypeError, "cannot marshal recursive dictionaries" self.memo[i] = None dump = self.__dump write("<value><struct>\n") for k in value.keys(): write("<member>\n") if type(k) is not StringType: raise TypeError, "dictionary key must be string" write("<name>%s</name>\n" % escape(k)) dump(value[k], write) write("</member>\n") write("</struct></value>\n") del self.memo[i]
289e2e99acdecc64f67e7986e1e05594b344ba18 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/289e2e99acdecc64f67e7986e1e05594b344ba18/xmlrpclib.py
LE_MAGIC = 0x950412de BE_MAGIC = 0xde120495
LE_MAGIC = 0x950412deL BE_MAGIC = 0xde120495L
def install(self, unicode=0): import __builtin__ __builtin__.__dict__['_'] = unicode and self.ugettext or self.gettext
3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e/gettext.py
MASK = 0xffffffff
def _parse(self, fp): """Override this method to support alternative .mo formats.""" # We need to & all 32 bit unsigned integers with 0xffffffff for # portability to 64 bit machines. MASK = 0xffffffff unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<i', buf[:4])[0] & MASK if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4i', buf[4:20]) ii = '<ii' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4i', buf[4:20]) ii = '>ii' else: raise IOError(0, 'Bad magic number', filename) # more unsigned ints msgcount &= MASK masteridx &= MASK transidx &= MASK # Now put all messages from the .mo file buffer into the catalog # dictionary. for i in xrange(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) moff &= MASK mend = moff + (mlen & MASK) tlen, toff = unpack(ii, buf[transidx:transidx+8]) toff &= MASK tend = toff + (tlen & MASK) if mend < buflen and tend < buflen: tmsg = buf[toff:tend] catalog[buf[moff:mend]] = tmsg else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0 and tmsg.lower().startswith('project-id-version:'): # Catalog description for item in tmsg.split('\n'): item = item.strip() if not item: continue k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v if k == 'content-type': self._charset = v.split('charset=')[1] # advance to next entry in the seek tables masteridx += 8 transidx += 8
3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e/gettext.py
magic = unpack('<i', buf[:4])[0] & MASK
magic = unpack('<I', buf[:4])[0]
def _parse(self, fp): """Override this method to support alternative .mo formats.""" # We need to & all 32 bit unsigned integers with 0xffffffff for # portability to 64 bit machines. MASK = 0xffffffff unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<i', buf[:4])[0] & MASK if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4i', buf[4:20]) ii = '<ii' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4i', buf[4:20]) ii = '>ii' else: raise IOError(0, 'Bad magic number', filename) # more unsigned ints msgcount &= MASK masteridx &= MASK transidx &= MASK # Now put all messages from the .mo file buffer into the catalog # dictionary. for i in xrange(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) moff &= MASK mend = moff + (mlen & MASK) tlen, toff = unpack(ii, buf[transidx:transidx+8]) toff &= MASK tend = toff + (tlen & MASK) if mend < buflen and tend < buflen: tmsg = buf[toff:tend] catalog[buf[moff:mend]] = tmsg else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0 and tmsg.lower().startswith('project-id-version:'): # Catalog description for item in tmsg.split('\n'): item = item.strip() if not item: continue k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v if k == 'content-type': self._charset = v.split('charset=')[1] # advance to next entry in the seek tables masteridx += 8 transidx += 8
3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e/gettext.py
version, msgcount, masteridx, transidx = unpack('<4i', buf[4:20])
ii = '<ii'
version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
ii = '<II'
def _parse(self, fp): """Override this method to support alternative .mo formats.""" # We need to & all 32 bit unsigned integers with 0xffffffff for # portability to 64 bit machines. MASK = 0xffffffff unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<i', buf[:4])[0] & MASK if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4i', buf[4:20]) ii = '<ii' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4i', buf[4:20]) ii = '>ii' else: raise IOError(0, 'Bad magic number', filename) # more unsigned ints msgcount &= MASK masteridx &= MASK transidx &= MASK # Now put all messages from the .mo file buffer into the catalog # dictionary. for i in xrange(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) moff &= MASK mend = moff + (mlen & MASK) tlen, toff = unpack(ii, buf[transidx:transidx+8]) toff &= MASK tend = toff + (tlen & MASK) if mend < buflen and tend < buflen: tmsg = buf[toff:tend] catalog[buf[moff:mend]] = tmsg else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0 and tmsg.lower().startswith('project-id-version:'): # Catalog description for item in tmsg.split('\n'): item = item.strip() if not item: continue k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v if k == 'content-type': self._charset = v.split('charset=')[1] # advance to next entry in the seek tables masteridx += 8 transidx += 8
3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e/gettext.py
version, msgcount, masteridx, transidx = unpack('>4i', buf[4:20])
ii = '>ii'
version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
ii = '>II'
def _parse(self, fp): """Override this method to support alternative .mo formats.""" # We need to & all 32 bit unsigned integers with 0xffffffff for # portability to 64 bit machines. MASK = 0xffffffff unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<i', buf[:4])[0] & MASK if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4i', buf[4:20]) ii = '<ii' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4i', buf[4:20]) ii = '>ii' else: raise IOError(0, 'Bad magic number', filename) # more unsigned ints msgcount &= MASK masteridx &= MASK transidx &= MASK # Now put all messages from the .mo file buffer into the catalog # dictionary. for i in xrange(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) moff &= MASK mend = moff + (mlen & MASK) tlen, toff = unpack(ii, buf[transidx:transidx+8]) toff &= MASK tend = toff + (tlen & MASK) if mend < buflen and tend < buflen: tmsg = buf[toff:tend] catalog[buf[moff:mend]] = tmsg else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0 and tmsg.lower().startswith('project-id-version:'): # Catalog description for item in tmsg.split('\n'): item = item.strip() if not item: continue k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v if k == 'content-type': self._charset = v.split('charset=')[1] # advance to next entry in the seek tables masteridx += 8 transidx += 8
3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e/gettext.py
msgcount &= MASK
masteridx &= MASK
transidx &= MASK
def _parse(self, fp): """Override this method to support alternative .mo formats.""" # We need to & all 32 bit unsigned integers with 0xffffffff for # portability to 64 bit machines. MASK = 0xffffffff unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<i', buf[:4])[0] & MASK if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4i', buf[4:20]) ii = '<ii' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4i', buf[4:20]) ii = '>ii' else: raise IOError(0, 'Bad magic number', filename) # more unsigned ints msgcount &= MASK masteridx &= MASK transidx &= MASK # Now put all messages from the .mo file buffer into the catalog # dictionary. for i in xrange(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) moff &= MASK mend = moff + (mlen & MASK) tlen, toff = unpack(ii, buf[transidx:transidx+8]) toff &= MASK tend = toff + (tlen & MASK) if mend < buflen and tend < buflen: tmsg = buf[toff:tend] catalog[buf[moff:mend]] = tmsg else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0 and tmsg.lower().startswith('project-id-version:'): # Catalog description for item in tmsg.split('\n'): item = item.strip() if not item: continue k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v if k == 'content-type': self._charset = v.split('charset=')[1] # advance to next entry in the seek tables masteridx += 8 transidx += 8
3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e/gettext.py
moff &= MASK
mend = moff + (mlen & MASK)
mend = moff + mlen
def _parse(self, fp): """Override this method to support alternative .mo formats.""" # We need to & all 32 bit unsigned integers with 0xffffffff for # portability to 64 bit machines. MASK = 0xffffffff unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<i', buf[:4])[0] & MASK if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4i', buf[4:20]) ii = '<ii' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4i', buf[4:20]) ii = '>ii' else: raise IOError(0, 'Bad magic number', filename) # more unsigned ints msgcount &= MASK masteridx &= MASK transidx &= MASK # Now put all messages from the .mo file buffer into the catalog # dictionary. for i in xrange(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) moff &= MASK mend = moff + (mlen & MASK) tlen, toff = unpack(ii, buf[transidx:transidx+8]) toff &= MASK tend = toff + (tlen & MASK) if mend < buflen and tend < buflen: tmsg = buf[toff:tend] catalog[buf[moff:mend]] = tmsg else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0 and tmsg.lower().startswith('project-id-version:'): # Catalog description for item in tmsg.split('\n'): item = item.strip() if not item: continue k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v if k == 'content-type': self._charset = v.split('charset=')[1] # advance to next entry in the seek tables masteridx += 8 transidx += 8
3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e/gettext.py
toff &= MASK
tend = toff + (tlen & MASK)
tend = toff + tlen
def _parse(self, fp): """Override this method to support alternative .mo formats.""" # We need to & all 32 bit unsigned integers with 0xffffffff for # portability to 64 bit machines. MASK = 0xffffffff unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<i', buf[:4])[0] & MASK if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4i', buf[4:20]) ii = '<ii' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4i', buf[4:20]) ii = '>ii' else: raise IOError(0, 'Bad magic number', filename) # more unsigned ints msgcount &= MASK masteridx &= MASK transidx &= MASK # Now put all messages from the .mo file buffer into the catalog # dictionary. for i in xrange(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) moff &= MASK mend = moff + (mlen & MASK) tlen, toff = unpack(ii, buf[transidx:transidx+8]) toff &= MASK tend = toff + (tlen & MASK) if mend < buflen and tend < buflen: tmsg = buf[toff:tend] catalog[buf[moff:mend]] = tmsg else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0 and tmsg.lower().startswith('project-id-version:'): # Catalog description for item in tmsg.split('\n'): item = item.strip() if not item: continue k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v if k == 'content-type': self._charset = v.split('charset=')[1] # advance to next entry in the seek tables masteridx += 8 transidx += 8
3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3f5e2f11ac43eaefcf057cddfc65684bdfa0ec5e/gettext.py
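The gettext records above all make the same change: the struct format codes switch from signed ('i', '4i', 'ii') to unsigned ('I', '4I', 'II'), which is why the explicit `& MASK` masking of the header fields and offsets can be dropped. A minimal standalone sketch of the difference, assuming the little-endian GNU .mo magic value 0x950412de that gettext's LE_MAGIC refers to:

import struct

buf = struct.pack('<I', 0x950412de)        # raw little-endian .mo magic

signed = struct.unpack('<i', buf)[0]       # -1794895138: needs masking back
unsigned = struct.unpack('<I', buf)[0]     # 2500072158 == 0x950412de: no mask
assert signed & 0xffffffff == unsigned == 0x950412de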
return struct.unpack('8', data)[0]
return struct.unpack('d', data)[0]
def unpack_double(self): # XXX
    i = self.pos
    self.pos = j = i+8
    data = self.buf[i:j]
    if len(data) < 8:
        raise EOFError
    return struct.unpack('8', data)[0]
8976880b22c234cf3e3041678425225b4f272cf7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/8976880b22c234cf3e3041678425225b4f272cf7/xdr.py
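'8' is not a valid struct format string: a digit is only a repeat count and must be followed by a format character, so the removed line always failed, while 'd' unpacks the eight bytes as an IEEE 754 double. A small sketch:

import struct

data = struct.pack('d', 2.5)               # 8-byte IEEE 754 double
assert len(data) == 8
assert struct.unpack('d', data)[0] == 2.5
# struct.unpack('8', data) raises struct.error because the repeat count
# is not followed by a format code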
<SCRIPT LANGUAGE="JavaScript">
<script type="text/javascript">
def js_output(self, attrs=None):
    # Print javascript
    return """
    <SCRIPT LANGUAGE="JavaScript">
    <!-- begin hiding
    document.cookie = \"%s\"
    // end hiding -->
    </script>
    """ % ( self.OutputString(attrs), )
c2c7e2870bc6ec2f3614819474c39dfc95de1d55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c2c7e2870bc6ec2f3614819474c39dfc95de1d55/Cookie.py
document.cookie = \"%s\"
document.cookie = \"%s\";
def js_output(self, attrs=None):
    # Print javascript
    return """
    <SCRIPT LANGUAGE="JavaScript">
    <!-- begin hiding
    document.cookie = \"%s\"
    // end hiding -->
    </script>
    """ % ( self.OutputString(attrs), )
c2c7e2870bc6ec2f3614819474c39dfc95de1d55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c2c7e2870bc6ec2f3614819474c39dfc95de1d55/Cookie.py
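Both Cookie records touch the JavaScript block returned by js_output(): the opening tag becomes a lowercase <script type="text/javascript"> (matching the existing closing </script>), and the document.cookie assignment gains a terminating semicolon. A hedged usage sketch with the Python 2 Cookie module (the module is http.cookies on Python 3):

import Cookie

c = Cookie.SimpleCookie()
c['session'] = '12345'
js = c.js_output()                         # the <script> block as a string
assert 'document.cookie' in js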
self.headers = None
return -1, line, self.headers
try:
    [ver, code] = string.split(line, None, 1)
    msg = ""
except ValueError:
    self.headers = None
    return -1, line, self.headers
def getreply(self):
    """Get a reply from the server.

    Returns a tuple consisting of:
    - server response code (e.g. '200' if all goes well)
    - server response string corresponding to response code
    - any RFC822 headers in the response from the server
745f25f7b3a20f9988c6b2d4b1646e1a865fcd81 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/745f25f7b3a20f9988c6b2d4b1646e1a865fcd81/httplib.py
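This httplib record adds a fallback for status lines that carry no reason phrase: a two-way split supplies an empty message instead of treating the response as malformed. A standalone illustration of that unpacking fallback (the initial three-way split is assumed from the surrounding getreply() code, which is only partially shown here):

line = 'HTTP/1.0 200'                      # status line with no reason phrase
try:
    [ver, code, msg] = line.split(None, 2)
except ValueError:
    [ver, code] = line.split(None, 1)      # the added fallback
    msg = ""
assert (ver, code, msg) == ('HTTP/1.0', '200', '')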
A IncrementalEncoder encodes an input in multiple steps. The input can be
An IncrementalEncoder encodes an input in multiple steps. The input can be
def decode(self, input, errors='strict'):
63e1aecc311dee68582dd7d6741f2aa23306ffeb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/63e1aecc311dee68582dd7d6741f2aa23306ffeb/codecs.py
Creates a IncrementalEncoder instance.
Creates an IncrementalEncoder instance.
def __init__(self, errors='strict'):
    """
    Creates a IncrementalEncoder instance.
63e1aecc311dee68582dd7d6741f2aa23306ffeb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/63e1aecc311dee68582dd7d6741f2aa23306ffeb/codecs.py
n = ((n+3)/4)*4
n = ((n+3)//4)*4
def pack_fstring(self, n, s):
    if n < 0:
        raise ValueError, 'fstring size must be nonnegative'
    data = s[:n]
    n = ((n+3)/4)*4
    data = data + (n - len(data)) * '\0'
    self.__buf.write(data)
823ae0a88afefb1a523dc522b86b53e40a8df481 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/823ae0a88afefb1a523dc522b86b53e40a8df481/xdrlib.py
j = i + (n+3)/4*4
j = i + (n+3)//4*4
def unpack_fstring(self, n):
    if n < 0:
        raise ValueError, 'fstring size must be nonnegative'
    i = self.__pos
    j = i + (n+3)/4*4
    if j > len(self.__buf):
        raise EOFError
    self.__pos = j
    return self.__buf[i:i+n]
823ae0a88afefb1a523dc522b86b53e40a8df481 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/823ae0a88afefb1a523dc522b86b53e40a8df481/xdrlib.py
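Both xdrlib records round a byte count up to the 4-byte boundary XDR requires; using // keeps that an integer floor division even when true division is in effect (e.g. under from __future__ import division). A sketch with a hypothetical helper name:

def xdr_pad(n):
    # round n up to the next multiple of 4, as XDR padding requires
    return ((n + 3) // 4) * 4

assert [xdr_pad(n) for n in (0, 1, 4, 5)] == [0, 4, 4, 8]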
def __div__(self, other): return "B.__div__"
def __rdiv__(self, other): return "B.__rdiv__"
vereq(B(1) / 1, "B.__div__")
vereq(1 / B(1), "B.__rdiv__")
def __floordiv__(self, other): return "B.__floordiv__"
def __rfloordiv__(self, other): return "B.__rfloordiv__"
vereq(B(1) // 1, "B.__floordiv__")
vereq(1 // B(1), "B.__rfloordiv__")
def __div__(self, other): return "B.__div__"
876595902d55c46321b25bd4b48d094b3d6068b0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/876595902d55c46321b25bd4b48d094b3d6068b0/test_descr.py
def __div__(self, other): return "C.__div__"
def __rdiv__(self, other): return "C.__rdiv__"
vereq(C() / 1, "C.__div__")
vereq(1 / C(), "C.__rdiv__")
def __floordiv__(self, other): return "C.__floordiv__"
def __rfloordiv__(self, other): return "C.__rfloordiv__"
vereq(C() // 1, "C.__floordiv__")
vereq(1 // C(), "C.__rfloordiv__")
def __div__(self, other): return "C.__div__"
876595902d55c46321b25bd4b48d094b3d6068b0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/876595902d55c46321b25bd4b48d094b3d6068b0/test_descr.py
def __div__(self, other): return "D.__div__"
def __rdiv__(self, other): return "D.__rdiv__"
vereq(D() / C(), "D.__div__")
vereq(C() / D(), "D.__rdiv__")
def __floordiv__(self, other): return "D.__floordiv__"
def __rfloordiv__(self, other): return "D.__rfloordiv__"
vereq(D() // C(), "D.__floordiv__")
vereq(C() // D(), "D.__rfloordiv__")
def __div__(self, other): return "D.__div__"
876595902d55c46321b25bd4b48d094b3d6068b0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/876595902d55c46321b25bd4b48d094b3d6068b0/test_descr.py
vereq(E.__rdiv__, C.__rdiv__)
vereq(E() / 1, "C.__div__")
vereq(1 / E(), "C.__rdiv__")
vereq(E() / C(), "C.__div__")
vereq(C() / E(), "C.__div__")
vereq(E.__rfloordiv__, C.__rfloordiv__)
vereq(E() // 1, "C.__floordiv__")
vereq(1 // E(), "C.__rfloordiv__")
vereq(E() // C(), "C.__floordiv__")
vereq(C() // E(), "C.__floordiv__")
def __rdiv__(self, other): return "D.__rdiv__"
876595902d55c46321b25bd4b48d094b3d6068b0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/876595902d55c46321b25bd4b48d094b3d6068b0/test_descr.py
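The four test_descr records above move the tests from the classic-division hooks __div__/__rdiv__ to the floor-division hooks: a // b first tries type(a).__floordiv__ and falls back to type(b).__rfloordiv__. A minimal sketch using plain asserts instead of the vereq helper:

class B(int):
    def __floordiv__(self, other): return "B.__floordiv__"
    def __rfloordiv__(self, other): return "B.__rfloordiv__"

assert B(1) // 2 == "B.__floordiv__"       # left operand handles //
assert 2 // B(1) == "B.__rfloordiv__"      # reflected hook on the right operand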
if url[:1] != '/': url = '/' + url
if url and url[:1] != '/': url = '/' + url
def urlunparse((scheme, netloc, url, params, query, fragment)):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    if netloc or (scheme in uses_netloc and url[:2] == '//'):
        if url[:1] != '/': url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if params:
        url = url + ';' + params
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return url
4611b17b48fbba42d8afeec44a5999dd6c1f9c63 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/4611b17b48fbba42d8afeec44a5999dd6c1f9c63/urlparse.py
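The added `url and` guard keeps an empty path empty instead of rewriting it to '/', so urlunparse no longer appends a spurious trailing slash when only a scheme and netloc are present. A small sketch (the function lives in urllib.parse on Python 3):

from urlparse import urlunparse

parts = ('http', 'www.python.org', '', '', '', '')
assert urlunparse(parts) == 'http://www.python.org'
# without the guard this came back as 'http://www.python.org/'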
return urlunparse((scheme, netloc, path, params, query, fragment))
return url
def urljoin(base, url, allow_fragments = 1): """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter.""" if not base: return url bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ urlparse(base, '', allow_fragments) scheme, netloc, path, params, query, fragment = \ urlparse(url, bscheme, allow_fragments) if scheme != bscheme or scheme not in uses_relative: return urlunparse((scheme, netloc, path, params, query, fragment)) if scheme in uses_netloc: if netloc: return urlunparse((scheme, netloc, path, params, query, fragment)) netloc = bnetloc if path[:1] == '/': return urlunparse((scheme, netloc, path, params, query, fragment)) if not path: return urlunparse((scheme, netloc, bpath, params, query or bquery, fragment)) segments = bpath.split('/')[:-1] + path.split('/') # XXX The stuff below is bogus in various ways... if segments[-1] == '.': segments[-1] = '' while '.' in segments: segments.remove('.') while 1: i = 1 n = len(segments) - 1 while i < n: if segments[i] == '..' and segments[i-1]: del segments[i-1:i+1] break i = i+1 else: break if len(segments) == 2 and segments[1] == '..' and segments[0] == '': segments[-1] = '' elif len(segments) >= 2 and segments[-1] == '..': segments[-2:] = [''] return urlunparse((scheme, netloc, '/'.join(segments), params, query, fragment))
4611b17b48fbba42d8afeec44a5999dd6c1f9c63 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/4611b17b48fbba42d8afeec44a5999dd6c1f9c63/urlparse.py
params, query or bquery, fragment))
params, query, fragment))
def urljoin(base, url, allow_fragments = 1): """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter.""" if not base: return url bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ urlparse(base, '', allow_fragments) scheme, netloc, path, params, query, fragment = \ urlparse(url, bscheme, allow_fragments) if scheme != bscheme or scheme not in uses_relative: return urlunparse((scheme, netloc, path, params, query, fragment)) if scheme in uses_netloc: if netloc: return urlunparse((scheme, netloc, path, params, query, fragment)) netloc = bnetloc if path[:1] == '/': return urlunparse((scheme, netloc, path, params, query, fragment)) if not path: return urlunparse((scheme, netloc, bpath, params, query or bquery, fragment)) segments = bpath.split('/')[:-1] + path.split('/') # XXX The stuff below is bogus in various ways... if segments[-1] == '.': segments[-1] = '' while '.' in segments: segments.remove('.') while 1: i = 1 n = len(segments) - 1 while i < n: if segments[i] == '..' and segments[i-1]: del segments[i-1:i+1] break i = i+1 else: break if len(segments) == 2 and segments[1] == '..' and segments[0] == '': segments[-1] = '' elif len(segments) >= 2 and segments[-1] == '..': segments[-2:] = [''] return urlunparse((scheme, netloc, '/'.join(segments), params, query, fragment))
4611b17b48fbba42d8afeec44a5999dd6c1f9c63 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/4611b17b48fbba42d8afeec44a5999dd6c1f9c63/urlparse.py
if segments[i] == '..' and segments[i-1]:
if (segments[i] == '..' and segments[i-1] not in ('', '..')):
def urljoin(base, url, allow_fragments = 1): """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter.""" if not base: return url bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ urlparse(base, '', allow_fragments) scheme, netloc, path, params, query, fragment = \ urlparse(url, bscheme, allow_fragments) if scheme != bscheme or scheme not in uses_relative: return urlunparse((scheme, netloc, path, params, query, fragment)) if scheme in uses_netloc: if netloc: return urlunparse((scheme, netloc, path, params, query, fragment)) netloc = bnetloc if path[:1] == '/': return urlunparse((scheme, netloc, path, params, query, fragment)) if not path: return urlunparse((scheme, netloc, bpath, params, query or bquery, fragment)) segments = bpath.split('/')[:-1] + path.split('/') # XXX The stuff below is bogus in various ways... if segments[-1] == '.': segments[-1] = '' while '.' in segments: segments.remove('.') while 1: i = 1 n = len(segments) - 1 while i < n: if segments[i] == '..' and segments[i-1]: del segments[i-1:i+1] break i = i+1 else: break if len(segments) == 2 and segments[1] == '..' and segments[0] == '': segments[-1] = '' elif len(segments) >= 2 and segments[-1] == '..': segments[-2:] = [''] return urlunparse((scheme, netloc, '/'.join(segments), params, query, fragment))
4611b17b48fbba42d8afeec44a5999dd6c1f9c63 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/4611b17b48fbba42d8afeec44a5999dd6c1f9c63/urlparse.py
if len(segments) == 2 and segments[1] == '..' and segments[0] == '':
if segments == ['', '..']:
def urljoin(base, url, allow_fragments = 1): """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter.""" if not base: return url bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ urlparse(base, '', allow_fragments) scheme, netloc, path, params, query, fragment = \ urlparse(url, bscheme, allow_fragments) if scheme != bscheme or scheme not in uses_relative: return urlunparse((scheme, netloc, path, params, query, fragment)) if scheme in uses_netloc: if netloc: return urlunparse((scheme, netloc, path, params, query, fragment)) netloc = bnetloc if path[:1] == '/': return urlunparse((scheme, netloc, path, params, query, fragment)) if not path: return urlunparse((scheme, netloc, bpath, params, query or bquery, fragment)) segments = bpath.split('/')[:-1] + path.split('/') # XXX The stuff below is bogus in various ways... if segments[-1] == '.': segments[-1] = '' while '.' in segments: segments.remove('.') while 1: i = 1 n = len(segments) - 1 while i < n: if segments[i] == '..' and segments[i-1]: del segments[i-1:i+1] break i = i+1 else: break if len(segments) == 2 and segments[1] == '..' and segments[0] == '': segments[-1] = '' elif len(segments) >= 2 and segments[-1] == '..': segments[-2:] = [''] return urlunparse((scheme, netloc, '/'.join(segments), params, query, fragment))
4611b17b48fbba42d8afeec44a5999dd6c1f9c63 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/4611b17b48fbba42d8afeec44a5999dd6c1f9c63/urlparse.py
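The three urljoin records tighten relative-reference resolution: the joined URL is returned directly, the base query is no longer merged in when the reference has its own empty path handled earlier, '..' is only collapsed against a real preceding segment, and the ['', '..'] case is normalized. Two resolutions this logic is expected to produce:

from urlparse import urljoin               # urllib.parse.urljoin on Python 3

assert urljoin('http://a/b/c/d;p?q', 'g') == 'http://a/b/c/g'
# each '..' climbs one directory, as in RFC 3986 section 5.4
assert urljoin('http://a/b/c/d;p?q', '../../g') == 'http://a/g'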
try:
    what, tdelta, fileno, lineno = self._nextitem()
except TypeError:
    self._reader.close()
    raise StopIteration()
what, tdelta, fileno, lineno = self._nextitem()
def next(self, index=0):
    while 1:
        try:
            what, tdelta, fileno, lineno = self._nextitem()
        except TypeError:
            # logreader().next() returns None at the end
            self._reader.close()
            raise StopIteration()
44baead3145ef04cd1b5e9139fc7e3e221674c41 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/44baead3145ef04cd1b5e9139fc7e3e221674c41/log.py
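This hotshot record hinges on the unpacking behavior exercised by the doctests earlier in this file: when _nextitem() returns None at the end of the log, tuple unpacking raises TypeError (not ValueError), which is translated into StopIteration. A standalone sketch with hypothetical names:

def take_first(item):
    try:
        what, tdelta, fileno, lineno = item
    except TypeError:
        # None (or any non-iterable) cannot be unpacked
        raise StopIteration()
    return what

assert take_first((1, 2, 3, 4)) == 1
try:
    take_first(None)
except StopIteration:
    pass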
try:
    import sys
    orig = sys.getrefcount(__name__)
    socket.getnameinfo(__name__,0)
except SystemError:
    if sys.getrefcount(__name__) <> orig:
        raise TestFailed,"socket.getnameinfo loses a reference"
import sys
if not sys.platform.startswith('java'):
    try:
        orig = sys.getrefcount(__name__)
        socket.getnameinfo(__name__,0)
    except SystemError:
        if sys.getrefcount(__name__) <> orig:
            raise TestFailed,"socket.getnameinfo loses a reference"
def missing_ok(str):
    try:
        getattr(socket, str)
    except AttributeError:
        pass
c934f4cbdcf820ba2c0221e6812d533d741a4253 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c934f4cbdcf820ba2c0221e6812d533d741a4253/test_socket.py
has_cte = is_qp = 0
has_cte = is_qp = is_base64 = 0
def mimify_part(ifile, ofile, is_mime): '''Convert an 8bit part of a MIME mail message to quoted-printable.''' has_cte = is_qp = 0 multipart = None must_quote_body = must_quote_header = has_iso_chars = 0 header = [] header_end = '' message = [] message_end = '' # read header hfile = HeaderFile(ifile) while 1: line = hfile.readline() if not line: break if not must_quote_header and iso_char.search(line) >= 0: must_quote_header = 1 if mv.match(line) >= 0: is_mime = 1 if cte.match(line) >= 0: has_cte = 1 if qp.match(line) >= 0: is_qp = 1 if mp.match(line) >= 0: multipart = '--' + mp.group(1) if he.match(line) >= 0: header_end = line break header.append(line) # read body while 1: line = ifile.readline() if not line: break if multipart: if line == multipart + '--\n': message_end = line break if line == multipart + '\n': message_end = line break if is_qp: while line[-2:] == '=\n': line = line[:-2] newline = ifile.readline() if newline[:len(QUOTE)] == QUOTE: newline = newline[len(QUOTE):] line = line + newline line = mime_decode(line) message.append(line) if not has_iso_chars: if iso_char.search(line) >= 0: has_iso_chars = must_quote_body = 1 if not must_quote_body: if len(line) > MAXLEN: must_quote_body = 1 # convert and output header and body for line in header: if must_quote_header: line = mime_encode_header(line) if chrset.match(line) >= 0: if has_iso_chars: # change us-ascii into iso-8859-1 if string.lower(chrset.group(2)) == 'us-ascii': line = chrset.group(1) + \ CHARSET + chrset.group(3) else: # change iso-8859-* into us-ascii line = chrset.group(1) + 'us-ascii' + chrset.group(3) if has_cte and cte.match(line) >= 0: line = 'Content-Transfer-Encoding: ' if must_quote_body: line = line + 'quoted-printable\n' else: line = line + '7bit\n' ofile.write(line) if (must_quote_header or must_quote_body) and not is_mime: ofile.write('Mime-Version: 1.0\n') ofile.write('Content-Type: text/plain; ') if has_iso_chars: ofile.write('charset="%s"\n' % CHARSET) else: ofile.write('charset="us-ascii"\n') if must_quote_body and not has_cte: ofile.write('Content-Transfer-Encoding: quoted-printable\n') ofile.write(header_end) for line in message: if must_quote_body: line = mime_encode(line, 0) ofile.write(line) ofile.write(message_end) line = message_end while multipart: if line == multipart + '--\n': return if line == multipart + '\n': nifile = File(ifile, multipart) mimify_part(nifile, ofile, 1) line = nifile.peek ofile.write(line) continue
2b85cde0d17a1e1c8a5152e25a229e8ed8c1151c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2b85cde0d17a1e1c8a5152e25a229e8ed8c1151c/mimify.py
if must_quote_body:
if is_base64:
    line = line + 'base64\n'
elif must_quote_body:
def mimify_part(ifile, ofile, is_mime): '''Convert an 8bit part of a MIME mail message to quoted-printable.''' has_cte = is_qp = 0 multipart = None must_quote_body = must_quote_header = has_iso_chars = 0 header = [] header_end = '' message = [] message_end = '' # read header hfile = HeaderFile(ifile) while 1: line = hfile.readline() if not line: break if not must_quote_header and iso_char.search(line) >= 0: must_quote_header = 1 if mv.match(line) >= 0: is_mime = 1 if cte.match(line) >= 0: has_cte = 1 if qp.match(line) >= 0: is_qp = 1 if mp.match(line) >= 0: multipart = '--' + mp.group(1) if he.match(line) >= 0: header_end = line break header.append(line) # read body while 1: line = ifile.readline() if not line: break if multipart: if line == multipart + '--\n': message_end = line break if line == multipart + '\n': message_end = line break if is_qp: while line[-2:] == '=\n': line = line[:-2] newline = ifile.readline() if newline[:len(QUOTE)] == QUOTE: newline = newline[len(QUOTE):] line = line + newline line = mime_decode(line) message.append(line) if not has_iso_chars: if iso_char.search(line) >= 0: has_iso_chars = must_quote_body = 1 if not must_quote_body: if len(line) > MAXLEN: must_quote_body = 1 # convert and output header and body for line in header: if must_quote_header: line = mime_encode_header(line) if chrset.match(line) >= 0: if has_iso_chars: # change us-ascii into iso-8859-1 if string.lower(chrset.group(2)) == 'us-ascii': line = chrset.group(1) + \ CHARSET + chrset.group(3) else: # change iso-8859-* into us-ascii line = chrset.group(1) + 'us-ascii' + chrset.group(3) if has_cte and cte.match(line) >= 0: line = 'Content-Transfer-Encoding: ' if must_quote_body: line = line + 'quoted-printable\n' else: line = line + '7bit\n' ofile.write(line) if (must_quote_header or must_quote_body) and not is_mime: ofile.write('Mime-Version: 1.0\n') ofile.write('Content-Type: text/plain; ') if has_iso_chars: ofile.write('charset="%s"\n' % CHARSET) else: ofile.write('charset="us-ascii"\n') if must_quote_body and not has_cte: ofile.write('Content-Transfer-Encoding: quoted-printable\n') ofile.write(header_end) for line in message: if must_quote_body: line = mime_encode(line, 0) ofile.write(line) ofile.write(message_end) line = message_end while multipart: if line == multipart + '--\n': return if line == multipart + '\n': nifile = File(ifile, multipart) mimify_part(nifile, ofile, 1) line = nifile.peek ofile.write(line) continue
2b85cde0d17a1e1c8a5152e25a229e8ed8c1151c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2b85cde0d17a1e1c8a5152e25a229e8ed8c1151c/mimify.py
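The mimify records add an is_base64 flag so that parts already declared as base64 keep 'Content-Transfer-Encoding: base64' instead of being relabeled quoted-printable. A hedged sketch of just that header-rewriting decision, isolated from the rest of mimify_part (the helper name is hypothetical):

def cte_header(is_base64, must_quote_body):
    # mirror the rewritten Content-Transfer-Encoding branch
    line = 'Content-Transfer-Encoding: '
    if is_base64:
        line = line + 'base64\n'
    elif must_quote_body:
        line = line + 'quoted-printable\n'
    else:
        line = line + '7bit\n'
    return line

assert cte_header(1, 0) == 'Content-Transfer-Encoding: base64\n'
assert cte_header(0, 1) == 'Content-Transfer-Encoding: quoted-printable\n'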
self.compiler = new_compiler (plat=os.environ.get ('PLAT'), verbose=self.verbose,
self.compiler = new_compiler (verbose=self.verbose,
def run (self):
3a49bb521a67df72c462c5b2454d2d1e5ed5ba8d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/3a49bb521a67df72c462c5b2454d2d1e5ed5ba8d/build_ext.py
def __init__(self, fp):
seekable = 0

def __init__(self, fp, seekable=1):
def __init__(self, fp):
    self.fp = fp
    self.stack = []         # Grows down
    self.level = 0
    self.last = 0
    self.start = self.fp.tell()
    self.posstack = []      # Grows down
97274432c613953baf46fea6c3146111ef5c7c53 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/97274432c613953baf46fea6c3146111ef5c7c53/multifile.py
self.start = self.fp.tell()
self.posstack = []
if seekable:
    self.seekable = 1
    self.start = self.fp.tell()
    self.posstack = []
def __init__(self, fp):
    self.fp = fp
    self.stack = []         # Grows down
    self.level = 0
    self.last = 0
    self.start = self.fp.tell()
    self.posstack = []      # Grows down
97274432c613953baf46fea6c3146111ef5c7c53 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/97274432c613953baf46fea6c3146111ef5c7c53/multifile.py
self.lastpos = self.tell() - len(line)
if self.seekable:
    self.lastpos = self.tell() - len(line)
def readline(self): if self.level > 0: return '' line = self.fp.readline() if not line: self.level = len(self.stack) self.last = (self.level > 0) if self.last: err('*** Sudden EOF in MultiFile.readline()\n') return '' if line[:2] <> '--': return line n = len(line) k = n while k > 0 and line[k-1] in string.whitespace: k = k-1 mark = line[2:k] if mark[-2:] == '--': mark1 = mark[:-2] else: mark1 = None for i in range(len(self.stack)): sep = self.stack[i] if sep == mark: self.last = 0 break elif mark1 <> None and sep == mark1: self.last = 1 break else: return line # Get here after break out of loop self.lastpos = self.tell() - len(line) self.level = i+1 if self.level > 1: err('*** Missing endmarker in MultiFile.readline()\n') return ''
97274432c613953baf46fea6c3146111ef5c7c53 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/97274432c613953baf46fea6c3146111ef5c7c53/multifile.py
self.start = self.fp.tell()
if self.seekable:
    self.start = self.fp.tell()
def next(self):
    while self.readline(): pass
    if self.level > 1 or self.last:
        return 0
    self.level = 0
    self.last = 0
    self.start = self.fp.tell()
    return 1
97274432c613953baf46fea6c3146111ef5c7c53 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/97274432c613953baf46fea6c3146111ef5c7c53/multifile.py
self.posstack.insert(0, self.start)
self.start = self.fp.tell()
if self.seekable:
    self.posstack.insert(0, self.start)
    self.start = self.fp.tell()
def push(self, sep):
    if self.level > 0:
        raise Error, 'bad MultiFile.push() call'
    self.stack.insert(0, sep)
    self.posstack.insert(0, self.start)
    self.start = self.fp.tell()
97274432c613953baf46fea6c3146111ef5c7c53 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/97274432c613953baf46fea6c3146111ef5c7c53/multifile.py
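The multifile records above add a seekable flag so MultiFile can wrap streams that do not support tell()/seek(), such as sockets: the start/posstack/lastpos bookkeeping is only done when seeking is possible. A hedged usage sketch over an in-memory stream, assuming the patched constructor shown above (both modules are Python 2 only):

import multifile, StringIO

data = ("preamble\n"
        "--BOUNDARY\n"
        "first part\n"
        "--BOUNDARY\n"
        "second part\n"
        "--BOUNDARY--\n")

mf = multifile.MultiFile(StringIO.StringIO(data), seekable=0)
mf.push('BOUNDARY')
parts = []
while mf.next():                           # advance to each part in turn
    parts.append(mf.read())
assert parts == ['first part\n', 'second part\n']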
socketDataProcessed = threading.Condition()
socketDataProcessed = threading.Event()
def handleLogRecord(self, record):
    logname = "logrecv.tcp." + record.name
    #If the end-of-messages sentinel is seen, tell the server to terminate
    if record.msg == FINISH_UP:
        self.server.abort = 1
    record.msg = record.msg + " (via " + logname + ")"
    logger = logging.getLogger(logname)
    logger.handle(record)
b12ca583a6d7086b0f656a4acc8ada68425877b6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/b12ca583a6d7086b0f656a4acc8ada68425877b6/test_logging.py
socketDataProcessed.acquire()
socketDataProcessed.notify()
socketDataProcessed.release()
socketDataProcessed.set()
def serve_until_stopped(self):
    abort = 0
    while not abort:
        rd, wr, ex = select.select([self.socket.fileno()], [], [], self.timeout)
        if rd:
            self.handle_request()
        abort = self.abort
    #notify the main thread that we're about to exit
    socketDataProcessed.acquire()
    socketDataProcessed.notify()
    socketDataProcessed.release()
b12ca583a6d7086b0f656a4acc8ada68425877b6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/b12ca583a6d7086b0f656a4acc8ada68425877b6/test_logging.py
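The test_logging records replace a Condition with an Event to avoid a race: notify() is lost if the waiter has not reached wait() yet, while Event.set() latches, so a later wait() returns immediately. A minimal sketch:

import threading

done = threading.Event()

def worker():
    # ... the socket server would do its work here ...
    done.set()                             # latches even if nobody waits yet

t = threading.Thread(target=worker)
t.start()
done.wait()                                # returns at once if already set
t.join()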