rem: stringlengths 1 to 322k
add: stringlengths 0 to 2.05M
context: stringlengths 4 to 228k
meta: stringlengths 156 to 215
self._jsEndElement(item)
self.jsEndElement(item)
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
elif self.headers.get('Content-Type') != gm[0]:
elif self.headers.get('Content-Type') != gm[0] and gm[0] in _fix_content_types:
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() if response and response[1] == '100': # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`) # check content-type against our own guess gm = mimetypes.guess_type(self.document, None) if gm[0]: # guessed an own content type if not self.headers.has_key('Content-Type'): self.headers['Content-Type'] = gm[0] print >>sys.stderr, _("Warning: %s guessed Content-Type (%s)") % \ (self.url, gm[0]) elif self.headers.get('Content-Type') != gm[0]: print >>sys.stderr, _("Warning: %s guessed Content-Type (%s) != server Content-Type (%s)") % \ (self.url, gm[0], self.headers.get('Content-Type')) self.headers['Content-Type'] = gm[0] if gm[1]: # guessed an own encoding type if not self.headers.has_key('Content-Encoding'): self.headers['Content-Encoding'] = gm[1] print >>sys.stderr, _("Warning: %s guessed Content-Encoding (%s)") % \ (self.url, gm[1]) elif self.headers.get('Content-Encoding') != gm[1]: print >>sys.stderr, _("Warning: %s guessed Content-Encoding (%s) != server Content-Encoding (%s)") % \ (self.url, gm[1], self.headers.get('Content-Encoding')) # only fix html content type if gm[1] in _fix_content_types: self.headers['Content-Encoding'] = gm[1] # will content be rewritten? rewrite = None for ro in config['mime_content_rewriting']: if ro.match(self.headers.get('Content-Type', '')): rewrite = "True" break # add client accept-encoding value self.headers['Accept-Encoding'] = self.client.compress if self.headers.has_key('Content-Length'): self.bytes_remaining = int(self.headers['Content-Length']) #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining) if rewrite: remove_headers(self.headers, ['Content-Length']) else: self.bytes_remaining = None
ec615ca9134422dbd5baa580632d3116db58b65c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ec615ca9134422dbd5baa580632d3116db58b65c/HttpServer.py
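In this record the guess-vs-server check is tightened: the proxy now overrides the server's Content-Type only when its own guess is also listed in _fix_content_types. A minimal sketch of the added guard (the real contents of _fix_content_types are not shown in this record, so the list below is assumed):

    _fix_content_types = ['text/html']   # assumed contents, for illustration only

    def should_override(server_type, guessed_type):
        # override only when the guess disagrees AND is a type we trust to fix
        return (guessed_type is not None
                and server_type != guessed_type
                and guessed_type in _fix_content_types)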
(self.url, gm[0], self.headers.get('Content-Type'))
(self.url, gm[0], self.headers.get('Content-Type'))
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() if response and response[1] == '100': # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`) # check content-type against our own guess gm = mimetypes.guess_type(self.document, None) if gm[0]: # guessed an own content type if not self.headers.has_key('Content-Type'): self.headers['Content-Type'] = gm[0] print >>sys.stderr, _("Warning: %s guessed Content-Type (%s)") % \ (self.url, gm[0]) elif self.headers.get('Content-Type') != gm[0]: print >>sys.stderr, _("Warning: %s guessed Content-Type (%s) != server Content-Type (%s)") % \ (self.url, gm[0], self.headers.get('Content-Type')) self.headers['Content-Type'] = gm[0] if gm[1]: # guessed an own encoding type if not self.headers.has_key('Content-Encoding'): self.headers['Content-Encoding'] = gm[1] print >>sys.stderr, _("Warning: %s guessed Content-Encoding (%s)") % \ (self.url, gm[1]) elif self.headers.get('Content-Encoding') != gm[1]: print >>sys.stderr, _("Warning: %s guessed Content-Encoding (%s) != server Content-Encoding (%s)") % \ (self.url, gm[1], self.headers.get('Content-Encoding')) # only fix html content type if gm[1] in _fix_content_types: self.headers['Content-Encoding'] = gm[1] # will content be rewritten? rewrite = None for ro in config['mime_content_rewriting']: if ro.match(self.headers.get('Content-Type', '')): rewrite = "True" break # add client accept-encoding value self.headers['Accept-Encoding'] = self.client.compress if self.headers.has_key('Content-Length'): self.bytes_remaining = int(self.headers['Content-Length']) #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining) if rewrite: remove_headers(self.headers, ['Content-Length']) else: self.bytes_remaining = None
ec615ca9134422dbd5baa580632d3116db58b65c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ec615ca9134422dbd5baa580632d3116db58b65c/HttpServer.py
if gm[1] in _fix_content_types: self.headers['Content-Encoding'] = gm[1]
self.headers['Content-Encoding'] = gm[1]
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() if response and response[1] == '100': # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`) # check content-type against our own guess gm = mimetypes.guess_type(self.document, None) if gm[0]: # guessed an own content type if not self.headers.has_key('Content-Type'): self.headers['Content-Type'] = gm[0] print >>sys.stderr, _("Warning: %s guessed Content-Type (%s)") % \ (self.url, gm[0]) elif self.headers.get('Content-Type') != gm[0]: print >>sys.stderr, _("Warning: %s guessed Content-Type (%s) != server Content-Type (%s)") % \ (self.url, gm[0], self.headers.get('Content-Type')) self.headers['Content-Type'] = gm[0] if gm[1]: # guessed an own encoding type if not self.headers.has_key('Content-Encoding'): self.headers['Content-Encoding'] = gm[1] print >>sys.stderr, _("Warning: %s guessed Content-Encoding (%s)") % \ (self.url, gm[1]) elif self.headers.get('Content-Encoding') != gm[1]: print >>sys.stderr, _("Warning: %s guessed Content-Encoding (%s) != server Content-Encoding (%s)") % \ (self.url, gm[1], self.headers.get('Content-Encoding')) # only fix html content type if gm[1] in _fix_content_types: self.headers['Content-Encoding'] = gm[1] # will content be rewritten? rewrite = None for ro in config['mime_content_rewriting']: if ro.match(self.headers.get('Content-Type', '')): rewrite = "True" break # add client accept-encoding value self.headers['Accept-Encoding'] = self.client.compress if self.headers.has_key('Content-Length'): self.bytes_remaining = int(self.headers['Content-Length']) #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining) if rewrite: remove_headers(self.headers, ['Content-Length']) else: self.bytes_remaining = None
ec615ca9134422dbd5baa580632d3116db58b65c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ec615ca9134422dbd5baa580632d3116db58b65c/HttpServer.py
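The complementary change above drops the same membership test from the Content-Encoding branch. mimetypes.guess_type() returns a (type, encoding) pair, so gm[1] is an encoding rather than a content type and could never appear in _fix_content_types; the old guard left the header update unreachable:

    import mimetypes
    # the second slot is an encoding such as 'gzip', not a content type
    mimetypes.guess_type('page.html.gz')   # ('text/html', 'gzip')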
def reset (self): """Reset to default values""" self['port'] = 8080 self['sslport'] = 8443 self['sslgateway'] = 0 self['adminuser'] = "" self['adminpass'] = "" self['proxyuser'] = "" self['proxypass'] = "" self['parentproxy'] = "" self['parentproxyport'] = 3128 self['parentproxyuser'] = "" self['parentproxypass'] = "" # dynamically stored parent proxy authorization credentials self['parentproxycreds'] = None self['folderrules'] = [] self['filters'] = [] self['filterlist'] = [[],[],[],[],[],[],[],[],[],[]] self['colorize'] = 0 self['nofilterhosts'] = None # DNS resolved nofilterhosts self['allowedhosts'] = None self['starttime'] = time.time()
540a64c653d1a99ca189c67f23ae6401e651dc1e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/540a64c653d1a99ca189c67f23ae6401e651dc1e/__init__.py
f = file(self['configfile'], 'w')
f = file(self.configfile, 'w')
def write_proxyconf (self): """write proxy configuration""" f = file(self['configfile'], 'w') f.write("""<?xml version="1.0" encoding="%s"?>
540a64c653d1a99ca189c67f23ae6401e651dc1e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/540a64c653d1a99ca189c67f23ae6401e651dc1e/__init__.py
debug(GUI, "Translator catalog %s", str(translator._catalog))
def __init__ (self, client, url, form, protocol, status=200, msg=i18n._('Ok'), context={}, headers={'Content-Type': 'text/html'}): self.client = client # we pretend to be the server self.connected = True try: lang = i18n.get_headers_lang(headers) # get the template filename path, dirs, lang = get_template_url(url, lang) # do not rely on content-type header value if path.endswith('.html'): headers['Content-Type'] = 'text/html' f = file(path) # get TAL context context = get_context(dirs, form, context, lang) # get translator translator = gettext.translation(Name, LocaleDir, [lang], fallback=True) debug(GUI, "Using translator %s", str(translator.info())) debug(GUI, "Translator catalog %s", str(translator._catalog)) # expand template data = expand_template(f, context, translator=translator) else: f = file(path, 'rb') data = f.read() except IOError, e: exception(GUI, "Wrong path `%s'", url) # XXX this can actually lead to a maximum recursion # error when client.error caused the exception return client.error(404, i18n._("Not Found")) except: # catch all other exceptions and report internal error exception(GUI, "Template error") return client.error(500, i18n._("Internal Error")) f.close() # write response self.put_response(data, protocol, status, msg, headers)
71ab1a58a5aeef74cce9f18822f5b0986af478d1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/71ab1a58a5aeef74cce9f18822f5b0986af478d1/__init__.py
if compobj: header = compobj['header'] if header: compobj['header'] = '' wc.log.debug(wc.LOG_FILTER, 'writing gzip header') compobj['size'] += len(data) compobj['crc'] = zlib.crc32(data, compobj['crc']) data = "%s%s" % (header, compobj['compressor'].compress(data))
header = compobj['header'] if header: compobj['header'] = '' wc.log.debug(wc.LOG_FILTER, 'writing gzip header') compobj['size'] += len(data) compobj['crc'] = zlib.crc32(data, compobj['crc']) data = "%s%s" % (header, compobj['compressor'].compress(data))
def filter (self, data, **attrs): """Compress the string s. Note that compression state is saved outside of this function in the compression object. """ if self.init_compressor: self.set_encoding_header(attrs) self.init_compressor = False if not attrs.has_key('compressobj'): return data compobj = attrs['compressobj'] if compobj: header = compobj['header'] if header: compobj['header'] = '' wc.log.debug(wc.LOG_FILTER, 'writing gzip header') compobj['size'] += len(data) compobj['crc'] = zlib.crc32(data, compobj['crc']) data = "%s%s" % (header, compobj['compressor'].compress(data)) return data
14c89009117695ced062afbb3779b8881bc9abe1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/14c89009117695ced062afbb3779b8881bc9abe1/Compress.py
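The filter() hunk streams gzip output chunk by chunk: the stored header is emitted exactly once (it is cleared after first use), and running CRC and size counters are updated before the chunk is deflated. A compact sketch of that per-chunk state update, using the same compobj keys as the source:

    import zlib

    def compress_chunk(compobj, data):
        # emit the stored gzip header once, then stream deflate output
        header, compobj['header'] = compobj['header'], ''
        compobj['size'] += len(data)
        compobj['crc'] = zlib.crc32(data, compobj['crc'])
        return header + compobj['compressor'].compress(data)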
if compobj: header = compobj['header'] if header: wc.log.debug(wc.LOG_FILTER, 'final writing gzip header') pass if data: compobj['size'] += len(data) compobj['crc'] = zlib.crc32(data, compobj['crc']) data = "%s%s" % (header, compobj['compressor'].compress(data)) else: data = header wc.log.debug(wc.LOG_FILTER, 'finishing compressor') data += "%s%s%s" % (compobj['compressor'].flush(zlib.Z_FINISH), struct.pack('<l', compobj['crc']), struct.pack('<l', compobj['size']))
header = compobj['header'] if header: wc.log.debug(wc.LOG_FILTER, 'final writing gzip header') pass if data: compobj['size'] += len(data) compobj['crc'] = zlib.crc32(data, compobj['crc']) data = "%s%s" % (header, compobj['compressor'].compress(data)) else: data = header wc.log.debug(wc.LOG_FILTER, 'finishing compressor') data += "%s%s%s" % (compobj['compressor'].flush(zlib.Z_FINISH), struct.pack('<l', compobj['crc']), struct.pack('<l', compobj['size']))
def finish (self, data, **attrs): """final compression of data, flush gzip buffers""" if not attrs.has_key('compressobj'): return data compobj = attrs['compressobj'] if compobj: header = compobj['header'] if header: wc.log.debug(wc.LOG_FILTER, 'final writing gzip header') pass if data: compobj['size'] += len(data) compobj['crc'] = zlib.crc32(data, compobj['crc']) data = "%s%s" % (header, compobj['compressor'].compress(data)) else: data = header wc.log.debug(wc.LOG_FILTER, 'finishing compressor') data += "%s%s%s" % (compobj['compressor'].flush(zlib.Z_FINISH), struct.pack('<l', compobj['crc']), struct.pack('<l', compobj['size'])) return data
14c89009117695ced062afbb3779b8881bc9abe1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/14c89009117695ced062afbb3779b8881bc9abe1/Compress.py
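finish() then assembles the gzip trailer by hand. A self-contained sketch of that trailer logic (the helper name is illustrative):

    import struct, zlib

    def gzip_trailer(compressor, crc, size):
        # flush the deflate stream, then append CRC32 and uncompressed size
        # as little-endian 32-bit values, as the gzip format requires
        return (compressor.flush(zlib.Z_FINISH)
                + struct.pack('<l', crc)
                + struct.pack('<l', size))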
res.append(key[len(prefix):], get_item_value(form[key]))
res.append((key[len(prefix):], get_item_value(form[key])))
def get_prefix_vals (form, prefix): """return a list of (key, value) pairs where ``prefix+key'' is a valid form field""" res = [] for key in form: if key.startswith(prefix): res.append(key[len(prefix):], get_item_value(form[key])) return res
c56fa52a2caa06cd3948334ebd929c78b5db3a72 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/c56fa52a2caa06cd3948334ebd929c78b5db3a72/__init__.py
return self.open(newurl, data)
if data is None: return self.open(newurl) else: return self.open(newurl, data)
def http_error_302(self, url, fp, errcode, errmsg, headers, data=None): # XXX The server can force infinite recursion here! if headers.has_key('location'): newurl = headers['location'] elif headers.has_key('uri'): newurl = headers['uri'] else: return void = fp.read() fp.close() # In case the server sent a relative URL, join with original: newurl = basejoin("http:" + url, newurl) return self.open(newurl, data)
3c8baedaf8c69bd92f06a7de5f16cde19a9beb55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3c8baedaf8c69bd92f06a7de5f16cde19a9beb55/urllib.py
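This urllib record, together with the 401 records that follow, threads the request body through the redirect and auth-retry paths: data becomes an optional argument and is forwarded only when it was actually supplied, so the no-data path keeps its original call shape. Sketch of the branching pattern (the helper name is illustrative):

    def _retry_open(opener, newurl, data=None):
        # forward data only when the caller actually supplied it
        if data is None:
            return opener.open(newurl)
        return opener.open(newurl, data)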
return getattr(self,name)(url, realm) def retry_http_basic_auth(self, url, realm, data):
if data is None: return getattr(self,name)(url, realm) else: return getattr(self,name)(url, realm, data) def retry_http_basic_auth(self, url, realm, data=None):
def http_error_401(self, url, fp, errcode, errmsg, headers, data=None): if headers.has_key('www-authenticate'): stuff = headers['www-authenticate'] import re match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) if match: scheme, realm = match.groups() if string.lower(scheme) == 'basic': name = 'retry_' + self.type + '_basic_auth' return getattr(self,name)(url, realm)
3c8baedaf8c69bd92f06a7de5f16cde19a9beb55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3c8baedaf8c69bd92f06a7de5f16cde19a9beb55/urllib.py
return self.open(newurl, data)
if data is None: return self.open(newurl) else: return self.open(newurl, data)
def retry_http_basic_auth(self, url, realm, data): host, selector = splithost(url) i = string.find(host, '@') + 1 host = host[i:] user, passwd = self.get_user_passwd(host, realm, i) if not (user or passwd): return None host = user + ':' + passwd + '@' + host newurl = 'http://' + host + selector return self.open(newurl, data)
3c8baedaf8c69bd92f06a7de5f16cde19a9beb55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3c8baedaf8c69bd92f06a7de5f16cde19a9beb55/urllib.py
def retry_https_basic_auth(self, url, realm):
def retry_https_basic_auth(self, url, realm, data=None):
def retry_https_basic_auth(self, url, realm): host, selector = splithost(url) i = string.find(host, '@') + 1 host = host[i:] user, passwd = self.get_user_passwd(host, realm, i) if not (user or passwd): return None host = user + ':' + passwd + '@' + host newurl = '//' + host + selector return self.open_https(newurl)
3c8baedaf8c69bd92f06a7de5f16cde19a9beb55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3c8baedaf8c69bd92f06a7de5f16cde19a9beb55/urllib.py
return s[:colon-1], s[colon:]
path, file = s[:colon-1], s[colon:] if path and not ':' in path: path = path + ':' return path, file
def split(s): if ':' not in s: return '', s colon = 0 for i in range(len(s)): if s[i] == ':': colon = i+1 return s[:colon-1], s[colon:]
d6a111e2dd257510069e46e5d8bb85abd80cf184 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/d6a111e2dd257510069e46e5d8bb85abd80cf184/macpath.py
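The old macpath.split() dropped the colon that marks a bare volume name, so split() and join() could not round-trip. A sketch of the fixed behavior (rfind stands in for the original scan loop):

    def mac_split(s):
        # classic Mac OS paths use ':' as the separator
        if ':' not in s:
            return '', s
        colon = s.rfind(':') + 1
        path, file = s[:colon - 1], s[colon:]
        if path and ':' not in path:
            path = path + ':'   # keep 'HD:' distinguishable from relative 'HD'
        return path, file

    mac_split('HD:Folder:file')   # ('HD:Folder', 'file')
    mac_split('HD:file')          # ('HD:', 'file')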
"Divide two Rats, returning quotient and remainder (reversed args)."""
"""Divide two Rats, returning quotient and remainder (reversed args)."""
def __rdivmod__(self, other): "Divide two Rats, returning quotient and remainder (reversed args).""" if isint(other): other = Rat(other) elif not isRat(other): return NotImplemented return divmod(other, self)
53e9a8b9f3ce1514e2106c8a9eff713f84fc42c5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/53e9a8b9f3ce1514e2106c8a9eff713f84fc42c5/test_binop.py
dummy = (0, 0, 0, 0, "NULL")
dummy = (0, 0, 0, 0)
def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__
cfcea4921865a922744dc168dde5eaccde8fe50b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/cfcea4921865a922744dc168dde5eaccde8fe50b/makeunicodedata.py
DECOMPOSITION = [""]
def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__
cfcea4921865a922744dc168dde5eaccde8fe50b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/cfcea4921865a922744dc168dde5eaccde8fe50b/makeunicodedata.py
if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL"
def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__
cfcea4921865a922744dc168dde5eaccde8fe50b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/cfcea4921865a922744dc168dde5eaccde8fe50b/makeunicodedata.py
category, combining, bidirectional, mirrored, decomposition
category, combining, bidirectional, mirrored
def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__
cfcea4921865a922744dc168dde5eaccde8fe50b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/cfcea4921865a922744dc168dde5eaccde8fe50b/makeunicodedata.py
decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i
def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__
cfcea4921865a922744dc168dde5eaccde8fe50b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/cfcea4921865a922744dc168dde5eaccde8fe50b/makeunicodedata.py
print " {%d, %d, %d, %d, %s}," % item
print " {%d, %d, %d, %d}," % item
def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__
cfcea4921865a922744dc168dde5eaccde8fe50b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/cfcea4921865a922744dc168dde5eaccde8fe50b/makeunicodedata.py
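Taken together, the makeunicodedata records above shrink the per-character record from (category, combining, bidirectional, mirrored, decomposition) to four integers and intern each decomposition string once in a separate decomp_data/decomp_index pair. A sketch of that interning pattern:

    decomp_data = [""]   # index 0 means "no decomposition"

    def intern_decomposition(s):
        # store each distinct decomposition string once; records keep
        # only the index into decomp_data
        if not s:
            return 0
        try:
            return decomp_data.index(s)
        except ValueError:
            decomp_data.append(s)
            return len(decomp_data) - 1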
if not edit: if cmd: sys.argv = ["-c"] + args else: sys.argv = args or [""]
def main(): cmd = None edit = 0 debug = 0 startup = 0 try: opts, args = getopt.getopt(sys.argv[1:], "c:deist:") except getopt.error, msg: sys.stderr.write("Error: %s\n" % str(msg)) sys.stderr.write(usage_msg) sys.exit(2) for o, a in opts: if o == '-c': cmd = a if o == '-d': debug = 1 if o == '-e': edit = 1 if o == '-s': startup = 1 if o == '-t': PyShell.shell_title = a if not edit: if cmd: sys.argv = ["-c"] + args else: sys.argv = args or [""] for i in range(len(sys.path)): sys.path[i] = os.path.abspath(sys.path[i]) pathx = [] if edit: for filename in args: pathx.append(os.path.dirname(filename)) elif args and args[0] != "-": pathx.append(os.path.dirname(args[0])) else: pathx.append(os.curdir) for dir in pathx: dir = os.path.abspath(dir) if not dir in sys.path: sys.path.insert(0, dir) global flist, root root = Tk() fixwordbreaks(root) root.withdraw() flist = PyShellFileList(root) if edit: for filename in args: flist.open(filename) shell = PyShell(flist) interp = shell.interp flist.pyshell = shell if startup: filename = os.environ.get("IDLESTARTUP") or \ os.environ.get("PYTHONSTARTUP") if filename and os.path.isfile(filename): interp.execfile(filename) if debug: shell.open_debugger() if cmd: interp.execsource(cmd) elif not edit and args and args[0] != "-": interp.execfile(args[0]) shell.begin() root.mainloop() root.destroy()
10515b6483b622625838b3636dbb442097bfe7ef /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/10515b6483b622625838b3636dbb442097bfe7ef/PyShell.py
root = Tk()
root = Tk(className="Idle")
def main(): cmd = None edit = 0 debug = 0 startup = 0 try: opts, args = getopt.getopt(sys.argv[1:], "c:deist:") except getopt.error, msg: sys.stderr.write("Error: %s\n" % str(msg)) sys.stderr.write(usage_msg) sys.exit(2) for o, a in opts: if o == '-c': cmd = a if o == '-d': debug = 1 if o == '-e': edit = 1 if o == '-s': startup = 1 if o == '-t': PyShell.shell_title = a if not edit: if cmd: sys.argv = ["-c"] + args else: sys.argv = args or [""] for i in range(len(sys.path)): sys.path[i] = os.path.abspath(sys.path[i]) pathx = [] if edit: for filename in args: pathx.append(os.path.dirname(filename)) elif args and args[0] != "-": pathx.append(os.path.dirname(args[0])) else: pathx.append(os.curdir) for dir in pathx: dir = os.path.abspath(dir) if not dir in sys.path: sys.path.insert(0, dir) global flist, root root = Tk() fixwordbreaks(root) root.withdraw() flist = PyShellFileList(root) if edit: for filename in args: flist.open(filename) shell = PyShell(flist) interp = shell.interp flist.pyshell = shell if startup: filename = os.environ.get("IDLESTARTUP") or \ os.environ.get("PYTHONSTARTUP") if filename and os.path.isfile(filename): interp.execfile(filename) if debug: shell.open_debugger() if cmd: interp.execsource(cmd) elif not edit and args and args[0] != "-": interp.execfile(args[0]) shell.begin() root.mainloop() root.destroy()
10515b6483b622625838b3636dbb442097bfe7ef /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/10515b6483b622625838b3636dbb442097bfe7ef/PyShell.py
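Two changes land in PyShell.main(): the first record appears to drop the block that rewrote sys.argv outside edit mode, and the second passes className="Idle" to the Tk root, which sets the application class used for X resource lookup:

    from Tkinter import Tk          # Python 2 module name
    root = Tk(className="Idle")     # X resource class becomes 'Idle'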
fin = open(self.tmpin, 'wb')
fin = open(self.tmpin, 'w')
def test_encode(self): try: fin = open(self.tmpin, 'wb') fin.write(plaintext) fin.close()
3fa5e6ee4527219a9351413ac6ba425be8ea5dd8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3fa5e6ee4527219a9351413ac6ba425be8ea5dd8/test_uu.py
fin = open(self.tmpin, 'rb')
fin = open(self.tmpin, 'r')
def test_encode(self): try: fin = open(self.tmpin, 'wb') fin.write(plaintext) fin.close()
3fa5e6ee4527219a9351413ac6ba425be8ea5dd8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3fa5e6ee4527219a9351413ac6ba425be8ea5dd8/test_uu.py
uu.encode(self.tmpin, self.tmpout, mode=0644)
uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0644)
def test_encode(self): try: fin = open(self.tmpin, 'wb') fin.write(plaintext) fin.close()
3fa5e6ee4527219a9351413ac6ba425be8ea5dd8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3fa5e6ee4527219a9351413ac6ba425be8ea5dd8/test_uu.py
f = open(self.tmpin, 'wb')
f = open(self.tmpin, 'w')
def test_decode(self): try: f = open(self.tmpin, 'wb') f.write(encodedtextwrapped % (0644, self.tmpout)) f.close()
3fa5e6ee4527219a9351413ac6ba425be8ea5dd8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3fa5e6ee4527219a9351413ac6ba425be8ea5dd8/test_uu.py
f = open(self.tmpin, 'rb')
f = open(self.tmpin, 'r')
def test_decode(self): try: f = open(self.tmpin, 'wb') f.write(encodedtextwrapped % (0644, self.tmpout)) f.close()
3fa5e6ee4527219a9351413ac6ba425be8ea5dd8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3fa5e6ee4527219a9351413ac6ba425be8ea5dd8/test_uu.py
f = open(self.tmpin, 'rb')
f = open(self.tmpin, 'r')
def test_decodetwice(self): # Verify that decode() will refuse to overwrite an existing file try: f = cStringIO.StringIO(encodedtextwrapped % (0644, self.tmpout))
3fa5e6ee4527219a9351413ac6ba425be8ea5dd8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3fa5e6ee4527219a9351413ac6ba425be8ea5dd8/test_uu.py
f = open(self.tmpin, 'rb')
f = open(self.tmpin, 'r')
def test_decodetwice(self): # Verify that decode() will refuse to overwrite an existing file try: f = cStringIO.StringIO(encodedtextwrapped % (0644, self.tmpout))
3fa5e6ee4527219a9351413ac6ba425be8ea5dd8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/3fa5e6ee4527219a9351413ac6ba425be8ea5dd8/test_uu.py
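Across the test_uu records above, the temporary files move from binary mode ('wb'/'rb') to text mode ('w'/'r'), and uu.encode() is called with an explicit name argument (its third parameter) so the encoded 'begin' line records a known filename:

    import uu
    # the third positional argument is the name embedded in the 'begin' line
    uu.encode('input.txt', 'output.uu', 'input.txt', mode=0644)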
danger = [ x for x in tokens if x[0] == token.NAME and x[1] != 'n' ] if danger: raise ValueError, 'dangerous expression'
try: danger = [ x for x in tokens if x[0] == token.NAME and x[1] != 'n' ] except tokenize.TokenError: raise ValueError, 'plural forms expression error, maybe unbalanced parenthesis' else: if danger: raise ValueError, 'plural forms expression could be dangerous'
def c2py(plural): """ Gets a C expression as used in PO files for plural forms and returns a Python lambda function that implements an equivalent expression. """ # Security check, allow only the "n" identifier from StringIO import StringIO import token, tokenize tokens = tokenize.generate_tokens(StringIO(plural).readline) danger = [ x for x in tokens if x[0] == token.NAME and x[1] != 'n' ] if danger: raise ValueError, 'dangerous expression' # Replace some C operators by their Python equivalents plural = plural.replace('&&', ' and ') plural = plural.replace('||', ' or ') expr = re.compile(r'\![^=]') plural = expr.sub(' not ', plural) # Regular expression and replacement function used to transform # "a?b:c" to "test(a,b,c)". expr = re.compile(r'(.*?)\?(.*?):(.*)') def repl(x): return "test(%s, %s, %s)" % (x.group(1), x.group(2), expr.sub(repl, x.group(3))) # Code to transform the plural expression, taking care of parentheses stack = [''] for c in plural: if c == '(': stack.append('') elif c == ')': if len(stack) == 0: raise ValueError, 'unbalanced parenthesis in plural form' s = expr.sub(repl, stack.pop()) stack[-1] += '(%s)' % s else: stack[-1] += c plural = expr.sub(repl, stack.pop()) return eval('lambda n: int(%s)' % plural)
a57dccdcd473f3c8187e68348ebcd71c904be9e3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/a57dccdcd473f3c8187e68348ebcd71c904be9e3/gettext.py
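The replacement wraps the token scan in try/except: tokenize.generate_tokens() is lazy, so tokenize.TokenError only surfaces while the list comprehension drains it, and unbalanced input previously escaped as a raw tokenize error instead of a ValueError. Python 2 sketch matching the added code:

    from StringIO import StringIO
    import token, tokenize

    def check_plural_expr(plural):
        tokens = tokenize.generate_tokens(StringIO(plural).readline)
        try:
            danger = [t for t in tokens
                      if t[0] == token.NAME and t[1] != 'n']
        except tokenize.TokenError:
            raise ValueError('plural forms expression error, '
                             'maybe unbalanced parenthesis')
        if danger:
            raise ValueError('plural forms expression could be dangerous')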
expr = re.compile(r'\![^=]') plural = expr.sub(' not ', plural)
expr = re.compile(r'\!([^=])') plural = expr.sub(' not \\1', plural)
def c2py(plural): """ Gets a C expression as used in PO files for plural forms and returns a Python lambda function that implements an equivalent expression. """ # Security check, allow only the "n" identifier from StringIO import StringIO import token, tokenize tokens = tokenize.generate_tokens(StringIO(plural).readline) danger = [ x for x in tokens if x[0] == token.NAME and x[1] != 'n' ] if danger: raise ValueError, 'dangerous expression' # Replace some C operators by their Python equivalents plural = plural.replace('&&', ' and ') plural = plural.replace('||', ' or ') expr = re.compile(r'\![^=]') plural = expr.sub(' not ', plural) # Regular expression and replacement function used to transform # "a?b:c" to "test(a,b,c)". expr = re.compile(r'(.*?)\?(.*?):(.*)') def repl(x): return "test(%s, %s, %s)" % (x.group(1), x.group(2), expr.sub(repl, x.group(3))) # Code to transform the plural expression, taking care of parentheses stack = [''] for c in plural: if c == '(': stack.append('') elif c == ')': if len(stack) == 0: raise ValueError, 'unbalanced parenthesis in plural form' s = expr.sub(repl, stack.pop()) stack[-1] += '(%s)' % s else: stack[-1] += c plural = expr.sub(repl, stack.pop()) return eval('lambda n: int(%s)' % plural)
a57dccdcd473f3c8187e68348ebcd71c904be9e3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/a57dccdcd473f3c8187e68348ebcd71c904be9e3/gettext.py
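The original substitution consumed the character after '!' (the [^=] that only guarded against '!='), corrupting expressions such as '!n'. The fix captures that character and puts it back:

    import re
    re.sub(r'\![^=]', ' not ', '!n')        # ' not '   (the n is lost)
    re.sub(r'\!([^=])', ' not \\1', '!n')   # ' not n'  (fixed)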
if len(stack) == 0:
if len(stack) == 1:
def repl(x): return "test(%s, %s, %s)" % (x.group(1), x.group(2), expr.sub(repl, x.group(3)))
a57dccdcd473f3c8187e68348ebcd71c904be9e3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/a57dccdcd473f3c8187e68348ebcd71c904be9e3/gettext.py
if mlen == 0 and tmsg.lower().startswith('project-id-version:'):
if mlen == 0:
def _parse(self, fp): """Override this method to support alternative .mo formats.""" unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<I', buf[:4])[0] if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20]) ii = '<II' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20]) ii = '>II' else: raise IOError(0, 'Bad magic number', filename) # Now put all messages from the .mo file buffer into the catalog # dictionary. for i in xrange(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) mend = moff + mlen tlen, toff = unpack(ii, buf[transidx:transidx+8]) tend = toff + tlen if mend < buflen and tend < buflen: msg = buf[moff:mend] tmsg = buf[toff:tend] if msg.find('\x00') >= 0: # Plural forms msgid1, msgid2 = msg.split('\x00') tmsg = tmsg.split('\x00') for i in range(len(tmsg)): catalog[(msgid1, i)] = tmsg[i] else: catalog[msg] = tmsg else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0 and tmsg.lower().startswith('project-id-version:'): # Catalog description for item in tmsg.split('\n'): item = item.strip() if not item: continue k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v if k == 'content-type': self._charset = v.split('charset=')[1] elif k == 'plural-forms': v = v.split(';')
a57dccdcd473f3c8187e68348ebcd71c904be9e3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/a57dccdcd473f3c8187e68348ebcd71c904be9e3/gettext.py
'unixware5':
'unixware7':
def printlist(x, width=70, indent=4): """Print the elements of iterable x to stdout. Optional arg width (default 70) is the maximum line length. Optional arg indent (default 4) is the number of blanks with which to begin each line. """ from textwrap import fill blanks = ' ' * indent print fill(' '.join(map(str, x)), width, initial_indent=blanks, subsequent_indent=blanks)
21ee4091e10c6f05360bbb60e49aa3639408a612 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/21ee4091e10c6f05360bbb60e49aa3639408a612/regrtest.py
self.membernames = []
def __init__(self, name=None, mode="r", fileobj=None): """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to read from an existing archive, 'a' to append data to an existing file or 'w' to create a new file overwriting an existing one. `mode' defaults to 'r'. If `fileobj' is given, it is used for reading or writing data. If it can be determined, `mode' is overridden by `fileobj's mode. `fileobj' is not closed, when TarFile is closed. """ self.name = name
f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be/tarfile.py
self._check() if name not in self.membernames and not self._loaded: self._load() if name not in self.membernames:
tarinfo = self._getmember(name) if tarinfo is None:
def getmember(self, name): """Return a TarInfo object for member `name'. If `name' can not be found in the archive, KeyError is raised. If a member occurs more than once in the archive, its last occurence is assumed to be the most up-to-date version. """ self._check() if name not in self.membernames and not self._loaded: self._load() if name not in self.membernames: raise KeyError, "filename %r not found" % name return self._getmember(name)
f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be/tarfile.py
return self._getmember(name)
return tarinfo
def getmember(self, name): """Return a TarInfo object for member `name'. If `name' can not be found in the archive, KeyError is raised. If a member occurs more than once in the archive, its last occurence is assumed to be the most up-to-date version. """ self._check() if name not in self.membernames and not self._loaded: self._load() if name not in self.membernames: raise KeyError, "filename %r not found" % name return self._getmember(name)
f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be/tarfile.py
self._check() if not self._loaded: self._load() return self.membernames
return [tarinfo.name for tarinfo in self.getmembers()]
def getnames(self): """Return the members of the archive as a list of their names. It has the same order as the list returned by getmembers(). """ self._check() if not self._loaded: self._load() return self.membernames
f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be/tarfile.py
self._record_member(tarinfo)
self.members.append(tarinfo)
def addfile(self, tarinfo, fileobj=None): """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is given, tarinfo.size bytes are read from it and added to the archive. You can create TarInfo objects using gettarinfo(). On Windows platforms, `fileobj' should always be opened with mode 'rb' to avoid irritation about the file size. """ self._check("aw")
f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be/tarfile.py
self._record_member(tarinfo)
self.members.append(tarinfo)
def next(self): """Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m
f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be/tarfile.py
def next(self): """Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m
f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be/tarfile.py
self._record_member(tarinfo)
self.members.append(tarinfo)
def proc_sparse(self, tarinfo): """Analyze a GNU sparse header plus extra headers. """ buf = tarinfo.tobuf() sp = _ringbuffer() pos = 386 lastpos = 0L realpos = 0L # There are 4 possible sparse structs in the # first header. for i in xrange(4): try: offset = int(buf[pos:pos + 12], 8) numbytes = int(buf[pos + 12:pos + 24], 8) except ValueError: break if offset > lastpos: sp.append(_hole(lastpos, offset - lastpos)) sp.append(_data(offset, numbytes, realpos)) realpos += numbytes lastpos = offset + numbytes pos += 24
f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be/tarfile.py
end = len(self.members) else: end = self.members.index(tarinfo)
end = len(members) else: end = members.index(tarinfo)
def _getmember(self, name, tarinfo=None): """Find an archive member by name from bottom to top. If tarinfo is given, it is used as the starting point. """ if tarinfo is None: end = len(self.members) else: end = self.members.index(tarinfo)
f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be/tarfile.py
if name == self.membernames[i]: return self.members[i] def _record_member(self, tarinfo): """Record a tarinfo object in the internal datastructures. """ self.members.append(tarinfo) self.membernames.append(tarinfo.name)
if name == members[i].name: return members[i]
def _getmember(self, name, tarinfo=None): """Find an archive member by name from bottom to top. If tarinfo is given, it is used as the starting point. """ if tarinfo is None: end = len(self.members) else: end = self.members.index(tarinfo)
f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f3c5611fef7cbd2ac8f0bf4e44bf11fe210817be/tarfile.py
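Across the tarfile records above, the parallel membernames list and the _record_member() helper disappear: getnames() derives names from getmembers(), getmember() delegates to _getmember(), and new entries go straight to self.members.append(). Sketch of the consolidated reverse search (the last occurrence of a name wins):

    def _getmember(self, name, tarinfo=None):
        members = self.getmembers()
        if tarinfo is None:
            end = len(members)
        else:
            end = members.index(tarinfo)
        # search from the end so the most recently added member shadows
        # earlier ones with the same name
        for i in xrange(end - 1, -1, -1):
            if members[i].name == name:
                return members[i]
        return None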
langdict = {}
nelangs = []
def find(domain, localedir=None, languages=None): # Get some reasonable defaults for arguments that were not supplied if localedir is None: localedir = _default_localedir if languages is None: languages = [] for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'): val = os.environ.get(envar) if val: languages = val.split(':') break if 'C' not in languages: languages.append('C') # now normalize and expand the languages langdict = {} for lang in languages: for nelang in _expand_lang(lang): langdict[nelang] = nelang languages = langdict.keys() # select a language for lang in languages: if lang == 'C': break mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain) if os.path.exists(mofile): return mofile return None
75f8101c429e132472bba92f3972b7eac68f35f8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/75f8101c429e132472bba92f3972b7eac68f35f8/gettext.py
langdict[nelang] = nelang languages = langdict.keys()
if nelang not in nelangs: nelangs.append(nelang)
def find(domain, localedir=None, languages=None): # Get some reasonable defaults for arguments that were not supplied if localedir is None: localedir = _default_localedir if languages is None: languages = [] for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'): val = os.environ.get(envar) if val: languages = val.split(':') break if 'C' not in languages: languages.append('C') # now normalize and expand the languages langdict = {} for lang in languages: for nelang in _expand_lang(lang): langdict[nelang] = nelang languages = langdict.keys() # select a language for lang in languages: if lang == 'C': break mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain) if os.path.exists(mofile): return mofile return None
75f8101c429e132472bba92f3972b7eac68f35f8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/75f8101c429e132472bba92f3972b7eac68f35f8/gettext.py
for lang in languages:
for lang in nelangs:
def find(domain, localedir=None, languages=None): # Get some reasonable defaults for arguments that were not supplied if localedir is None: localedir = _default_localedir if languages is None: languages = [] for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'): val = os.environ.get(envar) if val: languages = val.split(':') break if 'C' not in languages: languages.append('C') # now normalize and expand the languages langdict = {} for lang in languages: for nelang in _expand_lang(lang): langdict[nelang] = nelang languages = langdict.keys() # select a language for lang in languages: if lang == 'C': break mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain) if os.path.exists(mofile): return mofile return None
75f8101c429e132472bba92f3972b7eac68f35f8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/75f8101c429e132472bba92f3972b7eac68f35f8/gettext.py
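The dict-based deduplication lost the user's preference order (dict keys carry no order in this Python), so the fix keeps an ordered, duplicate-free list instead (languages and _expand_lang come from the surrounding find() shown above):

    nelangs = []
    for lang in languages:
        for nelang in _expand_lang(lang):
            # preserve first-seen order while dropping duplicates
            if nelang not in nelangs:
                nelangs.append(nelang)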
self.spawn ([self.rc] +
self.spawn ([self.rc] + pp_opts +
def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
9582794c17b92ae5bb83878abe854d965e51dcdb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/9582794c17b92ae5bb83878abe854d965e51dcdb/msvccompiler.py
output.write(input.read())
return output.write(input.read())
def decode(input, output, encoding): """Decode common content-transfer-encodings (base64, quopri, uuencode).""" if encoding == 'base64': import base64 return base64.decode(input, output) if encoding == 'quoted-printable': import quopri return quopri.decode(input, output) if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'): import uu return uu.decode(input, output) if encoding in ('7bit', '8bit'): output.write(input.read()) if decodetab.has_key(encoding): pipethrough(input, decodetab[encoding], output) else: raise ValueError, \ 'unknown Content-Transfer-Encoding: %s' % encoding
25d1692434bb9613101f623bee2b59d86036a84e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/25d1692434bb9613101f623bee2b59d86036a84e/mimetools.py
output.write(input.read())
return output.write(input.read())
def encode(input, output, encoding): """Encode common content-transfer-encodings (base64, quopri, uuencode).""" if encoding == 'base64': import base64 return base64.encode(input, output) if encoding == 'quoted-printable': import quopri return quopri.encode(input, output, 0) if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'): import uu return uu.encode(input, output) if encoding in ('7bit', '8bit'): output.write(input.read()) if encodetab.has_key(encoding): pipethrough(input, encodetab[encoding], output) else: raise ValueError, \ 'unknown Content-Transfer-Encoding: %s' % encoding
25d1692434bb9613101f623bee2b59d86036a84e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/25d1692434bb9613101f623bee2b59d86036a84e/mimetools.py
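In both decode() and encode() the 7bit/8bit branch lacked a return, so control fell through to the codec-table lookup and raised 'unknown Content-Transfer-Encoding' for valid identity encodings. A minimal illustration of the fixed control flow:

    def copy_identity(encoding, input, output):
        if encoding in ('7bit', '8bit'):
            # the added return stops the fall-through to the error path
            return output.write(input.read())
        raise ValueError('unknown Content-Transfer-Encoding: %s' % encoding)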
def handle(self): """ Handle multiple requests - each expected to be a 4-byte length, followed by the LogRecord in pickle format. Logs the record according to whatever policy is configured locally. """ while 1: try: chunk = self.connection.recv(4) if len(chunk) < 4: break slen = struct.unpack(">L", chunk)[0] #print slen chunk = self.connection.recv(slen) while len(chunk) < slen: chunk = chunk + self.connection.recv(slen - len(chunk)) obj = self.unPickle(chunk) record = logging.LogRecord(None, None, "", 0, "", (), None) record.__dict__.update(obj) self.handleLogRecord(record) except: raise
2a1d51602b98effa0feddc2427ba5d8cd0641b77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/2a1d51602b98effa0feddc2427ba5d8cd0641b77/test_logging.py
rd, wr, ex = select([self.socket.fileno()],
rd, wr, ex = select.select([self.socket.fileno()],
def serve_until_stopped(self): abort = 0 while not abort: rd, wr, ex = select([self.socket.fileno()], [], [], self.timeout) if rd: self.handle_request() abort = self.abort
2a1d51602b98effa0feddc2427ba5d8cd0641b77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/2a1d51602b98effa0feddc2427ba5d8cd0641b77/test_logging.py
sys.stdout.write("About to start TCP server...\n")
def test_main(): rootLogger = logging.getLogger("") rootLogger.setLevel(logging.DEBUG) hdlr = logging.StreamHandler(sys.stdout) fmt = logging.Formatter(logging.BASIC_FORMAT) hdlr.setFormatter(fmt) rootLogger.addHandler(hdlr) #Set up a handler such that all events are sent via a socket to the log #receiver (logrecv). #The handler will only be added to the rootLogger for some of the tests hdlr = logging.handlers.SocketHandler('localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT) #Configure the logger for logrecv so events do not propagate beyond it. #The sockLogger output is buffered in memory until the end of the test, #and printed at the end. sockOut = cStringIO.StringIO() sockLogger = logging.getLogger("logrecv") sockLogger.setLevel(logging.DEBUG) sockhdlr = logging.StreamHandler(sockOut) sockhdlr.setFormatter(logging.Formatter( "%(name)s -> %(levelname)s: %(message)s")) sockLogger.addHandler(sockhdlr) sockLogger.propagate = 0 #Set up servers threads = [] tcpserver = LogRecordSocketReceiver() sys.stdout.write("About to start TCP server...\n") threads.append(threading.Thread(target=runTCP, args=(tcpserver,))) for thread in threads: thread.start() try: banner("log_test0", "begin") rootLogger.addHandler(hdlr) test0() rootLogger.removeHandler(hdlr) banner("log_test0", "end") banner("log_test1", "begin") test1() banner("log_test1", "end") banner("log_test2", "begin") test2() banner("log_test2", "end") banner("log_test3", "begin") test3() banner("log_test3", "end") banner("logrecv output", "begin") sys.stdout.write(sockOut.getvalue()) sockOut.close() banner("logrecv output", "end") finally: #shut down server tcpserver.abort = 1 for thread in threads: thread.join()
2a1d51602b98effa0feddc2427ba5d8cd0641b77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/2a1d51602b98effa0feddc2427ba5d8cd0641b77/test_logging.py
banner("logrecv output", "begin") sys.stdout.write(sockOut.getvalue()) sockOut.close() banner("logrecv output", "end")
def test_main(): rootLogger = logging.getLogger("") rootLogger.setLevel(logging.DEBUG) hdlr = logging.StreamHandler(sys.stdout) fmt = logging.Formatter(logging.BASIC_FORMAT) hdlr.setFormatter(fmt) rootLogger.addHandler(hdlr) #Set up a handler such that all events are sent via a socket to the log #receiver (logrecv). #The handler will only be added to the rootLogger for some of the tests hdlr = logging.handlers.SocketHandler('localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT) #Configure the logger for logrecv so events do not propagate beyond it. #The sockLogger output is buffered in memory until the end of the test, #and printed at the end. sockOut = cStringIO.StringIO() sockLogger = logging.getLogger("logrecv") sockLogger.setLevel(logging.DEBUG) sockhdlr = logging.StreamHandler(sockOut) sockhdlr.setFormatter(logging.Formatter( "%(name)s -> %(levelname)s: %(message)s")) sockLogger.addHandler(sockhdlr) sockLogger.propagate = 0 #Set up servers threads = [] tcpserver = LogRecordSocketReceiver() sys.stdout.write("About to start TCP server...\n") threads.append(threading.Thread(target=runTCP, args=(tcpserver,))) for thread in threads: thread.start() try: banner("log_test0", "begin") rootLogger.addHandler(hdlr) test0() rootLogger.removeHandler(hdlr) banner("log_test0", "end") banner("log_test1", "begin") test1() banner("log_test1", "end") banner("log_test2", "begin") test2() banner("log_test2", "end") banner("log_test3", "begin") test3() banner("log_test3", "end") banner("logrecv output", "begin") sys.stdout.write(sockOut.getvalue()) sockOut.close() banner("logrecv output", "end") finally: #shut down server tcpserver.abort = 1 for thread in threads: thread.join()
2a1d51602b98effa0feddc2427ba5d8cd0641b77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/2a1d51602b98effa0feddc2427ba5d8cd0641b77/test_logging.py
sys.stdout.flush()
def test_main():
    rootLogger = logging.getLogger("")
    rootLogger.setLevel(logging.DEBUG)
    hdlr = logging.StreamHandler(sys.stdout)
    fmt = logging.Formatter(logging.BASIC_FORMAT)
    hdlr.setFormatter(fmt)
    rootLogger.addHandler(hdlr)

    #Set up a handler such that all events are sent via a socket to the log
    #receiver (logrecv).
    #The handler will only be added to the rootLogger for some of the tests
    hdlr = logging.handlers.SocketHandler('localhost',
                    logging.handlers.DEFAULT_TCP_LOGGING_PORT)

    #Configure the logger for logrecv so events do not propagate beyond it.
    #The sockLogger output is buffered in memory until the end of the test,
    #and printed at the end.
    sockOut = cStringIO.StringIO()
    sockLogger = logging.getLogger("logrecv")
    sockLogger.setLevel(logging.DEBUG)
    sockhdlr = logging.StreamHandler(sockOut)
    sockhdlr.setFormatter(logging.Formatter(
                "%(name)s -> %(levelname)s: %(message)s"))
    sockLogger.addHandler(sockhdlr)
    sockLogger.propagate = 0

    #Set up servers
    threads = []
    tcpserver = LogRecordSocketReceiver()
    sys.stdout.write("About to start TCP server...\n")
    threads.append(threading.Thread(target=runTCP, args=(tcpserver,)))

    for thread in threads:
        thread.start()
    try:
        banner("log_test0", "begin")
        rootLogger.addHandler(hdlr)
        test0()
        rootLogger.removeHandler(hdlr)
        banner("log_test0", "end")

        banner("log_test1", "begin")
        test1()
        banner("log_test1", "end")

        banner("log_test2", "begin")
        test2()
        banner("log_test2", "end")

        banner("log_test3", "begin")
        test3()
        banner("log_test3", "end")

        banner("logrecv output", "begin")
        sys.stdout.write(sockOut.getvalue())
        sockOut.close()
        banner("logrecv output", "end")
    finally:
        #shut down server
        tcpserver.abort = 1
        for thread in threads:
            thread.join()
2a1d51602b98effa0feddc2427ba5d8cd0641b77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/2a1d51602b98effa0feddc2427ba5d8cd0641b77/test_logging.py
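The three test_logging records above mirror every log record to a socket-based receiver for part of a test run and flush stdout afterwards. A minimal sketch of that mirroring pattern using only the public logging API (host, port, and the message text are illustrative assumptions):

import sys
import logging
import logging.handlers

root = logging.getLogger("")
root.setLevel(logging.DEBUG)
root.addHandler(logging.StreamHandler(sys.stdout))      # console copy

# every record is also shipped to a TCP receiver, such as the
# LogRecordSocketReceiver used by the tests above
sock_hdlr = logging.handlers.SocketHandler(
    'localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
root.addHandler(sock_hdlr)

root.info("this record goes to both handlers")
root.removeHandler(sock_hdlr)   # detach again, as test_main() does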
topdir = os.getcwd() + 'build/rpm'
def run (self):
c4c06af575409157f788425dee4fa2e6b2f647ad /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/c4c06af575409157f788425dee4fa2e6b2f647ad/bdist_rpm.py
'_topdir ' + os.getcwd() + '/build/rpm',])
'_topdir %s/%s' % (os.getcwd(), rpm_base),])
def run (self):
c4c06af575409157f788425dee4fa2e6b2f647ad /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/c4c06af575409157f788425dee4fa2e6b2f647ad/bdist_rpm.py
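The two bdist_rpm records show the actual bug and its fix: plain concatenation drops the path separator, while the patched line interpolates both parts explicitly. A small sketch of the difference (rpm_base is an assumed variable name taken from the patched line; the final assertion holds on POSIX systems):

import os

rpm_base = 'build/rpm'                      # assumed value

broken = os.getcwd() + 'build/rpm'          # missing '/': '.../cwdbuild/rpm'
fixed = '%s/%s' % (os.getcwd(), rpm_base)   # what the patched line builds

# os.path.join expresses the same intent without manual separators
assert fixed == os.path.join(os.getcwd(), rpm_base)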
if self.will_close:
if self.length is None:
def read(self, amt=None):
    if self.fp is None:
        return ''
def9d2a17c6f34750f321e88286a08731fdb8b94 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/def9d2a17c6f34750f321e88286a08731fdb8b94/httplib.py
self.length -= amt
def read(self, amt=None):
    if self.fp is None:
        return ''
def9d2a17c6f34750f321e88286a08731fdb8b94 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/def9d2a17c6f34750f321e88286a08731fdb8b94/httplib.py
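The two httplib records above switch the response body logic from a will_close test to Content-Length accounting, decrementing self.length as data arrives. A sketch of that accounting in isolation (read_body is a hypothetical helper, not httplib API):

def read_body(fp, length=None, bufsize=8192):
    # length is None when no Content-Length was sent: read to EOF.
    # Otherwise stop once exactly `length` bytes have been consumed,
    # decrementing the counter the way HTTPResponse.read() does.
    chunks = []
    while length is None or length > 0:
        if length is None:
            amt = bufsize
        else:
            amt = min(bufsize, length)
        data = fp.read(amt)
        if not data:
            break
        if length is not None:
            length = length - len(data)
        chunks.append(data)
    return ''.join(chunks)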
'member', 'sectcode', 'verb'):
'member', 'sectcode', 'verb', 'cfunction', 'cdata', 'ctype', ):
def startchange():
    global hist, out

    hist.chaptertype = "chapter"
    hist.inenv = []
    hist.nodenames = []
    hist.cindex = []
    hist.inargs = 0
    hist.enumeratenesting, hist.itemizenesting = 0, 0

    out.doublenodes = []
    out.doublecindeces = []
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
command = ''
cat_class = ''
if idxsi and idxsi[-1] in ('method', 'protocol', 'attribute'):
    command = 'defmethod'
    cat_class = string.join(idxsi[:-1])
elif len(idxsi) == 2 and idxsi[1] == 'function':
    command = 'deffn'
    cat_class = string.join(idxsi)
elif len(idxsi) == 3 and idxsi[:2] == ['in', 'module']:
    command = 'deffn'
    cat_class = 'function of ' + string.join(idxsi[1:])
elif len(idxsi) > 3 and idxsi[:2] == ['in', 'modules']:
    command = 'deffn'
    cat_class = 'function of ' + string.join(idxsi[1:])
if not command:
    raise error, 'don\'t know what to do with indexsubitem ' + `idxsi`
command = 'deffn'
if hist.this_module:
    cat_class = 'function of ' + hist.this_module
else:
    cat_class = 'built-in function'
def do_funcdesc(length, buf, pp, i, index=1):
    startpoint = i-1
    ch = pp[startpoint]
    wh = ch.where
    length, newi = getnextarg(length, buf, pp, i)
    funcname = chunk(GROUP, wh, pp[i:newi])
    del pp[i:newi]
    length = length - (newi-i)
    save = hist.inargs
    hist.inargs = 1
    length, newi = getnextarg(length, buf, pp, i)
    hist.inargs = save
    del save
    the_args = [chunk(PLAIN, wh, '()'[0])] + pp[i:newi] + \
               [chunk(PLAIN, wh, '()'[1])]
    del pp[i:newi]
    length = length - (newi-i)

    idxsi = hist.indexsubitem   # words
    command = ''
    cat_class = ''
    if idxsi and idxsi[-1] in ('method', 'protocol', 'attribute'):
        command = 'defmethod'
        cat_class = string.join(idxsi[:-1])
    elif len(idxsi) == 2 and idxsi[1] == 'function':
        command = 'deffn'
        cat_class = string.join(idxsi)
    elif len(idxsi) == 3 and idxsi[:2] == ['in', 'module']:
        command = 'deffn'
        cat_class = 'function of ' + string.join(idxsi[1:])
    elif len(idxsi) > 3 and idxsi[:2] == ['in', 'modules']:
        command = 'deffn'
        cat_class = 'function of ' + string.join(idxsi[1:])
    if not command:
        raise error, 'don\'t know what to do with indexsubitem ' + `idxsi`

    ch.chtype = chunk_type[CSLINE]
    ch.data = command

    cslinearg = [chunk(GROUP, wh, [chunk(PLAIN, wh, cat_class)])]
    cslinearg.append(chunk(PLAIN, wh, ' '))
    cslinearg.append(funcname)
    cslinearg.append(chunk(PLAIN, wh, ' '))
    l = len(cslinearg)
    cslinearg[l:l] = the_args

    pp.insert(i, chunk(GROUP, wh, cslinearg))
    i, length = i+1, length+1
    hist.command = command
    return length, i
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
if len(idxsi) == 2 and idxsi[1] == 'exception':
    command = 'defvr'
    cat_class = string.join(idxsi)
elif len(idxsi) == 3 and idxsi[:2] == ['in', 'module']:
    command = 'defcv'
    cat_class = 'exception'
    class_class = string.join(idxsi[1:])
elif len(idxsi) == 4 and idxsi[:3] == ['exception', 'in', 'module']:
    command = 'defcv'
    cat_class = 'exception'
    class_class = string.join(idxsi[2:])
elif idxsi == ['built-in', 'exception', 'base', 'class']:
if idxsi == ['built-in', 'exception', 'base', 'class']:
def do_excdesc(length, buf, pp, i):
    startpoint = i-1
    ch = pp[startpoint]
    wh = ch.where
    length, newi = getnextarg(length, buf, pp, i)
    excname = chunk(GROUP, wh, pp[i:newi])
    del pp[i:newi]
    length = length - (newi-i)

    idxsi = hist.indexsubitem   # words
    command = ''
    cat_class = ''
    class_class = ''
    if len(idxsi) == 2 and idxsi[1] == 'exception':
        command = 'defvr'
        cat_class = string.join(idxsi)
    elif len(idxsi) == 3 and idxsi[:2] == ['in', 'module']:
        command = 'defcv'
        cat_class = 'exception'
        class_class = string.join(idxsi[1:])
    elif len(idxsi) == 4 and idxsi[:3] == ['exception', 'in', 'module']:
        command = 'defcv'
        cat_class = 'exception'
        class_class = string.join(idxsi[2:])
    elif idxsi == ['built-in', 'exception', 'base', 'class']:
        command = 'defvr'
        cat_class = 'exception base class'
    else:
        raise error, 'don\'t know what to do with indexsubitem ' + `idxsi`

    ch.chtype = chunk_type[CSLINE]
    ch.data = command

    cslinearg = [chunk(GROUP, wh, [chunk(PLAIN, wh, cat_class)])]
    cslinearg.append(chunk(PLAIN, wh, ' '))
    if class_class:
        cslinearg.append(chunk(GROUP, wh, [chunk(PLAIN, wh, class_class)]))
        cslinearg.append(chunk(PLAIN, wh, ' '))
    cslinearg.append(excname)

    pp.insert(i, chunk(GROUP, wh, cslinearg))
    i, length = i+1, length+1
    hist.command = command
    return length, i
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
raise error, 'don\'t know what to do with indexsubitem ' + `idxsi`
command = 'defcv'
cat_class = 'exception'
def do_excdesc(length, buf, pp, i):
    startpoint = i-1
    ch = pp[startpoint]
    wh = ch.where
    length, newi = getnextarg(length, buf, pp, i)
    excname = chunk(GROUP, wh, pp[i:newi])
    del pp[i:newi]
    length = length - (newi-i)

    idxsi = hist.indexsubitem   # words
    command = ''
    cat_class = ''
    class_class = ''
    if len(idxsi) == 2 and idxsi[1] == 'exception':
        command = 'defvr'
        cat_class = string.join(idxsi)
    elif len(idxsi) == 3 and idxsi[:2] == ['in', 'module']:
        command = 'defcv'
        cat_class = 'exception'
        class_class = string.join(idxsi[1:])
    elif len(idxsi) == 4 and idxsi[:3] == ['exception', 'in', 'module']:
        command = 'defcv'
        cat_class = 'exception'
        class_class = string.join(idxsi[2:])
    elif idxsi == ['built-in', 'exception', 'base', 'class']:
        command = 'defvr'
        cat_class = 'exception base class'
    else:
        raise error, 'don\'t know what to do with indexsubitem ' + `idxsi`

    ch.chtype = chunk_type[CSLINE]
    ch.data = command

    cslinearg = [chunk(GROUP, wh, [chunk(PLAIN, wh, cat_class)])]
    cslinearg.append(chunk(PLAIN, wh, ' '))
    if class_class:
        cslinearg.append(chunk(GROUP, wh, [chunk(PLAIN, wh, class_class)]))
        cslinearg.append(chunk(PLAIN, wh, ' '))
    cslinearg.append(excname)

    pp.insert(i, chunk(GROUP, wh, cslinearg))
    i, length = i+1, length+1
    hist.command = command
    return length, i
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
'funcdescni', 'datadescni'):
'funcdescni', 'datadescni', 'methoddesc', 'memberdesc', 'methoddescni', 'memberdescni', ):
def changeit(buf, pp):
    global onlylatexspecial, hist, out
    i, length = 0, len(pp)
    while 1:
        # sanity check: length should always equal len(pp)
        if len(pp) != length:
            print i, pp[i]
            raise 'FATAL', 'inconsistent length. thought ' + `length` + \
                  ', but should really be ' + `len(pp)`
        if i >= length:
            break
        ch = pp[i]
        i = i + 1
        if type(ch) is StringType:
            #normally, only chunks are present in pp,
            # but in some cases, some extra info
            # has been inserted, e.g., the \end{...} clauses
            raise 'FATAL', 'got string, probably too many ' + `end`
        if ch.chtype == chunk_type[GROUP]:
            # check for {\em ...} constructs
            data = ch.data
            if data and \
               data[0].chtype == chunk_type[CSNAME] and \
               fontchanges.has_key(s(buf, data[0].data)):
                k = s(buf, data[0].data)
                del data[0]
                pp.insert(i-1, chunk(CSNAME, ch.where, fontchanges[k]))
                length, i = length+1, i+1
            elif data:
                if len(data) \
                   and data[0].chtype == chunk_type[GROUP] \
                   and len(data[0].data) \
                   and data[0].data[0].chtype == chunk_type[CSNAME] \
                   and s(buf, data[0].data[0].data) == 'e':
                    data[0] = data[0].data[0]
                    print "invoking \\e magic group transform..."
                else:
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
stuff = pp[i].data
if len(stuff) != 1:
    raise error, "parameter to \\setindexsubitem{} too long"
if pp[i].chtype != chunk_type[GROUP]:
    raise error, "bad chunk type following \\setindexsubitem" \
                 "\nexpected GROUP, got " + str(ch.chtype)
text = s(buf, stuff[0].data)
if text[:1] != '(' or text[-1:] != ')':
    raise error, \
          'expected indexsubitem enclosed in parenteses'
hist.indexsubitem = string.split(text[1:-1])
del stuff, text
del pp[i-1:i+1]
i = i - 1
length = length - 2
length, i = yank_indexsubitem(pp, length, i, buf, ch, 'setindexsubitem')
elif s_buf_data == 'withsubitem':
    oldsubitem = hist.indexsubitem
    try:
        length, i = yank_indexsubitem(pp, length, i, buf, ch, 'withsubitem')
        stuff = pp[i].data
        del pp[i]
        length = length - 1
        changeit(buf, stuff)
        stuff = None
    finally:
        hist.indexsubitem = oldsubitem
elif s_buf_data in ('textrm', 'pytype'):
    stuff = pp[i].data
    pp[i-1:i+1] = stuff
    length = length - 2 + len(stuff)
    stuff = None
    i = i - 1
def changeit(buf, pp):
    global onlylatexspecial, hist, out
    i, length = 0, len(pp)
    while 1:
        # sanity check: length should always equal len(pp)
        if len(pp) != length:
            print i, pp[i]
            raise 'FATAL', 'inconsistent length. thought ' + `length` + \
                  ', but should really be ' + `len(pp)`
        if i >= length:
            break
        ch = pp[i]
        i = i + 1
        if type(ch) is StringType:
            #normally, only chunks are present in pp,
            # but in some cases, some extra info
            # has been inserted, e.g., the \end{...} clauses
            raise 'FATAL', 'got string, probably too many ' + `end`
        if ch.chtype == chunk_type[GROUP]:
            # check for {\em ...} constructs
            data = ch.data
            if data and \
               data[0].chtype == chunk_type[CSNAME] and \
               fontchanges.has_key(s(buf, data[0].data)):
                k = s(buf, data[0].data)
                del data[0]
                pp.insert(i-1, chunk(CSNAME, ch.where, fontchanges[k]))
                length, i = length+1, i+1
            elif data:
                if len(data) \
                   and data[0].chtype == chunk_type[GROUP] \
                   and len(data[0].data) \
                   and data[0].data[0].chtype == chunk_type[CSNAME] \
                   and s(buf, data[0].data[0].data) == 'e':
                    data[0] = data[0].data[0]
                    print "invoking \\e magic group transform..."
                else:
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
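The \withsubitem change above introduces a try/finally so that hist.indexsubitem is restored even when the recursive changeit() call raises. The same save/restore pattern in isolation (with_subitem and the state object are hypothetical stand-ins, not partparse names):

def with_subitem(state, new_subitem, process, *args):
    old_subitem = state.indexsubitem
    state.indexsubitem = new_subitem
    try:
        return process(*args)              # may raise
    finally:
        state.indexsubitem = old_subitem   # always restored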
cat_class = '('+string.join(idxsi)+')'
cat_class = '(%s)' % string.join(idxsi)
def changeit(buf, pp):
    global onlylatexspecial, hist, out
    i, length = 0, len(pp)
    while 1:
        # sanity check: length should always equal len(pp)
        if len(pp) != length:
            print i, pp[i]
            raise 'FATAL', 'inconsistent length. thought ' + `length` + \
                  ', but should really be ' + `len(pp)`
        if i >= length:
            break
        ch = pp[i]
        i = i + 1
        if type(ch) is StringType:
            #normally, only chunks are present in pp,
            # but in some cases, some extra info
            # has been inserted, e.g., the \end{...} clauses
            raise 'FATAL', 'got string, probably too many ' + `end`
        if ch.chtype == chunk_type[GROUP]:
            # check for {\em ...} constructs
            data = ch.data
            if data and \
               data[0].chtype == chunk_type[CSNAME] and \
               fontchanges.has_key(s(buf, data[0].data)):
                k = s(buf, data[0].data)
                del data[0]
                pp.insert(i-1, chunk(CSNAME, ch.where, fontchanges[k]))
                length, i = length+1, i+1
            elif data:
                if len(data) \
                   and data[0].chtype == chunk_type[GROUP] \
                   and len(data[0].data) \
                   and data[0].data[0].chtype == chunk_type[CSNAME] \
                   and s(buf, data[0].data[0].data) == 'e':
                    data[0] = data[0].data[0]
                    print "invoking \\e magic group transform..."
                else:
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
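The '(%s)' record above is a pure style change: one format expression instead of two concatenations. Both spellings produce the same string (string.join is the Python 2 spelling; the sample words are assumed):

import string

idxsi = ['in', 'module', 'os']            # assumed sample words

old = '(' + string.join(idxsi) + ')'
new = '(%s)' % string.join(idxsi)
assert old == new == '(in module os)'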
def changeit(buf, pp):
    global onlylatexspecial, hist, out
    i, length = 0, len(pp)
    while 1:
        # sanity check: length should always equal len(pp)
        if len(pp) != length:
            print i, pp[i]
            raise 'FATAL', 'inconsistent length. thought ' + `length` + \
                  ', but should really be ' + `len(pp)`
        if i >= length:
            break
        ch = pp[i]
        i = i + 1
        if type(ch) is StringType:
            #normally, only chunks are present in pp,
            # but in some cases, some extra info
            # has been inserted, e.g., the \end{...} clauses
            raise 'FATAL', 'got string, probably too many ' + `end`
        if ch.chtype == chunk_type[GROUP]:
            # check for {\em ...} constructs
            data = ch.data
            if data and \
               data[0].chtype == chunk_type[CSNAME] and \
               fontchanges.has_key(s(buf, data[0].data)):
                k = s(buf, data[0].data)
                del data[0]
                pp.insert(i-1, chunk(CSNAME, ch.where, fontchanges[k]))
                length, i = length+1, i+1
            elif data:
                if len(data) \
                   and data[0].chtype == chunk_type[GROUP] \
                   and len(data[0].data) \
                   and data[0].data[0].chtype == chunk_type[CSNAME] \
                   and s(buf, data[0].data[0].data) == 'e':
                    data[0] = data[0].data[0]
                    print "invoking \\e magic group transform..."
                else:
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
ch.chtype = chunk_type[CSLINE]
ch.data = 'pindex'
length, newi = getnextarg(length, buf, pp, i)
ingroupch = pp[i:newi]
del pp[i:newi]
length = length - (newi-i)

ingroupch.append(chunk(PLAIN, ch.where, ' '))
ingroupch.append(chunk(CSNAME, ch.where, 'r'))
ingroupch.append(chunk(GROUP, ch.where, [
        chunk(PLAIN, ch.where, '(built-in)')]))
pp.insert(i, chunk(GROUP, ch.where, ingroupch))
length, i = length+1, i+1
elif s_buf_data == 'refmodindex':
    ch.chtype = chunk_type[CSLINE]
    ch.data = 'pindex'
    length, newi = getnextarg(length, buf, pp, i)
    ingroupch = pp[i:newi]
    del pp[i:newi]
    length = length - (newi-i)
    pp.insert(i, chunk(GROUP, ch.where, ingroupch))
    length, i = length+1, i+1
length, i = add_module_index(
    pp, length, i, buf, ch, '(built-in)',
    (s_buf_data[:3] == 'ref'))
elif s_buf_data in ('modindex', 'refmodindex'):
    length, i = add_module_index(
        pp, length, i, buf, ch, '',
        (s_buf_data[:3] == 'ref'))
def changeit(buf, pp):
    global onlylatexspecial, hist, out
    i, length = 0, len(pp)
    while 1:
        # sanity check: length should always equal len(pp)
        if len(pp) != length:
            print i, pp[i]
            raise 'FATAL', 'inconsistent length. thought ' + `length` + \
                  ', but should really be ' + `len(pp)`
        if i >= length:
            break
        ch = pp[i]
        i = i + 1
        if type(ch) is StringType:
            #normally, only chunks are present in pp,
            # but in some cases, some extra info
            # has been inserted, e.g., the \end{...} clauses
            raise 'FATAL', 'got string, probably too many ' + `end`
        if ch.chtype == chunk_type[GROUP]:
            # check for {\em ...} constructs
            data = ch.data
            if data and \
               data[0].chtype == chunk_type[CSNAME] and \
               fontchanges.has_key(s(buf, data[0].data)):
                k = s(buf, data[0].data)
                del data[0]
                pp.insert(i-1, chunk(CSNAME, ch.where, fontchanges[k]))
                length, i = length+1, i+1
            elif data:
                if len(data) \
                   and data[0].chtype == chunk_type[GROUP] \
                   and len(data[0].data) \
                   and data[0].data[0].chtype == chunk_type[CSNAME] \
                   and s(buf, data[0].data[0].data) == 'e':
                    data[0] = data[0].data[0]
                    print "invoking \\e magic group transform..."
                else:
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
ch.chtype = chunk_type[CSLINE]
ch.data = 'pindex'
length, newi = getnextarg(length, buf, pp, i)
ingroupch = pp[i:newi]
del pp[i:newi]
length = length - (newi-i)

ingroupch.append(chunk(PLAIN, ch.where, ' '))
ingroupch.append(chunk(CSNAME, ch.where, 'r'))
ingroupch.append(chunk(GROUP, ch.where, [
        chunk(PLAIN, ch.where, '(standard)')]))
pp.insert(i, chunk(GROUP, ch.where, ingroupch))
length, i = length+1, i+1
elif s_buf_data in ('stmodindex', 'refstmodindex'):
    ch.chtype = chunk_type[CSLINE]
    ch.data = 'pindex'
    length, newi = getnextarg(length, buf, pp, i)
    ingroupch = pp[i:newi]
    del pp[i:newi]
    length = length - (newi-i)

    ingroupch.append(chunk(PLAIN, ch.where, ' '))
    ingroupch.append(chunk(CSNAME, ch.where, 'r'))
    ingroupch.append(chunk(GROUP, ch.where, [
            chunk(PLAIN, ch.where, '(standard)')]))
    pp.insert(i, chunk(GROUP, ch.where, ingroupch))
    length, i = length+1, i+1
length, i = add_module_index(
    pp, length, i, buf, ch, '(standard)',
    (s_buf_data[:3] == 'ref'))
elif s_buf_data in ('exmodindex', 'refexmodindex'):
    length, i = add_module_index(
        pp, length, i, buf, ch, '(extension)',
        (s_buf_data[:3] == 'ref'))
def changeit(buf, pp):
    global onlylatexspecial, hist, out
    i, length = 0, len(pp)
    while 1:
        # sanity check: length should always equal len(pp)
        if len(pp) != length:
            print i, pp[i]
            raise 'FATAL', 'inconsistent length. thought ' + `length` + \
                  ', but should really be ' + `len(pp)`
        if i >= length:
            break
        ch = pp[i]
        i = i + 1
        if type(ch) is StringType:
            #normally, only chunks are present in pp,
            # but in some cases, some extra info
            # has been inserted, e.g., the \end{...} clauses
            raise 'FATAL', 'got string, probably too many ' + `end`
        if ch.chtype == chunk_type[GROUP]:
            # check for {\em ...} constructs
            data = ch.data
            if data and \
               data[0].chtype == chunk_type[CSNAME] and \
               fontchanges.has_key(s(buf, data[0].data)):
                k = s(buf, data[0].data)
                del data[0]
                pp.insert(i-1, chunk(CSNAME, ch.where, fontchanges[k]))
                length, i = length+1, i+1
            elif data:
                if len(data) \
                   and data[0].chtype == chunk_type[GROUP] \
                   and len(data[0].data) \
                   and data[0].data[0].chtype == chunk_type[CSNAME] \
                   and s(buf, data[0].data[0].data) == 'e':
                    data[0] = data[0].data[0]
                    print "invoking \\e magic group transform..."
                else:
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
while pp[i+1].chtype == chunk_type[COMMENT]:
    i = i + 1
def changeit(buf, pp):
    global onlylatexspecial, hist, out
    i, length = 0, len(pp)
    while 1:
        # sanity check: length should always equal len(pp)
        if len(pp) != length:
            print i, pp[i]
            raise 'FATAL', 'inconsistent length. thought ' + `length` + \
                  ', but should really be ' + `len(pp)`
        if i >= length:
            break
        ch = pp[i]
        i = i + 1
        if type(ch) is StringType:
            #normally, only chunks are present in pp,
            # but in some cases, some extra info
            # has been inserted, e.g., the \end{...} clauses
            raise 'FATAL', 'got string, probably too many ' + `end`
        if ch.chtype == chunk_type[GROUP]:
            # check for {\em ...} constructs
            data = ch.data
            if data and \
               data[0].chtype == chunk_type[CSNAME] and \
               fontchanges.has_key(s(buf, data[0].data)):
                k = s(buf, data[0].data)
                del data[0]
                pp.insert(i-1, chunk(CSNAME, ch.where, fontchanges[k]))
                length, i = length+1, i+1
            elif data:
                if len(data) \
                   and data[0].chtype == chunk_type[GROUP] \
                   and len(data[0].data) \
                   and data[0].data[0].chtype == chunk_type[CSNAME] \
                   and s(buf, data[0].data[0].data) == 'e':
                    data[0] = data[0].data[0]
                    print "invoking \\e magic group transform..."
                else:
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
oparen = chunk(PLAIN, ch.where, " (")
data.insert(0, oparen)
data.insert(0, chunk(PLAIN, ch.where, " ("))
def changeit(buf, pp):
    global onlylatexspecial, hist, out
    i, length = 0, len(pp)
    while 1:
        # sanity check: length should always equal len(pp)
        if len(pp) != length:
            print i, pp[i]
            raise 'FATAL', 'inconsistent length. thought ' + `length` + \
                  ', but should really be ' + `len(pp)`
        if i >= length:
            break
        ch = pp[i]
        i = i + 1
        if type(ch) is StringType:
            #normally, only chunks are present in pp,
            # but in some cases, some extra info
            # has been inserted, e.g., the \end{...} clauses
            raise 'FATAL', 'got string, probably too many ' + `end`
        if ch.chtype == chunk_type[GROUP]:
            # check for {\em ...} constructs
            data = ch.data
            if data and \
               data[0].chtype == chunk_type[CSNAME] and \
               fontchanges.has_key(s(buf, data[0].data)):
                k = s(buf, data[0].data)
                del data[0]
                pp.insert(i-1, chunk(CSNAME, ch.where, fontchanges[k]))
                length, i = length+1, i+1
            elif data:
                if len(data) \
                   and data[0].chtype == chunk_type[GROUP] \
                   and len(data[0].data) \
                   and data[0].data[0].chtype == chunk_type[CSNAME] \
                   and s(buf, data[0].data[0].data) == 'e':
                    data[0] = data[0].data[0]
                    print "invoking \\e magic group transform..."
                else:
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
print 'WARNING: found newline in csline arg'
def dumpit(buf, wm, pp):
    global out

    i, length = 0, len(pp)

    addspace = 0

    while 1:
        if len(pp) != length:
            raise 'FATAL', 'inconsistent length'
        if i == length:
            break
        ch = pp[i]
        i = i + 1

        dospace = addspace
        addspace = 0

        if ch.chtype == chunk_type[CSNAME]:
            s_buf_data = s(buf, ch.data)
4f6d6e487c786d1591c0e26e671322e470bdd244 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/4f6d6e487c786d1591c0e26e671322e470bdd244/partparse.py
Split 'text' into multiple lines of no more than 'self.width' characters each, and return the list of strings that results. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space.
Reformat the single paragraph in 'text' so it fits in lines of no more than 'self.width' columns, and return a list of wrapped lines. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space.
def wrap(self, text):
    """wrap(text : string) -> [string]
e807e571a1bbd0a4f9f7530ba6d7dabd277d1a3d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/e807e571a1bbd0a4f9f7530ba6d7dabd277d1a3d/textwrap.py
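The textwrap records above only reword the docstrings; the behaviour they describe is easy to check directly (the sample text is assumed):

import textwrap

w = textwrap.TextWrapper(width=30)
lines = w.wrap("The quick  brown\tfox jumped over the lazy dog.")
for line in lines:
    assert len(line) <= 30          # no wrapped line exceeds the width

# fill() is the wrapped paragraph as one newline-joined string
assert w.fill("a b c") == "\n".join(w.wrap("a b c"))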
Reformat the paragraph in 'text' to fit in lines of no more than 'width' columns.
Reformat the single paragraph in 'text' to fit in lines of no more than 'self.width' columns, and return a new string containing the entire wrapped paragraph.
def fill(self, text):
    """fill(text : string) -> string
e807e571a1bbd0a4f9f7530ba6d7dabd277d1a3d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/e807e571a1bbd0a4f9f7530ba6d7dabd277d1a3d/textwrap.py
def fill(self, text):
    """fill(text : string) -> string
e807e571a1bbd0a4f9f7530ba6d7dabd277d1a3d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/e807e571a1bbd0a4f9f7530ba6d7dabd277d1a3d/textwrap.py
def copytree(src, dst, symlinks=0):
def copytree(src, dst, symlinks=False):
def copytree(src, dst, symlinks=0):
    """Recursively copy a directory tree using copy2().

    The destination directory must not already exist.
    Error are reported to standard output.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    XXX Consider this example code rather than the ultimate tool.

    """
    names = os.listdir(src)
    os.mkdir(dst)
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks)
            else:
                copy2(srcname, dstname)
            # XXX What about devices, sockets etc.?
        except (IOError, os.error), why:
            errors.append((srcname, dstname, why))
    if errors:
        raise Error, errors
a4c93b68f164e6e3db881e9af5f0fdc9153680d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/a4c93b68f164e6e3db881e9af5f0fdc9153680d4/shutil.py
Error are reported to standard output.
If exception(s) occur, an Error is raised with a list of reasons.
def copytree(src, dst, symlinks=0):
    """Recursively copy a directory tree using copy2().

    The destination directory must not already exist.
    Error are reported to standard output.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    XXX Consider this example code rather than the ultimate tool.

    """
    names = os.listdir(src)
    os.mkdir(dst)
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks)
            else:
                copy2(srcname, dstname)
            # XXX What about devices, sockets etc.?
        except (IOError, os.error), why:
            errors.append((srcname, dstname, why))
    if errors:
        raise Error, errors
a4c93b68f164e6e3db881e9af5f0fdc9153680d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/a4c93b68f164e6e3db881e9af5f0fdc9153680d4/shutil.py
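For the shutil records above: the symlinks flag now reads as a proper boolean, and the docstring describes the aggregated Error. Usage sketch (both directory names are assumptions):

import shutil

# symlinks=True recreates links as links in the destination tree;
# the default False copies the files the links point to instead.
shutil.copytree('srcdir', 'dstdir', symlinks=True)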
def __init__(self, sock):
def __init__(self, sock, debuglevel=0):
def __init__(self, sock):
    self.fp = sock.makefile('rb', 0)
30f867423a6cc1c0c2fea742e76aa8e288028894 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/30f867423a6cc1c0c2fea742e76aa8e288028894/httplib.py
if self.length == 0:
    self.close()
def begin(self):
    if self.msg is not None:
        # we've already started reading the response
        return
30f867423a6cc1c0c2fea742e76aa8e288028894 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/30f867423a6cc1c0c2fea742e76aa8e288028894/httplib.py
if self.length == 0 or len(s) < amt:
    self.close()
def read(self, amt=None):
    if self.fp is None:
        return ''
30f867423a6cc1c0c2fea742e76aa8e288028894 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/30f867423a6cc1c0c2fea742e76aa8e288028894/httplib.py
response = self.response_class(self.sock)
if self.debuglevel > 0:
    response = self.response_class(self.sock, self.debuglevel)
else:
    response = self.response_class(self.sock)
def getresponse(self):
    "Get the response from the server."
30f867423a6cc1c0c2fea742e76aa8e288028894 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/30f867423a6cc1c0c2fea742e76aa8e288028894/httplib.py
"The class no longer supports the debuglevel." pass
self._conn.set_debuglevel(debuglevel)
def set_debuglevel(self, debuglevel):
    "The class no longer supports the debuglevel."
    pass
30f867423a6cc1c0c2fea742e76aa8e288028894 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/30f867423a6cc1c0c2fea742e76aa8e288028894/httplib.py
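The httplib debuglevel records above thread a debug flag from the connection into each HTTPResponse. From the client side this is driven entirely through set_debuglevel (Python 2 httplib; the host name is an assumption):

import httplib

conn = httplib.HTTPConnection('www.python.org')
conn.set_debuglevel(1)          # echo sends and reply headers to stdout
conn.request('GET', '/')
resp = conn.getresponse()       # built with the same debuglevel
print resp.status, resp.reason
conn.close()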
test_support.run_unittest(TestSFbugs)
test_support.run_doctest(difflib)
Doctests = doctest.DocTestSuite(difflib)
test_support.run_unittest(TestSFbugs, Doctests)
def test_ratio_for_null_seqn(self):
    # Check clearing of SF bug 763023
    s = difflib.SequenceMatcher(None, [], [])
    self.assertEqual(s.ratio(), 1)
    self.assertEqual(s.quick_ratio(), 1)
    self.assertEqual(s.real_quick_ratio(), 1)
43d790c087be582fc9ae61e382aee9eaafd79738 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/43d790c087be582fc9ae61e382aee9eaafd79738/test_difflib.py
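The test_difflib record folds difflib's docstring examples into the unittest run via doctest.DocTestSuite. The same combination in isolation:

import doctest
import unittest
import difflib

suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(difflib))    # docstring examples
unittest.TextTestRunner().run(suite)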
"""Return true if the pathname refers to a symbolic link. Always false on the Mac, until we understand Aliases.)""" return False
"""Return true if the pathname refers to a symbolic link.""" try: import macfs return macfs.ResolveAliasFile(s)[2] except: return False
def islink(s):
    """Return true if the pathname refers to a symbolic link.
    Always false on the Mac, until we understand Aliases.)"""

    return False
992d58b770daa61b209fdc3545b5e962acfc735a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/992d58b770daa61b209fdc3545b5e962acfc735a/macpath.py
if isdir(name):
if isdir(name) and not islink(name):
def walk(top, func, arg):
    """Directory tree walk with callback function.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..').  func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting.  No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func.  It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics.  Passing None for arg is common."""

    try:
        names = os.listdir(top)
    except os.error:
        return
    func(arg, top, names)
    for name in names:
        name = join(top, name)
        if isdir(name):
            walk(name, func, arg)
992d58b770daa61b209fdc3545b5e962acfc735a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/992d58b770daa61b209fdc3545b5e962acfc735a/macpath.py
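The walk record above guards the recursion with islink() so that resolved aliases cannot create cycles. The callback protocol itself is the same one Python 2's os.path.walk uses (visit is a hypothetical callback name):

import os.path

def visit(arg, dirname, names):
    # signature expected by walk(): func(arg, dirname, fnames)
    print dirname, '-', len(names), 'entries'

os.path.walk('.', visit, None)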
realpath = abspath
def realpath(path):
    path = abspath(path)
    try:
        import macfs
    except ImportError:
        return path
    if not path:
        return path
    components = path.split(':')
    path = components[0] + ':'
    for c in components[1:]:
        path = join(path, c)
        path = macfs.ResolveAliasFile(path)[0].as_pathname()
    return path
def abspath(path):
    """Return an absolute path."""
    if not isabs(path):
        path = join(os.getcwd(), path)
    return normpath(path)
992d58b770daa61b209fdc3545b5e962acfc735a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/992d58b770daa61b209fdc3545b5e962acfc735a/macpath.py
self._err_handler = handler.ErrorHandler()
self._err_handler = handler.ErrorHandler()
def __init__(self):
    self._cont_handler = handler.ContentHandler()
    #self._dtd_handler = handler.DTDHandler()
    #self._ent_handler = handler.EntityResolver()
    self._err_handler = handler.ErrorHandler()
07cbc4e5bd7fdc2f5e92ac65dbabf35bde002bef /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/07cbc4e5bd7fdc2f5e92ac65dbabf35bde002bef/xmlreader.py
def __init__(self, bufsize=2**16 ):
    self._bufsize=bufsize
    XMLReader.__init__( self )

def parse(self, source):
    self.prepareParser(source)
    inf=open( source )
    buffer = inf.read(self._bufsize)
def __init__(self, bufsize=2**16):
    self._bufsize = bufsize
    XMLReader.__init__(self)

def _parseOpenFile(self, source):
    buffer = source.read(self._bufsize)
def __init__(self, bufsize=2**16 ):
    self._bufsize=bufsize
    XMLReader.__init__( self )
07cbc4e5bd7fdc2f5e92ac65dbabf35bde002bef /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/07cbc4e5bd7fdc2f5e92ac65dbabf35bde002bef/xmlreader.py
buffer = inf.read(self._bufsize)
buffer = source.read(self._bufsize)
def parse(self, source):
    self.prepareParser(source)
    #FIXME: do some type checking: could be already stream, URL or
    #       filename
    inf=open( source )
    buffer = inf.read(self._bufsize)
    while buffer != "":
        self.feed(buffer)
        buffer = inf.read(self._bufsize)
    self.close()
    self.reset()
07cbc4e5bd7fdc2f5e92ac65dbabf35bde002bef /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/07cbc4e5bd7fdc2f5e92ac65dbabf35bde002bef/xmlreader.py
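The xmlreader records above refactor the chunked feed loop so it runs against an already-open stream. The same loop driven through the stock SAX driver (document.xml is an assumed filename):

import xml.sax
from xml.sax.handler import ContentHandler

parser = xml.sax.make_parser()      # an IncrementalParser in practice
parser.setContentHandler(ContentHandler())

inf = open('document.xml')
buffer = inf.read(2**16)
while buffer != "":
    parser.feed(buffer)
    buffer = inf.read(2**16)
parser.close()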
if __name__=="__main__":
if __name__ == "__main__":
def _test():
    XMLReader()
    IncrementalParser()
    Locator()
    AttributesImpl()
07cbc4e5bd7fdc2f5e92ac65dbabf35bde002bef /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/07cbc4e5bd7fdc2f5e92ac65dbabf35bde002bef/xmlreader.py
and where to install packages"""
and where to install packages."""
def http_error_default(self, url, fp, errcode, errmsg, headers):
    urllib.URLopener.http_error_default(self, url, fp, errcode,
                                        errmsg, headers)
8d326b8581e8246d006bd4c5ab1d9f30972ef5cb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/8d326b8581e8246d006bd4c5ab1d9f30972ef5cb/pimp.py
"responsible" for the contents"""
"responsible" for the contents."""
def compareFlavors(self, left, right):
    """Compare two flavor strings. This is part of your preferences
    because whether the user prefers installing from source or binary
    is."""
    if left in self.flavorOrder:
        if right in self.flavorOrder:
            return cmp(self.flavorOrder.index(left),
                       self.flavorOrder.index(right))
        return -1
    if right in self.flavorOrder:
        return 1
    return cmp(left, right)
8d326b8581e8246d006bd4c5ab1d9f30972ef5cb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/8d326b8581e8246d006bd4c5ab1d9f30972ef5cb/pimp.py
errors.append((srcname, dstname, why))
errors.append((srcname, dstname, str(why)))
def copytree(src, dst, symlinks=False):
    """Recursively copy a directory tree using copy2().

    The destination directory must not already exist.
    If exception(s) occur, an Error is raised with a list of reasons.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    XXX Consider this example code rather than the ultimate tool.

    """
    names = os.listdir(src)
    os.makedirs(dst)
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks)
            else:
                copy2(srcname, dstname)
            # XXX What about devices, sockets etc.?
        except (IOError, os.error), why:
            errors.append((srcname, dstname, why))
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error, err:
            errors.extend(err.args[0])
    copystat(src, dst)
    if errors:
        raise Error, errors
7a3fd89994b8f79878a2b2d861ed1862b24c5bfe /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/7a3fd89994b8f79878a2b2d861ed1862b24c5bfe/shutil.py
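The newer copytree record stringifies each failure before collecting it, so callers see every problem at once in a single shutil.Error. Handling sketch (the directory names are assumptions):

import shutil

try:
    shutil.copytree('srcdir', 'dstdir')
except shutil.Error, err:
    # per-file failures arrive as one aggregated (src, dst, why) list
    for srcname, dstname, why in err.args[0]:
        print 'failed: %s -> %s (%s)' % (srcname, dstname, why)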
'/usr/include/db3',
'/usr/local/include/db3',
def detect_modules(self):
    # Ensure that /usr/local is always used
    add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
    add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
ccfdde86eb56b5e24d44e18aa1c276be475aeee2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/ccfdde86eb56b5e24d44e18aa1c276be475aeee2/setup.py
'/usr/local/include/db3',
'/usr/include/db3',
def detect_modules(self):
    # Ensure that /usr/local is always used
    add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
    add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
ccfdde86eb56b5e24d44e18aa1c276be475aeee2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/ccfdde86eb56b5e24d44e18aa1c276be475aeee2/setup.py
'/usr/lib',
'/usr/local/lib',
def detect_modules(self):
    # Ensure that /usr/local is always used
    add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
    add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
ccfdde86eb56b5e24d44e18aa1c276be475aeee2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/ccfdde86eb56b5e24d44e18aa1c276be475aeee2/setup.py
'/usr/include/db3',
'/usr/local/include/db3',
def detect_modules(self):
    # Ensure that /usr/local is always used
    add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
    add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
ccfdde86eb56b5e24d44e18aa1c276be475aeee2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/ccfdde86eb56b5e24d44e18aa1c276be475aeee2/setup.py
'/usr/local/include/db3',
'/usr/include/db3',
def detect_modules(self):
    # Ensure that /usr/local is always used
    add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
    add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
ccfdde86eb56b5e24d44e18aa1c276be475aeee2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/ccfdde86eb56b5e24d44e18aa1c276be475aeee2/setup.py
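The setup.py records above shuffle the db3 search paths that detect_modules() feeds through add_dir_to_list. A simplified sketch of such a helper (the real setup.py version differs, e.g. it may also check that the directory exists):

def add_dir_to_list(dirlist, dir):
    """Prepend dir to dirlist if it is not already in the list."""
    if dir not in dirlist:
        dirlist.insert(0, dir)

dirs = ['/usr/lib']
add_dir_to_list(dirs, '/usr/local/lib')
assert dirs == ['/usr/local/lib', '/usr/lib']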