rem (string, lengths 1–322k) | add (string, lengths 0–2.05M) | context (string, lengths 4–228k) | meta (string, lengths 156–215) |
---|---|---|---|
class HTTPDigestAuthHandler(BaseHandler): """An authentication protocol defined by RFC 2069 Digest authentication improves on basic authentication because it does not transmit passwords in the clear. """ def __init__(self): self.passwd = HTTPPasswordMgr() | class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): header = 'Authorization' def http_error_401(self, req, fp, code, msg, headers): host = urlparse.urlparse(req.get_full_url())[1] return self.http_error_auth_reqed('www-authenticate', host, req, headers) class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): header = 'Proxy-Authorization' def http_error_407(self, req, fp, code, msg, headers): host = req.get_host() return self.http_error_auth_reqed('proxy-authenticate', host, req, headers) class AbstractDigestAuthHandler: def __init__(self, passwd=None): if passwd is None: passwd = HTTPPasswordMgr() self.passwd = passwd | def retry_http_basic_auth(self, req, realm): if self.__current_realm is None: self.__current_realm = realm else: self.__current_realm = realm return None # XXX host isn't really the correct URI? host = req.get_host() user,pw = self.passwd.find_user_password(realm, host) if pw: raw = "%s:%s" % (user, pw) auth = base64.encodestring(raw).strip() req.add_header('Authorization', 'Basic %s' % auth) resp = self.parent.open(req) self.__current_realm = None return resp else: self.__current_realm = None return None | 8a18e99008c28156a7ba701ca8d6824a50fb0a9e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/8a18e99008c28156a7ba701ca8d6824a50fb0a9e/urllib2.py |
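
The first urllib2 row shows the basic-auth retry path: credentials are joined as `user:password`, base64-encoded, and sent as an `Authorization: Basic ...` header. A minimal standalone sketch of that header construction, using the modern `base64.b64encode` in place of the old `encodestring`; the function name is illustrative:

```python
import base64

def basic_auth_header(user, password):
    # RFC 2617 basic credentials: base64("user:password"), no trailing newline
    raw = "%s:%s" % (user, password)
    token = base64.b64encode(raw.encode("utf-8")).decode("ascii")
    return "Basic " + token

print(basic_auth_header("guido", "secret"))  # Basic Z3VpZG86c2VjcmV0
```
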
def http_error_401(self, req, fp, code, msg, headers): authreq = headers.get('www-authenticate', None) | def http_error_auth_reqed(self, authreq, host, req, headers): authreq = headers.get(self.header, None) | def http_error_401(self, req, fp, code, msg, headers): # XXX could be mult. headers authreq = headers.get('www-authenticate', None) if authreq: kind = authreq.split()[0] if kind == 'Digest': return self.retry_http_digest_auth(req, authreq) | 8a18e99008c28156a7ba701ca8d6824a50fb0a9e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/8a18e99008c28156a7ba701ca8d6824a50fb0a9e/urllib2.py |
req.add_header('Authorization', 'Digest %s' % auth) | req.add_header(self.header, 'Digest %s' % auth) | def retry_http_digest_auth(self, req, auth): token, challenge = auth.split(' ', 1) chal = parse_keqv_list(parse_http_list(challenge)) auth = self.get_authorization(req, chal) if auth: req.add_header('Authorization', 'Digest %s' % auth) resp = self.parent.open(req) self.__current_realm = None return resp | 8a18e99008c28156a7ba701ca8d6824a50fb0a9e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/8a18e99008c28156a7ba701ca8d6824a50fb0a9e/urllib2.py |
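
`retry_http_digest_auth` above splits the challenge header into a scheme token and a comma-separated list of `key="value"` pairs. The same helpers survive today as `urllib.request.parse_http_list` and `parse_keqv_list`; a sketch with a made-up challenge string:

```python
from urllib.request import parse_http_list, parse_keqv_list

authreq = 'Digest realm="test@example.com", nonce="abc123", qop="auth"'
token, challenge = authreq.split(' ', 1)
chal = parse_keqv_list(parse_http_list(challenge))
print(token)           # Digest
print(chal['realm'])   # test@example.com
print(chal['nonce'])   # abc123
```
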
class HTTPHandler(BaseHandler): def http_open(self, req): | class AbstractHTTPHandler(BaseHandler): def do_open(self, http_class, req): | def encode_digest(digest): hexrep = [] for c in digest: n = (ord(c) >> 4) & 0xf hexrep.append(hex(n)[-1]) n = ord(c) & 0xf hexrep.append(hex(n)[-1]) return ''.join(hexrep) | 8a18e99008c28156a7ba701ca8d6824a50fb0a9e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/8a18e99008c28156a7ba701ca8d6824a50fb0a9e/urllib2.py |
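
`encode_digest` hex-encodes a binary digest one nibble at a time. Assuming lowercase hex is all that is wanted, it should agree with `binascii.hexlify` and with `hashlib`'s `hexdigest`; a quick equivalence check, ported to bytes input (ints, so no `ord()`):

```python
import binascii, hashlib

def encode_digest(digest):
    # nibble-by-nibble hex encoding, as in the row above
    hexrep = []
    for c in digest:
        hexrep.append(hex((c >> 4) & 0xf)[-1])
        hexrep.append(hex(c & 0xf)[-1])
    return ''.join(hexrep)

d = hashlib.md5(b"hello").digest()
assert encode_digest(d) == binascii.hexlify(d).decode('ascii')
assert encode_digest(d) == hashlib.md5(b"hello").hexdigest()
```
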
h = httplib.HTTP(host) | h = http_class(host) | def http_open(self, req): # XXX devise a new mechanism to specify user/password host = req.get_host() if not host: raise URLError('no host given') | 8a18e99008c28156a7ba701ca8d6824a50fb0a9e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/8a18e99008c28156a7ba701ca8d6824a50fb0a9e/urllib2.py |
print | def basic(src): print print "Testing basic accessors..." cf = ConfigParser.ConfigParser() sio = StringIO.StringIO(src) cf.readfp(sio) L = cf.sections() L.sort() print L for s in L: print "%s: %s" % (s, cf.options(s)) # The use of spaces in the section names serves as a regression test for # SourceForge bug #115357. # http://sourceforge.net/bugs/?func=detailbug&group_id=5470&bug_id=115357 print `cf.get('Foo Bar', 'foo', raw=1)` print `cf.get('Spacey Bar', 'foo', raw=1)` print `cf.get('Commented Bar', 'foo', raw=1)` if '__name__' in cf.options("Foo Bar"): print '__name__ "option" should not be exposed by the API!' else: print '__name__ "option" properly hidden by the API.' # Make sure the right things happen for remove_option(); # added to include check for SourceForge bug #123324: if not cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report existance of option") if cf.has_option('Foo Bar', 'foo'): raise TestFailed("remove_option() failed to remove option") if cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report non-existance of option" " that was removed") try: cf.remove_option('No Such Section', 'foo') except ConfigParser.NoSectionError: pass else: raise TestFailed( "remove_option() failed to report non-existance of option" " that never existed") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
|
print L for s in L: print "%s: %s" % (s, cf.options(s)) | verify(L == ['Commented Bar', 'Foo Bar', 'Internationalized Stuff', 'Spacey Bar'], "unexpected list of section names") | def basic(src): print print "Testing basic accessors..." cf = ConfigParser.ConfigParser() sio = StringIO.StringIO(src) cf.readfp(sio) L = cf.sections() L.sort() print L for s in L: print "%s: %s" % (s, cf.options(s)) # The use of spaces in the section names serves as a regression test for # SourceForge bug #115357. # http://sourceforge.net/bugs/?func=detailbug&group_id=5470&bug_id=115357 print `cf.get('Foo Bar', 'foo', raw=1)` print `cf.get('Spacey Bar', 'foo', raw=1)` print `cf.get('Commented Bar', 'foo', raw=1)` if '__name__' in cf.options("Foo Bar"): print '__name__ "option" should not be exposed by the API!' else: print '__name__ "option" properly hidden by the API.' # Make sure the right things happen for remove_option(); # added to include check for SourceForge bug #123324: if not cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report existance of option") if cf.has_option('Foo Bar', 'foo'): raise TestFailed("remove_option() failed to remove option") if cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report non-existance of option" " that was removed") try: cf.remove_option('No Such Section', 'foo') except ConfigParser.NoSectionError: pass else: raise TestFailed( "remove_option() failed to report non-existance of option" " that never existed") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print `cf.get('Foo Bar', 'foo', raw=1)` print `cf.get('Spacey Bar', 'foo', raw=1)` print `cf.get('Commented Bar', 'foo', raw=1)` | verify(cf.get('Foo Bar', 'foo', raw=1) == 'bar') verify(cf.get('Spacey Bar', 'foo', raw=1) == 'bar') verify(cf.get('Commented Bar', 'foo', raw=1) == 'bar') | def basic(src): print print "Testing basic accessors..." cf = ConfigParser.ConfigParser() sio = StringIO.StringIO(src) cf.readfp(sio) L = cf.sections() L.sort() print L for s in L: print "%s: %s" % (s, cf.options(s)) # The use of spaces in the section names serves as a regression test for # SourceForge bug #115357. # http://sourceforge.net/bugs/?func=detailbug&group_id=5470&bug_id=115357 print `cf.get('Foo Bar', 'foo', raw=1)` print `cf.get('Spacey Bar', 'foo', raw=1)` print `cf.get('Commented Bar', 'foo', raw=1)` if '__name__' in cf.options("Foo Bar"): print '__name__ "option" should not be exposed by the API!' else: print '__name__ "option" properly hidden by the API.' # Make sure the right things happen for remove_option(); # added to include check for SourceForge bug #123324: if not cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report existance of option") if cf.has_option('Foo Bar', 'foo'): raise TestFailed("remove_option() failed to remove option") if cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report non-existance of option" " that was removed") try: cf.remove_option('No Such Section', 'foo') except ConfigParser.NoSectionError: pass else: raise TestFailed( "remove_option() failed to report non-existance of option" " that never existed") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
if '__name__' in cf.options("Foo Bar"): print '__name__ "option" should not be exposed by the API!' else: print '__name__ "option" properly hidden by the API.' | verify('__name__' not in cf.options("Foo Bar"), '__name__ "option" should not be exposed by the API!') | def basic(src): print print "Testing basic accessors..." cf = ConfigParser.ConfigParser() sio = StringIO.StringIO(src) cf.readfp(sio) L = cf.sections() L.sort() print L for s in L: print "%s: %s" % (s, cf.options(s)) # The use of spaces in the section names serves as a regression test for # SourceForge bug #115357. # http://sourceforge.net/bugs/?func=detailbug&group_id=5470&bug_id=115357 print `cf.get('Foo Bar', 'foo', raw=1)` print `cf.get('Spacey Bar', 'foo', raw=1)` print `cf.get('Commented Bar', 'foo', raw=1)` if '__name__' in cf.options("Foo Bar"): print '__name__ "option" should not be exposed by the API!' else: print '__name__ "option" properly hidden by the API.' # Make sure the right things happen for remove_option(); # added to include check for SourceForge bug #123324: if not cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report existance of option") if cf.has_option('Foo Bar', 'foo'): raise TestFailed("remove_option() failed to remove option") if cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report non-existance of option" " that was removed") try: cf.remove_option('No Such Section', 'foo') except ConfigParser.NoSectionError: pass else: raise TestFailed( "remove_option() failed to report non-existance of option" " that never existed") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
if not cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report existance of option") if cf.has_option('Foo Bar', 'foo'): raise TestFailed("remove_option() failed to remove option") if cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report non-existance of option" " that was removed") | verify(cf.remove_option('Foo Bar', 'foo'), "remove_option() failed to report existance of option") verify(not cf.has_option('Foo Bar', 'foo'), "remove_option() failed to remove option") verify(not cf.remove_option('Foo Bar', 'foo'), "remove_option() failed to report non-existance of option" " that was removed") | def basic(src): print print "Testing basic accessors..." cf = ConfigParser.ConfigParser() sio = StringIO.StringIO(src) cf.readfp(sio) L = cf.sections() L.sort() print L for s in L: print "%s: %s" % (s, cf.options(s)) # The use of spaces in the section names serves as a regression test for # SourceForge bug #115357. # http://sourceforge.net/bugs/?func=detailbug&group_id=5470&bug_id=115357 print `cf.get('Foo Bar', 'foo', raw=1)` print `cf.get('Spacey Bar', 'foo', raw=1)` print `cf.get('Commented Bar', 'foo', raw=1)` if '__name__' in cf.options("Foo Bar"): print '__name__ "option" should not be exposed by the API!' else: print '__name__ "option" properly hidden by the API.' # Make sure the right things happen for remove_option(); # added to include check for SourceForge bug #123324: if not cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report existance of option") if cf.has_option('Foo Bar', 'foo'): raise TestFailed("remove_option() failed to remove option") if cf.remove_option('Foo Bar', 'foo'): raise TestFailed( "remove_option() failed to report non-existance of option" " that was removed") try: cf.remove_option('No Such Section', 'foo') except ConfigParser.NoSectionError: pass else: raise TestFailed( "remove_option() failed to report non-existance of option" " that never existed") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
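
These test_cfgparser rows replace printed expected-output checks with `verify()` assertions; historically that helper came from the test suite's `test_support` module. A minimal stand-in with the same shape:

```python
class TestFailed(Exception):
    pass

def verify(condition, reason='test failed'):
    # fail loudly instead of relying on diffing printed output
    if not condition:
        raise TestFailed(reason)

verify(1 + 1 == 2)
verify('__name__' not in ['foo', 'bar'],
       '__name__ "option" should not be exposed by the API!')
```
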
print | def interpolation(src): print print "Testing value interpolation..." cf = ConfigParser.ConfigParser({"getname": "%(__name__)s"}) sio = StringIO.StringIO(src) cf.readfp(sio) print `cf.get("Foo", "getname")` print `cf.get("Foo", "bar")` print `cf.get("Foo", "bar9")` print `cf.get("Foo", "bar10")` expect_get_error(cf, ConfigParser.InterpolationDepthError, "Foo", "bar11") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
|
print `cf.get("Foo", "getname")` print `cf.get("Foo", "bar")` print `cf.get("Foo", "bar9")` print `cf.get("Foo", "bar10")` | verify(cf.get("Foo", "getname") == "Foo") verify(cf.get("Foo", "bar") == "something with interpolation (1 step)") verify(cf.get("Foo", "bar9") == "something with lots of interpolation (9 steps)") verify(cf.get("Foo", "bar10") == "something with lots of interpolation (10 steps)") | def interpolation(src): print print "Testing value interpolation..." cf = ConfigParser.ConfigParser({"getname": "%(__name__)s"}) sio = StringIO.StringIO(src) cf.readfp(sio) print `cf.get("Foo", "getname")` print `cf.get("Foo", "bar")` print `cf.get("Foo", "bar9")` print `cf.get("Foo", "bar10")` expect_get_error(cf, ConfigParser.InterpolationDepthError, "Foo", "bar11") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print print "Testing for parsing errors..." | print "Testing parse errors..." | def parse_errors(): print print "Testing for parsing errors..." expect_parse_error(ConfigParser.ParsingError, """[Foo]\n extra-spaces: splat\n""") expect_parse_error(ConfigParser.ParsingError, """[Foo]\n extra-spaces= splat\n""") expect_parse_error(ConfigParser.ParsingError, """[Foo]\noption-without-value\n""") expect_parse_error(ConfigParser.ParsingError, """[Foo]\n:value-without-option-name\n""") expect_parse_error(ConfigParser.ParsingError, """[Foo]\n=value-without-option-name\n""") expect_parse_error(ConfigParser.MissingSectionHeaderError, """No Section!\n""") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print | def query_errors(): print print "Testing query interface..." cf = ConfigParser.ConfigParser() print cf.sections() print "Has section 'Foo'?", cf.has_section("Foo") try: cf.options("Foo") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from options()" try: cf.set("foo", "bar", "value") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from set()" expect_get_error(cf, ConfigParser.NoSectionError, "foo", "bar") cf.add_section("foo") expect_get_error(cf, ConfigParser.NoOptionError, "foo", "bar") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
|
print cf.sections() print "Has section 'Foo'?", cf.has_section("Foo") | verify(cf.sections() == [], "new ConfigParser should have no defined sections") verify(not cf.has_section("Foo"), "new ConfigParser should have no acknowledged sections") | def query_errors(): print print "Testing query interface..." cf = ConfigParser.ConfigParser() print cf.sections() print "Has section 'Foo'?", cf.has_section("Foo") try: cf.options("Foo") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from options()" try: cf.set("foo", "bar", "value") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from set()" expect_get_error(cf, ConfigParser.NoSectionError, "foo", "bar") cf.add_section("foo") expect_get_error(cf, ConfigParser.NoOptionError, "foo", "bar") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print "Caught expected NoSectionError:", e | pass | def query_errors(): print print "Testing query interface..." cf = ConfigParser.ConfigParser() print cf.sections() print "Has section 'Foo'?", cf.has_section("Foo") try: cf.options("Foo") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from options()" try: cf.set("foo", "bar", "value") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from set()" expect_get_error(cf, ConfigParser.NoSectionError, "foo", "bar") cf.add_section("foo") expect_get_error(cf, ConfigParser.NoOptionError, "foo", "bar") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print "Failed to catch expected NoSectionError from options()" | raise TestFailed( "Failed to catch expected NoSectionError from options()") | def query_errors(): print print "Testing query interface..." cf = ConfigParser.ConfigParser() print cf.sections() print "Has section 'Foo'?", cf.has_section("Foo") try: cf.options("Foo") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from options()" try: cf.set("foo", "bar", "value") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from set()" expect_get_error(cf, ConfigParser.NoSectionError, "foo", "bar") cf.add_section("foo") expect_get_error(cf, ConfigParser.NoOptionError, "foo", "bar") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print "Caught expected NoSectionError:", e | pass | def query_errors(): print print "Testing query interface..." cf = ConfigParser.ConfigParser() print cf.sections() print "Has section 'Foo'?", cf.has_section("Foo") try: cf.options("Foo") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from options()" try: cf.set("foo", "bar", "value") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from set()" expect_get_error(cf, ConfigParser.NoSectionError, "foo", "bar") cf.add_section("foo") expect_get_error(cf, ConfigParser.NoOptionError, "foo", "bar") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print "Failed to catch expected NoSectionError from set()" | raise TestFailed("Failed to catch expected NoSectionError from set()") | def query_errors(): print print "Testing query interface..." cf = ConfigParser.ConfigParser() print cf.sections() print "Has section 'Foo'?", cf.has_section("Foo") try: cf.options("Foo") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from options()" try: cf.set("foo", "bar", "value") except ConfigParser.NoSectionError, e: print "Caught expected NoSectionError:", e else: print "Failed to catch expected NoSectionError from set()" expect_get_error(cf, ConfigParser.NoSectionError, "foo", "bar") cf.add_section("foo") expect_get_error(cf, ConfigParser.NoOptionError, "foo", "bar") | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print | def weird_errors(): print print "Testing miscellaneous error conditions..." cf = ConfigParser.ConfigParser() cf.add_section("Foo") try: cf.add_section("Foo") except ConfigParser.DuplicateSectionError, e: print "Caught expected DuplicateSectionError:", e else: print "Failed to catch expected DuplicateSectionError" | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
|
print "Caught expected DuplicateSectionError:", e | pass | def weird_errors(): print print "Testing miscellaneous error conditions..." cf = ConfigParser.ConfigParser() cf.add_section("Foo") try: cf.add_section("Foo") except ConfigParser.DuplicateSectionError, e: print "Caught expected DuplicateSectionError:", e else: print "Failed to catch expected DuplicateSectionError" | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print "Failed to catch expected DuplicateSectionError" | raise TestFailed("Failed to catch expected DuplicateSectionError") | def weird_errors(): print print "Testing miscellaneous error conditions..." cf = ConfigParser.ConfigParser() cf.add_section("Foo") try: cf.add_section("Foo") except ConfigParser.DuplicateSectionError, e: print "Caught expected DuplicateSectionError:", e else: print "Failed to catch expected DuplicateSectionError" | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print "Caught expected", exctype.__name__, ":" print e | pass | def expect_get_error(cf, exctype, section, option, raw=0): try: cf.get(section, option, raw=raw) except exctype, e: print "Caught expected", exctype.__name__, ":" print e else: print "Failed to catch expected", exctype.__name__ | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print "Failed to catch expected", exctype.__name__ | raise TestFailed("Failed to catch expected " + exctype.__name__) | def expect_get_error(cf, exctype, section, option, raw=0): try: cf.get(section, option, raw=raw) except exctype, e: print "Caught expected", exctype.__name__, ":" print e else: print "Failed to catch expected", exctype.__name__ | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print "Caught expected exception:", e | pass | def expect_parse_error(exctype, src): cf = ConfigParser.ConfigParser() sio = StringIO.StringIO(src) try: cf.readfp(sio) except exctype, e: print "Caught expected exception:", e else: print "Failed to catch expected", exctype.__name__ | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
print "Failed to catch expected", exctype.__name__ | raise TestFailed("Failed to catch expected " + exctype.__name__) | def expect_parse_error(exctype, src): cf = ConfigParser.ConfigParser() sio = StringIO.StringIO(src) try: cf.readfp(sio) except exctype, e: print "Caught expected exception:", e else: print "Failed to catch expected", exctype.__name__ | 95b96d3941b739017f69b8ac695ca1ac25073a55 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/95b96d3941b739017f69b8ac695ca1ac25073a55/test_cfgparser.py |
import StringIO f = StringIO.StringIO(s) g = StringIO.StringIO() encode(f, g) return g.getvalue() | pieces = [] for i in range(0, len(s), MAXBINSIZE): chunk = s[i : i + MAXBINSIZE] pieces.append(binascii.b2a_base64(chunk)) return "".join(pieces) | def encodestring(s): """Encode a string.""" import StringIO f = StringIO.StringIO(s) g = StringIO.StringIO() encode(f, g) return g.getvalue() | fbb2b4c4a5f38f8337f637b31300eb218608d050 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fbb2b4c4a5f38f8337f637b31300eb218608d050/base64.py |
import StringIO f = StringIO.StringIO(s) g = StringIO.StringIO() decode(f, g) return g.getvalue() | return binascii.a2b_base64(s) | def decodestring(s): """Decode a string.""" import StringIO f = StringIO.StringIO(s) g = StringIO.StringIO() decode(f, g) return g.getvalue() | fbb2b4c4a5f38f8337f637b31300eb218608d050 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fbb2b4c4a5f38f8337f637b31300eb218608d050/base64.py |
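
The two base64 rows drop the StringIO round trip in favor of direct binascii calls, chunking input at MAXBINSIZE bytes so each encoded line fits the MIME 76-character limit (57 raw bytes encode to exactly 76 base64 characters). A bytes version of that pair:

```python
import binascii

MAXBINSIZE = 57  # 57 raw bytes -> one 76-char base64 line (plus newline)

def encodestring(s):
    pieces = []
    for i in range(0, len(s), MAXBINSIZE):
        chunk = s[i:i + MAXBINSIZE]
        pieces.append(binascii.b2a_base64(chunk))
    return b"".join(pieces)

def decodestring(s):
    # a2b_base64 skips the embedded newlines for us
    return binascii.a2b_base64(s)

assert decodestring(encodestring(b"x" * 200)) == b"x" * 200
```
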
self.checkequal(('http://www.python.org', '', ''), S, 'rpartition', '?') | self.checkequal(('', '', 'http://www.python.org'), S, 'rpartition', '?') | def test_rpartition(self): | 29a5fdb7caf372144a085247d63440d1abb51b5c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/29a5fdb7caf372144a085247d63440d1abb51b5c/string_tests.py |
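
The string_tests row fixes the expected value of `rpartition` when the separator is missing: the whole string lands in the *last* slot, mirroring `partition`, which puts it first. For example:

```python
S = 'http://www.python.org'
assert S.rpartition('?') == ('', '', 'http://www.python.org')
assert S.partition('?') == ('http://www.python.org', '', '')
assert S.rpartition('://') == ('http', '://', 'www.python.org')
```
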
name = alogger.name namelen = len(name) | def _fixupChildren(self, ph, alogger): """ Ensure that children of the placeholder ph are connected to the specified logger. """ #for c in ph.loggers: for c in ph.loggerMap.keys(): if string.find(c.parent.name, alogger.name) <> 0: alogger.parent = c.parent c.parent = alogger | ef7f3bf3b306cf8702496cb8ac2c7afdcf1b2fdb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/ef7f3bf3b306cf8702496cb8ac2c7afdcf1b2fdb/__init__.py |
|
if string.find(c.parent.name, alogger.name) <> 0: | if c.parent.name[:namelen] != name: | def _fixupChildren(self, ph, alogger): """ Ensure that children of the placeholder ph are connected to the specified logger. """ #for c in ph.loggers: for c in ph.loggerMap.keys(): if string.find(c.parent.name, alogger.name) <> 0: alogger.parent = c.parent c.parent = alogger | ef7f3bf3b306cf8702496cb8ac2c7afdcf1b2fdb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/ef7f3bf3b306cf8702496cb8ac2c7afdcf1b2fdb/__init__.py |
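
The logging rows hoist `alogger.name` and its length out of the loop and replace the `string.find(...) <> 0` call with a slice comparison; the modern spelling of the same prefix test is `str.startswith`. A tiny demonstration with hypothetical logger names:

```python
name = 'a.b'
candidates = ['a.b.c', 'a.bc', 'x.y']
namelen = len(name)
# slice comparison, as in the row above
print([c for c in candidates if c[:namelen] != name])     # ['x.y']
# equivalent modern idiom
print([c for c in candidates if not c.startswith(name)])  # ['x.y']
```
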
if lastcs is not None: if nextcs is None or nextcs == 'us-ascii': | if lastcs not in (None, 'us-ascii'): if nextcs in (None, 'us-ascii'): | def __unicode__(self): """Helper for the built-in unicode function.""" uchunks = [] lastcs = None for s, charset in self._chunks: # We must preserve spaces between encoded and non-encoded word # boundaries, which means for us we need to add a space when we go # from a charset to None/us-ascii, or from None/us-ascii to a # charset. Only do this for the second and subsequent chunks. nextcs = charset if uchunks: if lastcs is not None: if nextcs is None or nextcs == 'us-ascii': uchunks.append(USPACE) nextcs = None elif nextcs is not None and nextcs <> 'us-ascii': uchunks.append(USPACE) lastcs = nextcs uchunks.append(unicode(s, str(charset))) return UEMPTYSTRING.join(uchunks) | ba1548a7362aecf4c94523579486fed62de12d15 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/ba1548a7362aecf4c94523579486fed62de12d15/Header.py |
elif nextcs is not None and nextcs <> 'us-ascii': | elif nextcs not in (None, 'us-ascii'): | def __unicode__(self): """Helper for the built-in unicode function.""" uchunks = [] lastcs = None for s, charset in self._chunks: # We must preserve spaces between encoded and non-encoded word # boundaries, which means for us we need to add a space when we go # from a charset to None/us-ascii, or from None/us-ascii to a # charset. Only do this for the second and subsequent chunks. nextcs = charset if uchunks: if lastcs is not None: if nextcs is None or nextcs == 'us-ascii': uchunks.append(USPACE) nextcs = None elif nextcs is not None and nextcs <> 'us-ascii': uchunks.append(USPACE) lastcs = nextcs uchunks.append(unicode(s, str(charset))) return UEMPTYSTRING.join(uchunks) | ba1548a7362aecf4c94523579486fed62de12d15 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/ba1548a7362aecf4c94523579486fed62de12d15/Header.py |
self.file = self.sock.makefile('r') | self.file = self.sock.makefile('rb') | def open(self, host, port): """Setup connection to remote server on "host:port". This connection will be used by the routines: read, readline, send, shutdown. """ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.connect((self.host, self.port)) self.file = self.sock.makefile('r') | c0f1bfec05fcbd44390156f07008dc90f25ef487 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/c0f1bfec05fcbd44390156f07008dc90f25ef487/imaplib.py |
socket.SOL_SOCKET, socket.SO_REUSEADDR, | socket.SOL_SOCKET, reuse_constant, | def set_reuse_addr(self): # try to re-use a server port if possible try: self.socket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1 ) except socket.error: pass | 5336f8caa4ed6d3777183f8b0ab1fc81db3eec3d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/5336f8caa4ed6d3777183f8b0ab1fc81db3eec3d/asyncore.py |
socket.SO_REUSEADDR) | 1 | reuse_constant) | 1 | def set_reuse_addr(self): # try to re-use a server port if possible try: self.socket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1 ) except socket.error: pass | 5336f8caa4ed6d3777183f8b0ab1fc81db3eec3d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/5336f8caa4ed6d3777183f8b0ab1fc81db3eec3d/asyncore.py |
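
The asyncore rows parameterize the `SO_REUSEADDR` constant but keep the read-modify-write idiom: fetch the current option value and OR in 1. Standalone, the pattern looks like this:

```python
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    # try to re-use a server port if possible
    sock.setsockopt(
        socket.SOL_SOCKET, socket.SO_REUSEADDR,
        sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1)
except socket.error:
    pass
print(sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR))  # nonzero
```
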
def set_reuse_addr(self): # try to re-use a server port if possible try: self.socket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1 ) except socket.error: pass | 5336f8caa4ed6d3777183f8b0ab1fc81db3eec3d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/5336f8caa4ed6d3777183f8b0ab1fc81db3eec3d/asyncore.py |
inst.poll() | if inst.poll(_deadstate=sys.maxint) >= 0: try: _active.remove(inst) except ValueError: pass | def _cleanup(): for inst in _active[:]: inst.poll() | 17de8ffc215d8539860a8a7f06279c4155382c4f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/17de8ffc215d8539860a8a7f06279c4155382c4f/subprocess.py |
_active.append(self) | def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0): """Create new Popen instance.""" _cleanup() | 17de8ffc215d8539860a8a7f06279c4155382c4f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/17de8ffc215d8539860a8a7f06279c4155382c4f/subprocess.py |
|
def poll(self): | def poll(self, _deadstate=None): | def poll(self): """Check if child process has terminated. Returns returncode attribute.""" if self.returncode is None: if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0: self.returncode = GetExitCodeProcess(self._handle) _active.remove(self) return self.returncode | 17de8ffc215d8539860a8a7f06279c4155382c4f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/17de8ffc215d8539860a8a7f06279c4155382c4f/subprocess.py |
_active.remove(self) | def poll(self): """Check if child process has terminated. Returns returncode attribute.""" if self.returncode is None: if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0: self.returncode = GetExitCodeProcess(self._handle) _active.remove(self) return self.returncode | 17de8ffc215d8539860a8a7f06279c4155382c4f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/17de8ffc215d8539860a8a7f06279c4155382c4f/subprocess.py |
|
_active.remove(self) | def wait(self): """Wait for child process to terminate. Returns returncode attribute.""" if self.returncode is None: obj = WaitForSingleObject(self._handle, INFINITE) self.returncode = GetExitCodeProcess(self._handle) _active.remove(self) return self.returncode | 17de8ffc215d8539860a8a7f06279c4155382c4f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/17de8ffc215d8539860a8a7f06279c4155382c4f/subprocess.py |
|
_active.remove(self) def poll(self): | def poll(self, _deadstate=None): | def _handle_exitstatus(self, sts): if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) elif os.WIFEXITED(sts): self.returncode = os.WEXITSTATUS(sts) else: # Should never happen raise RuntimeError("Unknown child exit status!") | 17de8ffc215d8539860a8a7f06279c4155382c4f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/17de8ffc215d8539860a8a7f06279c4155382c4f/subprocess.py |
pass | if _deadstate is not None: self.returncode = _deadstate | def poll(self): """Check if child process has terminated. Returns returncode attribute.""" if self.returncode is None: try: pid, sts = os.waitpid(self.pid, os.WNOHANG) if pid == self.pid: self._handle_exitstatus(sts) except os.error: pass return self.returncode | 17de8ffc215d8539860a8a7f06279c4155382c4f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/17de8ffc215d8539860a8a7f06279c4155382c4f/subprocess.py |
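
The subprocess rows thread a `_deadstate` argument through `poll()` so `_cleanup()` can stamp a sentinel returncode on children whose real exit status can no longer be collected, instead of leaving them in `_active` forever. A simplified model of that control flow (`FakeProcess` is a stand-in; the real code calls `os.waitpid` or `WaitForSingleObject`):

```python
import sys

_active = []

class FakeProcess:
    def __init__(self):
        self.returncode = None
        _active.append(self)

    def poll(self, _deadstate=None):
        # pretend the OS-level wait failed; fall back to the sentinel
        if self.returncode is None and _deadstate is not None:
            self.returncode = _deadstate
        return self.returncode

def _cleanup():
    for inst in _active[:]:
        if inst.poll(_deadstate=sys.maxsize) >= 0:  # sys.maxint in Python 2
            _active.remove(inst)

FakeProcess()
_cleanup()
print(_active)  # [] -- the stale child was marked dead and dropped
```
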
text='when tab key inserts spaces,\nspaces per tab') | text='when tab key inserts spaces,\nspaces per indent') | def CreatePageFontTab(self): #tkVars self.fontSize=StringVar(self) self.fontBold=BooleanVar(self) self.fontName=StringVar(self) self.spaceNum=IntVar(self) self.tabCols=IntVar(self) self.indentBySpaces=BooleanVar(self) self.editFont=tkFont.Font(self,('courier',12,'normal')) ##widget creation #body frame frame=self.tabPages.pages['Fonts/Tabs']['page'] #body section frames frameFont=Frame(frame,borderwidth=2,relief=GROOVE) frameIndent=Frame(frame,borderwidth=2,relief=GROOVE) #frameFont labelFontTitle=Label(frameFont,text='Set Base Editor Font') frameFontName=Frame(frameFont) frameFontParam=Frame(frameFont) labelFontNameTitle=Label(frameFontName,justify=LEFT, text='Font :') self.listFontName=Listbox(frameFontName,height=5,takefocus=FALSE, exportselection=FALSE) self.listFontName.bind('<ButtonRelease-1>',self.OnListFontButtonRelease) scrollFont=Scrollbar(frameFontName) scrollFont.config(command=self.listFontName.yview) self.listFontName.config(yscrollcommand=scrollFont.set) labelFontSizeTitle=Label(frameFontParam,text='Size :') self.optMenuFontSize=DynOptionMenu(frameFontParam,self.fontSize,None, command=self.SetFontSample) checkFontBold=Checkbutton(frameFontParam,variable=self.fontBold, onvalue=1,offvalue=0,text='Bold',command=self.SetFontSample) frameFontSample=Frame(frameFont,relief=SOLID,borderwidth=1) self.labelFontSample=Label(frameFontSample, text='AaBbCcDdEe\nFfGgHhIiJjK\n1234567890\n#:+=(){}[]', justify=LEFT,font=self.editFont) #frameIndent labelIndentTitle=Label(frameIndent,text='Set Indentation Defaults') frameIndentType=Frame(frameIndent) frameIndentSize=Frame(frameIndent) labelIndentTypeTitle=Label(frameIndentType, text='Choose indentation type :') radioUseSpaces=Radiobutton(frameIndentType,variable=self.indentBySpaces, value=1,text='Tab key inserts spaces') radioUseTabs=Radiobutton(frameIndentType,variable=self.indentBySpaces, value=0,text='Tab key inserts tabs') labelIndentSizeTitle=Label(frameIndentSize, text='Choose indentation size :') labelSpaceNumTitle=Label(frameIndentSize,justify=LEFT, text='when tab key inserts spaces,\nspaces per tab') self.scaleSpaceNum=Scale(frameIndentSize,variable=self.spaceNum, orient='horizontal',tickinterval=2,from_=2,to=8) labeltabColsTitle=Label(frameIndentSize,justify=LEFT, text='when tab key inserts tabs,\ncolumns per tab') self.scaleTabCols=Scale(frameIndentSize,variable=self.tabCols, orient='horizontal',tickinterval=2,from_=2,to=8) #widget packing #body frameFont.pack(side=LEFT,padx=5,pady=10,expand=TRUE,fill=BOTH) frameIndent.pack(side=LEFT,padx=5,pady=10,fill=Y) #frameFont labelFontTitle.pack(side=TOP,anchor=W,padx=5,pady=5) frameFontName.pack(side=TOP,padx=5,pady=5,fill=X) frameFontParam.pack(side=TOP,padx=5,pady=5,fill=X) labelFontNameTitle.pack(side=TOP,anchor=W) self.listFontName.pack(side=LEFT,expand=TRUE,fill=X) scrollFont.pack(side=LEFT,fill=Y) labelFontSizeTitle.pack(side=LEFT,anchor=W) self.optMenuFontSize.pack(side=LEFT,anchor=W) checkFontBold.pack(side=LEFT,anchor=W,padx=20) frameFontSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH) self.labelFontSample.pack(expand=TRUE,fill=BOTH) #frameIndent labelIndentTitle.pack(side=TOP,anchor=W,padx=5,pady=5) frameIndentType.pack(side=TOP,padx=5,fill=X) frameIndentSize.pack(side=TOP,padx=5,pady=5,fill=BOTH) labelIndentTypeTitle.pack(side=TOP,anchor=W,padx=5,pady=5) radioUseSpaces.pack(side=TOP,anchor=W,padx=5) radioUseTabs.pack(side=TOP,anchor=W,padx=5) 
labelIndentSizeTitle.pack(side=TOP,anchor=W,padx=5,pady=5) labelSpaceNumTitle.pack(side=TOP,anchor=W,padx=5) self.scaleSpaceNum.pack(side=TOP,padx=5,fill=X) labeltabColsTitle.pack(side=TOP,anchor=W,padx=5) self.scaleTabCols.pack(side=TOP,padx=5,fill=X) return frame | 63f6714c3ab713767dc35abc2b1129a1d264d858 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/63f6714c3ab713767dc35abc2b1129a1d264d858/configDialog.py |
labeltabColsTitle=Label(frameIndentSize,justify=LEFT, text='when tab key inserts tabs,\ncolumns per tab') self.scaleTabCols=Scale(frameIndentSize,variable=self.tabCols, orient='horizontal',tickinterval=2,from_=2,to=8) | def CreatePageFontTab(self): #tkVars self.fontSize=StringVar(self) self.fontBold=BooleanVar(self) self.fontName=StringVar(self) self.spaceNum=IntVar(self) self.tabCols=IntVar(self) self.indentBySpaces=BooleanVar(self) self.editFont=tkFont.Font(self,('courier',12,'normal')) ##widget creation #body frame frame=self.tabPages.pages['Fonts/Tabs']['page'] #body section frames frameFont=Frame(frame,borderwidth=2,relief=GROOVE) frameIndent=Frame(frame,borderwidth=2,relief=GROOVE) #frameFont labelFontTitle=Label(frameFont,text='Set Base Editor Font') frameFontName=Frame(frameFont) frameFontParam=Frame(frameFont) labelFontNameTitle=Label(frameFontName,justify=LEFT, text='Font :') self.listFontName=Listbox(frameFontName,height=5,takefocus=FALSE, exportselection=FALSE) self.listFontName.bind('<ButtonRelease-1>',self.OnListFontButtonRelease) scrollFont=Scrollbar(frameFontName) scrollFont.config(command=self.listFontName.yview) self.listFontName.config(yscrollcommand=scrollFont.set) labelFontSizeTitle=Label(frameFontParam,text='Size :') self.optMenuFontSize=DynOptionMenu(frameFontParam,self.fontSize,None, command=self.SetFontSample) checkFontBold=Checkbutton(frameFontParam,variable=self.fontBold, onvalue=1,offvalue=0,text='Bold',command=self.SetFontSample) frameFontSample=Frame(frameFont,relief=SOLID,borderwidth=1) self.labelFontSample=Label(frameFontSample, text='AaBbCcDdEe\nFfGgHhIiJjK\n1234567890\n#:+=(){}[]', justify=LEFT,font=self.editFont) #frameIndent labelIndentTitle=Label(frameIndent,text='Set Indentation Defaults') frameIndentType=Frame(frameIndent) frameIndentSize=Frame(frameIndent) labelIndentTypeTitle=Label(frameIndentType, text='Choose indentation type :') radioUseSpaces=Radiobutton(frameIndentType,variable=self.indentBySpaces, value=1,text='Tab key inserts spaces') radioUseTabs=Radiobutton(frameIndentType,variable=self.indentBySpaces, value=0,text='Tab key inserts tabs') labelIndentSizeTitle=Label(frameIndentSize, text='Choose indentation size :') labelSpaceNumTitle=Label(frameIndentSize,justify=LEFT, text='when tab key inserts spaces,\nspaces per tab') self.scaleSpaceNum=Scale(frameIndentSize,variable=self.spaceNum, orient='horizontal',tickinterval=2,from_=2,to=8) labeltabColsTitle=Label(frameIndentSize,justify=LEFT, text='when tab key inserts tabs,\ncolumns per tab') self.scaleTabCols=Scale(frameIndentSize,variable=self.tabCols, orient='horizontal',tickinterval=2,from_=2,to=8) #widget packing #body frameFont.pack(side=LEFT,padx=5,pady=10,expand=TRUE,fill=BOTH) frameIndent.pack(side=LEFT,padx=5,pady=10,fill=Y) #frameFont labelFontTitle.pack(side=TOP,anchor=W,padx=5,pady=5) frameFontName.pack(side=TOP,padx=5,pady=5,fill=X) frameFontParam.pack(side=TOP,padx=5,pady=5,fill=X) labelFontNameTitle.pack(side=TOP,anchor=W) self.listFontName.pack(side=LEFT,expand=TRUE,fill=X) scrollFont.pack(side=LEFT,fill=Y) labelFontSizeTitle.pack(side=LEFT,anchor=W) self.optMenuFontSize.pack(side=LEFT,anchor=W) checkFontBold.pack(side=LEFT,anchor=W,padx=20) frameFontSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH) self.labelFontSample.pack(expand=TRUE,fill=BOTH) #frameIndent labelIndentTitle.pack(side=TOP,anchor=W,padx=5,pady=5) frameIndentType.pack(side=TOP,padx=5,fill=X) frameIndentSize.pack(side=TOP,padx=5,pady=5,fill=BOTH) labelIndentTypeTitle.pack(side=TOP,anchor=W,padx=5,pady=5) 
radioUseSpaces.pack(side=TOP,anchor=W,padx=5) radioUseTabs.pack(side=TOP,anchor=W,padx=5) labelIndentSizeTitle.pack(side=TOP,anchor=W,padx=5,pady=5) labelSpaceNumTitle.pack(side=TOP,anchor=W,padx=5) self.scaleSpaceNum.pack(side=TOP,padx=5,fill=X) labeltabColsTitle.pack(side=TOP,anchor=W,padx=5) self.scaleTabCols.pack(side=TOP,padx=5,fill=X) return frame | 63f6714c3ab713767dc35abc2b1129a1d264d858 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/63f6714c3ab713767dc35abc2b1129a1d264d858/configDialog.py |
|
labeltabColsTitle.pack(side=TOP,anchor=W,padx=5) self.scaleTabCols.pack(side=TOP,padx=5,fill=X) | def CreatePageFontTab(self): #tkVars self.fontSize=StringVar(self) self.fontBold=BooleanVar(self) self.fontName=StringVar(self) self.spaceNum=IntVar(self) self.tabCols=IntVar(self) self.indentBySpaces=BooleanVar(self) self.editFont=tkFont.Font(self,('courier',12,'normal')) ##widget creation #body frame frame=self.tabPages.pages['Fonts/Tabs']['page'] #body section frames frameFont=Frame(frame,borderwidth=2,relief=GROOVE) frameIndent=Frame(frame,borderwidth=2,relief=GROOVE) #frameFont labelFontTitle=Label(frameFont,text='Set Base Editor Font') frameFontName=Frame(frameFont) frameFontParam=Frame(frameFont) labelFontNameTitle=Label(frameFontName,justify=LEFT, text='Font :') self.listFontName=Listbox(frameFontName,height=5,takefocus=FALSE, exportselection=FALSE) self.listFontName.bind('<ButtonRelease-1>',self.OnListFontButtonRelease) scrollFont=Scrollbar(frameFontName) scrollFont.config(command=self.listFontName.yview) self.listFontName.config(yscrollcommand=scrollFont.set) labelFontSizeTitle=Label(frameFontParam,text='Size :') self.optMenuFontSize=DynOptionMenu(frameFontParam,self.fontSize,None, command=self.SetFontSample) checkFontBold=Checkbutton(frameFontParam,variable=self.fontBold, onvalue=1,offvalue=0,text='Bold',command=self.SetFontSample) frameFontSample=Frame(frameFont,relief=SOLID,borderwidth=1) self.labelFontSample=Label(frameFontSample, text='AaBbCcDdEe\nFfGgHhIiJjK\n1234567890\n#:+=(){}[]', justify=LEFT,font=self.editFont) #frameIndent labelIndentTitle=Label(frameIndent,text='Set Indentation Defaults') frameIndentType=Frame(frameIndent) frameIndentSize=Frame(frameIndent) labelIndentTypeTitle=Label(frameIndentType, text='Choose indentation type :') radioUseSpaces=Radiobutton(frameIndentType,variable=self.indentBySpaces, value=1,text='Tab key inserts spaces') radioUseTabs=Radiobutton(frameIndentType,variable=self.indentBySpaces, value=0,text='Tab key inserts tabs') labelIndentSizeTitle=Label(frameIndentSize, text='Choose indentation size :') labelSpaceNumTitle=Label(frameIndentSize,justify=LEFT, text='when tab key inserts spaces,\nspaces per tab') self.scaleSpaceNum=Scale(frameIndentSize,variable=self.spaceNum, orient='horizontal',tickinterval=2,from_=2,to=8) labeltabColsTitle=Label(frameIndentSize,justify=LEFT, text='when tab key inserts tabs,\ncolumns per tab') self.scaleTabCols=Scale(frameIndentSize,variable=self.tabCols, orient='horizontal',tickinterval=2,from_=2,to=8) #widget packing #body frameFont.pack(side=LEFT,padx=5,pady=10,expand=TRUE,fill=BOTH) frameIndent.pack(side=LEFT,padx=5,pady=10,fill=Y) #frameFont labelFontTitle.pack(side=TOP,anchor=W,padx=5,pady=5) frameFontName.pack(side=TOP,padx=5,pady=5,fill=X) frameFontParam.pack(side=TOP,padx=5,pady=5,fill=X) labelFontNameTitle.pack(side=TOP,anchor=W) self.listFontName.pack(side=LEFT,expand=TRUE,fill=X) scrollFont.pack(side=LEFT,fill=Y) labelFontSizeTitle.pack(side=LEFT,anchor=W) self.optMenuFontSize.pack(side=LEFT,anchor=W) checkFontBold.pack(side=LEFT,anchor=W,padx=20) frameFontSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH) self.labelFontSample.pack(expand=TRUE,fill=BOTH) #frameIndent labelIndentTitle.pack(side=TOP,anchor=W,padx=5,pady=5) frameIndentType.pack(side=TOP,padx=5,fill=X) frameIndentSize.pack(side=TOP,padx=5,pady=5,fill=BOTH) labelIndentTypeTitle.pack(side=TOP,anchor=W,padx=5,pady=5) radioUseSpaces.pack(side=TOP,anchor=W,padx=5) radioUseTabs.pack(side=TOP,anchor=W,padx=5) 
labelIndentSizeTitle.pack(side=TOP,anchor=W,padx=5,pady=5) labelSpaceNumTitle.pack(side=TOP,anchor=W,padx=5) self.scaleSpaceNum.pack(side=TOP,padx=5,fill=X) labeltabColsTitle.pack(side=TOP,anchor=W,padx=5) self.scaleTabCols.pack(side=TOP,padx=5,fill=X) return frame | 63f6714c3ab713767dc35abc2b1129a1d264d858 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/63f6714c3ab713767dc35abc2b1129a1d264d858/configDialog.py |
|
def LoadTabCfg(self): ##indent type radibuttons spaceIndent=idleConf.GetOption('main','Indent','use-spaces', default=1,type='bool') self.indentBySpaces.set(spaceIndent) ##indent sizes spaceNum=idleConf.GetOption('main','Indent','num-spaces', default=4,type='int') tabCols=idleConf.GetOption('main','Indent','tab-cols', default=4,type='int') self.spaceNum.set(spaceNum) self.tabCols.set(tabCols) | 63f6714c3ab713767dc35abc2b1129a1d264d858 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/63f6714c3ab713767dc35abc2b1129a1d264d858/configDialog.py |
tabCols=idleConf.GetOption('main','Indent','tab-cols', default=4,type='int') | def LoadTabCfg(self): ##indent type radibuttons spaceIndent=idleConf.GetOption('main','Indent','use-spaces', default=1,type='bool') self.indentBySpaces.set(spaceIndent) ##indent sizes spaceNum=idleConf.GetOption('main','Indent','num-spaces', default=4,type='int') tabCols=idleConf.GetOption('main','Indent','tab-cols', default=4,type='int') self.spaceNum.set(spaceNum) self.tabCols.set(tabCols) | 63f6714c3ab713767dc35abc2b1129a1d264d858 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/63f6714c3ab713767dc35abc2b1129a1d264d858/configDialog.py |
|
self.tabCols.set(tabCols) | def LoadTabCfg(self): ##indent type radibuttons spaceIndent=idleConf.GetOption('main','Indent','use-spaces', default=1,type='bool') self.indentBySpaces.set(spaceIndent) ##indent sizes spaceNum=idleConf.GetOption('main','Indent','num-spaces', default=4,type='int') tabCols=idleConf.GetOption('main','Indent','tab-cols', default=4,type='int') self.spaceNum.set(spaceNum) self.tabCols.set(tabCols) | 63f6714c3ab713767dc35abc2b1129a1d264d858 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/63f6714c3ab713767dc35abc2b1129a1d264d858/configDialog.py |
|
def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') | 259b1e18b4b5f8acca8366efa3a06e7d489d1045 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/259b1e18b4b5f8acca8366efa3a06e7d489d1045/setup.py |
version_req = '"1.1.4"' | version_req = '"1.1.3"' | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') | 259b1e18b4b5f8acca8366efa3a06e7d489d1045 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/259b1e18b4b5f8acca8366efa3a06e7d489d1045/setup.py |
websucker.Sucker.savefilename(self, url)) | websucker.Sucker.savefilename(self.sucker, url)) | def go(self, event=None): if not self.msgq: self.msgq = Queue.Queue(0) self.check_msgq() if not self.sucker: self.sucker = SuckerThread(self.msgq) if self.sucker.stopit: return self.url_entry.selection_range(0, END) url = self.url_entry.get() url = url.strip() if not url: self.top.bell() self.message("[Error: No URL entered]") return self.rooturl = url dir = self.dir_entry.get().strip() if not dir: self.sucker.savedir = None else: self.sucker.savedir = dir self.sucker.rootdir = os.path.dirname( websucker.Sucker.savefilename(self, url)) self.go_button.configure(state=DISABLED) self.auto_button.configure(state=DISABLED) self.cancel_button.configure(state=NORMAL) self.message( '[running...]') self.sucker.stopit = 0 t = threading.Thread(target=self.sucker.run1, args=(url,)) t.start() | 592c4cc46059e6443b7ae235c73180b75a3a69f7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/592c4cc46059e6443b7ae235c73180b75a3a69f7/wsgui.py |
_platform_cache = {True:None, False:None} _platform_aliased_cache = {True:None, False:None} | _platform_cache_terse = None _platform_cache_not_terse = None _platform_aliased_cache_terse = None _platform_aliased_cache_not_terse = None | def python_compiler(): """ Returns a string identifying the compiler used for compiling Python. """ return _sys_version()[3] | 9001cde559ef1b4f6b700c76bc2c86aef14b4e9d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/9001cde559ef1b4f6b700c76bc2c86aef14b4e9d/platform.py |
global _platform_cache,_platform_aliased_cache if not aliased and (_platform_cache[bool(terse)] is not None): return _platform_cache[bool(terse)] elif _platform_aliased_cache[bool(terse)] is not None: return _platform_aliased_cache[bool(terse)] | global _platform_cache_terse, _platform_cache_not_terse global _platform_aliased_cache_terse, _platform_aliased_cache_not_terse if not aliased and terse and (_platform_cache_terse is not None): return _platform_cache_terse elif not aliased and not terse and (_platform_cache_not_terse is not None): return _platform_cache_not_terse elif terse and _platform_aliased_cache_terse is not None: return _platform_aliased_cache_terse elif not terse and _platform_aliased_cache_not_terse is not None: return _platform_aliased_cache_not_terse | def platform(aliased=0, terse=0): """ Returns a single string identifying the underlying platform with as much useful information as possible (but no more :). The output is intended to be human readable rather than machine parseable. It may look different on different platforms and this is intended. If "aliased" is true, the function will use aliases for various platforms that report system names which differ from their common names, e.g. SunOS will be reported as Solaris. The system_alias() function is used to implement this. Setting terse to true causes the function to return only the absolute minimum information needed to identify the platform. """ global _platform_cache,_platform_aliased_cache if not aliased and (_platform_cache[bool(terse)] is not None): return _platform_cache[bool(terse)] elif _platform_aliased_cache[bool(terse)] is not None: return _platform_aliased_cache[bool(terse)] # Get uname information and then apply platform specific cosmetics # to it... system,node,release,version,machine,processor = uname() if machine == processor: processor = '' if aliased: system,release,version = system_alias(system,release,version) if system == 'Windows': # MS platforms rel,vers,csd,ptype = win32_ver(version) if terse: platform = _platform(system,release) else: platform = _platform(system,release,version,csd) elif system in ('Linux',): # Linux based systems distname,distversion,distid = dist('') if distname and not terse: platform = _platform(system,release,machine,processor, 'with', distname,distversion,distid) else: # If the distribution name is unknown check for libc vs. glibc libcname,libcversion = libc_ver(sys.executable) platform = _platform(system,release,machine,processor, 'with', libcname+libcversion) elif system == 'Java': # Java platforms r,v,vminfo,(os_name,os_version,os_arch) = java_ver() if terse: platform = _platform(system,release,version) else: platform = _platform(system,release,version, 'on', os_name,os_version,os_arch) elif system == 'MacOS': # MacOS platforms if terse: platform = _platform(system,release) else: platform = _platform(system,release,machine) else: # Generic handler if terse: platform = _platform(system,release) else: bits,linkage = architecture(sys.executable) platform = _platform(system,release,machine,processor,bits,linkage) if aliased: _platform_aliased_cache[bool(terse)] = platform elif terse: pass else: _platform_cache[bool(terse)] = platform return platform | 9001cde559ef1b4f6b700c76bc2c86aef14b4e9d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/9001cde559ef1b4f6b700c76bc2c86aef14b4e9d/platform.py |
if aliased: _platform_aliased_cache[bool(terse)] = platform | if aliased and terse: _platform_aliased_cache_terse = platform elif aliased and not terse: _platform_aliased_cache_not_terse = platform | def platform(aliased=0, terse=0): """ Returns a single string identifying the underlying platform with as much useful information as possible (but no more :). The output is intended to be human readable rather than machine parseable. It may look different on different platforms and this is intended. If "aliased" is true, the function will use aliases for various platforms that report system names which differ from their common names, e.g. SunOS will be reported as Solaris. The system_alias() function is used to implement this. Setting terse to true causes the function to return only the absolute minimum information needed to identify the platform. """ global _platform_cache,_platform_aliased_cache if not aliased and (_platform_cache[bool(terse)] is not None): return _platform_cache[bool(terse)] elif _platform_aliased_cache[bool(terse)] is not None: return _platform_aliased_cache[bool(terse)] # Get uname information and then apply platform specific cosmetics # to it... system,node,release,version,machine,processor = uname() if machine == processor: processor = '' if aliased: system,release,version = system_alias(system,release,version) if system == 'Windows': # MS platforms rel,vers,csd,ptype = win32_ver(version) if terse: platform = _platform(system,release) else: platform = _platform(system,release,version,csd) elif system in ('Linux',): # Linux based systems distname,distversion,distid = dist('') if distname and not terse: platform = _platform(system,release,machine,processor, 'with', distname,distversion,distid) else: # If the distribution name is unknown check for libc vs. glibc libcname,libcversion = libc_ver(sys.executable) platform = _platform(system,release,machine,processor, 'with', libcname+libcversion) elif system == 'Java': # Java platforms r,v,vminfo,(os_name,os_version,os_arch) = java_ver() if terse: platform = _platform(system,release,version) else: platform = _platform(system,release,version, 'on', os_name,os_version,os_arch) elif system == 'MacOS': # MacOS platforms if terse: platform = _platform(system,release) else: platform = _platform(system,release,machine) else: # Generic handler if terse: platform = _platform(system,release) else: bits,linkage = architecture(sys.executable) platform = _platform(system,release,machine,processor,bits,linkage) if aliased: _platform_aliased_cache[bool(terse)] = platform elif terse: pass else: _platform_cache[bool(terse)] = platform return platform | 9001cde559ef1b4f6b700c76bc2c86aef14b4e9d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/9001cde559ef1b4f6b700c76bc2c86aef14b4e9d/platform.py |
_platform_cache[bool(terse)] = platform | if terse: _platform_cache_terse = platform else: _platform_cache_not_terse = platform | def platform(aliased=0, terse=0): """ Returns a single string identifying the underlying platform with as much useful information as possible (but no more :). The output is intended to be human readable rather than machine parseable. It may look different on different platforms and this is intended. If "aliased" is true, the function will use aliases for various platforms that report system names which differ from their common names, e.g. SunOS will be reported as Solaris. The system_alias() function is used to implement this. Setting terse to true causes the function to return only the absolute minimum information needed to identify the platform. """ global _platform_cache,_platform_aliased_cache if not aliased and (_platform_cache[bool(terse)] is not None): return _platform_cache[bool(terse)] elif _platform_aliased_cache[bool(terse)] is not None: return _platform_aliased_cache[bool(terse)] # Get uname information and then apply platform specific cosmetics # to it... system,node,release,version,machine,processor = uname() if machine == processor: processor = '' if aliased: system,release,version = system_alias(system,release,version) if system == 'Windows': # MS platforms rel,vers,csd,ptype = win32_ver(version) if terse: platform = _platform(system,release) else: platform = _platform(system,release,version,csd) elif system in ('Linux',): # Linux based systems distname,distversion,distid = dist('') if distname and not terse: platform = _platform(system,release,machine,processor, 'with', distname,distversion,distid) else: # If the distribution name is unknown check for libc vs. glibc libcname,libcversion = libc_ver(sys.executable) platform = _platform(system,release,machine,processor, 'with', libcname+libcversion) elif system == 'Java': # Java platforms r,v,vminfo,(os_name,os_version,os_arch) = java_ver() if terse: platform = _platform(system,release,version) else: platform = _platform(system,release,version, 'on', os_name,os_version,os_arch) elif system == 'MacOS': # MacOS platforms if terse: platform = _platform(system,release) else: platform = _platform(system,release,machine) else: # Generic handler if terse: platform = _platform(system,release) else: bits,linkage = architecture(sys.executable) platform = _platform(system,release,machine,processor,bits,linkage) if aliased: _platform_aliased_cache[bool(terse)] = platform elif terse: pass else: _platform_cache[bool(terse)] = platform return platform | 9001cde559ef1b4f6b700c76bc2c86aef14b4e9d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/9001cde559ef1b4f6b700c76bc2c86aef14b4e9d/platform.py |
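
The platform.py rows split one cache into four module-level variables, one per `(aliased, terse)` combination. A dict keyed by the flag pair expresses the same memoization more compactly (a sketch, not the stdlib's actual code; the platform string here is a placeholder):

```python
_platform_cache = {}

def platform(aliased=0, terse=0):
    key = (bool(aliased), bool(terse))
    if key in _platform_cache:
        return _platform_cache[key]
    # ... real code builds the string from uname(), dist(), etc. ...
    result = 'Example-1.0-%s-%s' % ('aliased' if aliased else 'plain',
                                    'terse' if terse else 'full')
    _platform_cache[key] = result
    return result

print(platform())         # computed once, then served from the cache
print(platform(terse=1))  # cached independently of the non-terse variant
```
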
version = "HTTP/0.9" | def handle(self): | f69da220bfe1b8a0b8a061a0463fbb7d542ffa0f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f69da220bfe1b8a0b8a061a0463fbb7d542ffa0f/BaseHTTPServer.py |
|
self.send_error(400, "Bad request syntax (%s)" % `command`) | self.send_error(400, "Bad request syntax (%s)" % `requestline`) | def handle(self): | f69da220bfe1b8a0b8a061a0463fbb7d542ffa0f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f69da220bfe1b8a0b8a061a0463fbb7d542ffa0f/BaseHTTPServer.py |
self.send_error(501, "Unsupported method (%s)" % `command`) | self.send_error(501, "Unsupported method (%s)" % `mname`) | def handle(self): | f69da220bfe1b8a0b8a061a0463fbb7d542ffa0f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f69da220bfe1b8a0b8a061a0463fbb7d542ffa0f/BaseHTTPServer.py |
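Both BaseHTTPServer fixes above make the error message quote the right variable: the raw requestline for a 400 and the method name mname for a 501. A minimal handler sketch using the Python 2 module name these records come from; the class name and port are made up:

    from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler

    class EchoHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.send_header("Content-Type", "text/plain")
            self.end_headers()
            # self.requestline is the raw first line, e.g. "GET / HTTP/1.0"
            self.wfile.write("you sent: %s\n" % self.requestline)

    # HTTPServer(("", 8000), EchoHandler).serve_forever()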
self.assertEqual(grp.getgrgid(e.gr_gid), e) | entriesbygid.setdefault(e.gr_gid, []).append(e) for e in entries: self.assert_(grp.getgrgid(e.gr_gid) in entriesbygid[e.gr_gid]) | def test_values(self): entries = grp.getgrall() | 43bc1f18dbf18b9a7639f0933b829cf83d05c382 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/43bc1f18dbf18b9a7639f0933b829cf83d05c382/test_grp.py |
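The test fix above allows for several group entries sharing one gid, in which case getgrgid() may legitimately return any of them. The tolerant check, stand-alone:

    import grp

    entries = grp.getgrall()
    bygid = {}
    for e in entries:
        bygid.setdefault(e.gr_gid, []).append(e)
    for e in entries:
        # any entry sharing this gid is an acceptable answer
        assert grp.getgrgid(e.gr_gid) in bygid[e.gr_gid]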
A2 = "%s:%s" % (req.has_data() and 'POST' or 'GET', | A2 = "%s:%s" % (req.get_method(), | def get_authorization(self, req, chal): try: realm = chal['realm'] nonce = chal['nonce'] qop = chal.get('qop') algorithm = chal.get('algorithm', 'MD5') # mod_digest doesn't send an opaque, even though it isn't # supposed to be optional opaque = chal.get('opaque', None) except KeyError: return None | cdd625a77067e226a5dc715d1892f9511a067391 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/cdd625a77067e226a5dc715d1892f9511a067391/urllib2.py |
def maketables(): | def maketables(trace=0): | def maketables(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) # 1) database properties for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" item = ( category, combining, bidirectional, mirrored ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # 2) decomposition data # FIXME: <fl> using the encoding stuff from unidb would save # another 50k or so, but I'll leave that for 2.1... decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i FILE = "Modules/unicodedata_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique database records */" print >>fp, \ "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print >>fp, " {%d, %d, %d, %d}," % item print >>fp, "};" print >>fp # FIXME: the following tables should be made static, and # the support code moved into unicodedatabase.c print >>fp, "/* string literals */" print >>fp, "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "static const char *decomp_data[] = {" for name in decomp_data: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" # split record index table index1, index2, shift = splitbins(index) print >>fp, "/* index tables for the database records */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) # split decomposition index table index1, index2, shift = splitbins(decomp_index) print >>fp, "/* index tables for the decomposition data */" print >>fp, "#define DECOMP_SHIFT", shift Array("decomp_index1", index1).dump(fp) Array("decomp_index2", index2).dump(fp) # # 3) unicode type data # extract unicode types dummy = (0, 0, 0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = record[2] bidirectional = record[4] flags = 0 if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]: flags |= ALPHA_MASK if category == "Ll": flags |= LOWER_MASK if category == "Zl" or bidirectional == "B": flags |= LINEBREAK_MASK if category == "Zs" or bidirectional in ("WS", "B", "S"): flags |= SPACE_MASK if category == "Lt": flags |= TITLE_MASK if category == "Lu": flags |= UPPER_MASK # use delta predictor for upper/lower/title if record[12]: upper = (int(record[12], 16) - char) & 0xffff else: upper = 0 if record[13]: lower = (int(record[13], 16) - char) & 0xffff else: lower = 0 if record[14]: title = (int(record[14], 16) - char) & 0xffff else: title = 0 # decimal digit, integer digit decimal = 0 if record[6]: flags |= DECIMAL_MASK decimal = 
int(record[6]) digit = 0 if record[7]: flags |= DIGIT_MASK digit = int(record[7]) item = ( flags, upper, lower, title, decimal, digit ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i print len(table), "ctype entries" FILE = "Objects/unicodetype_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique character type descriptors */" print >>fp, "const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {" for item in table: print >>fp, " {%d, %d, %d, %d, %d, %d}," % item print >>fp, "};" print >>fp # split decomposition index table index1, index2, shift = splitbins(index) print >>fp, "/* type indexes */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) | fad27aee11e8ece649b72c8ad9513f46882d23ba /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fad27aee11e8ece649b72c8ad9513f46882d23ba/makeunicodedata.py |
index1, index2, shift = splitbins(index) | index1, index2, shift = splitbins(index, trace) | def maketables(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) # 1) database properties for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" item = ( category, combining, bidirectional, mirrored ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # 2) decomposition data # FIXME: <fl> using the encoding stuff from unidb would save # another 50k or so, but I'll leave that for 2.1... decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i FILE = "Modules/unicodedata_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique database records */" print >>fp, \ "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print >>fp, " {%d, %d, %d, %d}," % item print >>fp, "};" print >>fp # FIXME: the following tables should be made static, and # the support code moved into unicodedatabase.c print >>fp, "/* string literals */" print >>fp, "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "static const char *decomp_data[] = {" for name in decomp_data: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" # split record index table index1, index2, shift = splitbins(index) print >>fp, "/* index tables for the database records */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) # split decomposition index table index1, index2, shift = splitbins(decomp_index) print >>fp, "/* index tables for the decomposition data */" print >>fp, "#define DECOMP_SHIFT", shift Array("decomp_index1", index1).dump(fp) Array("decomp_index2", index2).dump(fp) # # 3) unicode type data # extract unicode types dummy = (0, 0, 0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = record[2] bidirectional = record[4] flags = 0 if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]: flags |= ALPHA_MASK if category == "Ll": flags |= LOWER_MASK if category == "Zl" or bidirectional == "B": flags |= LINEBREAK_MASK if category == "Zs" or bidirectional in ("WS", "B", "S"): flags |= SPACE_MASK if category == "Lt": flags |= TITLE_MASK if category == "Lu": flags |= UPPER_MASK # use delta predictor for upper/lower/title if record[12]: upper = (int(record[12], 16) - char) & 0xffff else: upper = 0 if record[13]: lower = (int(record[13], 16) - char) & 0xffff else: lower = 0 if record[14]: title = (int(record[14], 16) - char) & 0xffff else: title = 0 # decimal digit, integer digit decimal = 0 
if record[6]: flags |= DECIMAL_MASK decimal = int(record[6]) digit = 0 if record[7]: flags |= DIGIT_MASK digit = int(record[7]) item = ( flags, upper, lower, title, decimal, digit ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i print len(table), "ctype entries" FILE = "Objects/unicodetype_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique character type descriptors */" print >>fp, "const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {" for item in table: print >>fp, " {%d, %d, %d, %d, %d, %d}," % item print >>fp, "};" print >>fp # split decomposition index table index1, index2, shift = splitbins(index) print >>fp, "/* type indexes */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) | fad27aee11e8ece649b72c8ad9513f46882d23ba /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fad27aee11e8ece649b72c8ad9513f46882d23ba/makeunicodedata.py |
index1, index2, shift = splitbins(decomp_index) | index1, index2, shift = splitbins(decomp_index, trace) | def maketables(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) # 1) database properties for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" item = ( category, combining, bidirectional, mirrored ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # 2) decomposition data # FIXME: <fl> using the encoding stuff from unidb would save # another 50k or so, but I'll leave that for 2.1... decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i FILE = "Modules/unicodedata_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique database records */" print >>fp, \ "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print >>fp, " {%d, %d, %d, %d}," % item print >>fp, "};" print >>fp # FIXME: the following tables should be made static, and # the support code moved into unicodedatabase.c print >>fp, "/* string literals */" print >>fp, "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "static const char *decomp_data[] = {" for name in decomp_data: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" # split record index table index1, index2, shift = splitbins(index) print >>fp, "/* index tables for the database records */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) # split decomposition index table index1, index2, shift = splitbins(decomp_index) print >>fp, "/* index tables for the decomposition data */" print >>fp, "#define DECOMP_SHIFT", shift Array("decomp_index1", index1).dump(fp) Array("decomp_index2", index2).dump(fp) # # 3) unicode type data # extract unicode types dummy = (0, 0, 0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = record[2] bidirectional = record[4] flags = 0 if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]: flags |= ALPHA_MASK if category == "Ll": flags |= LOWER_MASK if category == "Zl" or bidirectional == "B": flags |= LINEBREAK_MASK if category == "Zs" or bidirectional in ("WS", "B", "S"): flags |= SPACE_MASK if category == "Lt": flags |= TITLE_MASK if category == "Lu": flags |= UPPER_MASK # use delta predictor for upper/lower/title if record[12]: upper = (int(record[12], 16) - char) & 0xffff else: upper = 0 if record[13]: lower = (int(record[13], 16) - char) & 0xffff else: lower = 0 if record[14]: title = (int(record[14], 16) - char) & 0xffff else: title = 0 # decimal digit, integer digit 
decimal = 0 if record[6]: flags |= DECIMAL_MASK decimal = int(record[6]) digit = 0 if record[7]: flags |= DIGIT_MASK digit = int(record[7]) item = ( flags, upper, lower, title, decimal, digit ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i print len(table), "ctype entries" FILE = "Objects/unicodetype_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique character type descriptors */" print >>fp, "const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {" for item in table: print >>fp, " {%d, %d, %d, %d, %d, %d}," % item print >>fp, "};" print >>fp # split decomposition index table index1, index2, shift = splitbins(index) print >>fp, "/* type indexes */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) | fad27aee11e8ece649b72c8ad9513f46882d23ba /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fad27aee11e8ece649b72c8ad9513f46882d23ba/makeunicodedata.py |
print len(table), "ctype entries" | def maketables(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) # 1) database properties for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" item = ( category, combining, bidirectional, mirrored ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # 2) decomposition data # FIXME: <fl> using the encoding stuff from unidb would save # another 50k or so, but I'll leave that for 2.1... decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i FILE = "Modules/unicodedata_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique database records */" print >>fp, \ "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print >>fp, " {%d, %d, %d, %d}," % item print >>fp, "};" print >>fp # FIXME: the following tables should be made static, and # the support code moved into unicodedatabase.c print >>fp, "/* string literals */" print >>fp, "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "static const char *decomp_data[] = {" for name in decomp_data: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" # split record index table index1, index2, shift = splitbins(index) print >>fp, "/* index tables for the database records */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) # split decomposition index table index1, index2, shift = splitbins(decomp_index) print >>fp, "/* index tables for the decomposition data */" print >>fp, "#define DECOMP_SHIFT", shift Array("decomp_index1", index1).dump(fp) Array("decomp_index2", index2).dump(fp) # # 3) unicode type data # extract unicode types dummy = (0, 0, 0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = record[2] bidirectional = record[4] flags = 0 if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]: flags |= ALPHA_MASK if category == "Ll": flags |= LOWER_MASK if category == "Zl" or bidirectional == "B": flags |= LINEBREAK_MASK if category == "Zs" or bidirectional in ("WS", "B", "S"): flags |= SPACE_MASK if category == "Lt": flags |= TITLE_MASK if category == "Lu": flags |= UPPER_MASK # use delta predictor for upper/lower/title if record[12]: upper = (int(record[12], 16) - char) & 0xffff else: upper = 0 if record[13]: lower = (int(record[13], 16) - char) & 0xffff else: lower = 0 if record[14]: title = (int(record[14], 16) - char) & 0xffff else: title = 0 # decimal digit, integer digit decimal = 0 if record[6]: flags |= DECIMAL_MASK decimal = 
int(record[6]) digit = 0 if record[7]: flags |= DIGIT_MASK digit = int(record[7]) item = ( flags, upper, lower, title, decimal, digit ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i print len(table), "ctype entries" FILE = "Objects/unicodetype_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique character type descriptors */" print >>fp, "const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {" for item in table: print >>fp, " {%d, %d, %d, %d, %d, %d}," % item print >>fp, "};" print >>fp # split decomposition index table index1, index2, shift = splitbins(index) print >>fp, "/* type indexes */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) | fad27aee11e8ece649b72c8ad9513f46882d23ba /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fad27aee11e8ece649b72c8ad9513f46882d23ba/makeunicodedata.py |
|
index1, index2, shift = splitbins(index) | index1, index2, shift = splitbins(index, trace) | def maketables(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) # 1) database properties for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" item = ( category, combining, bidirectional, mirrored ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # 2) decomposition data # FIXME: <fl> using the encoding stuff from unidb would save # another 50k or so, but I'll leave that for 2.1... decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i FILE = "Modules/unicodedata_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique database records */" print >>fp, \ "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print >>fp, " {%d, %d, %d, %d}," % item print >>fp, "};" print >>fp # FIXME: the following tables should be made static, and # the support code moved into unicodedatabase.c print >>fp, "/* string literals */" print >>fp, "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "static const char *decomp_data[] = {" for name in decomp_data: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" # split record index table index1, index2, shift = splitbins(index) print >>fp, "/* index tables for the database records */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) # split decomposition index table index1, index2, shift = splitbins(decomp_index) print >>fp, "/* index tables for the decomposition data */" print >>fp, "#define DECOMP_SHIFT", shift Array("decomp_index1", index1).dump(fp) Array("decomp_index2", index2).dump(fp) # # 3) unicode type data # extract unicode types dummy = (0, 0, 0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = record[2] bidirectional = record[4] flags = 0 if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]: flags |= ALPHA_MASK if category == "Ll": flags |= LOWER_MASK if category == "Zl" or bidirectional == "B": flags |= LINEBREAK_MASK if category == "Zs" or bidirectional in ("WS", "B", "S"): flags |= SPACE_MASK if category == "Lt": flags |= TITLE_MASK if category == "Lu": flags |= UPPER_MASK # use delta predictor for upper/lower/title if record[12]: upper = (int(record[12], 16) - char) & 0xffff else: upper = 0 if record[13]: lower = (int(record[13], 16) - char) & 0xffff else: lower = 0 if record[14]: title = (int(record[14], 16) - char) & 0xffff else: title = 0 # decimal digit, integer digit decimal = 0 
if record[6]: flags |= DECIMAL_MASK decimal = int(record[6]) digit = 0 if record[7]: flags |= DIGIT_MASK digit = int(record[7]) item = ( flags, upper, lower, title, decimal, digit ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i print len(table), "ctype entries" FILE = "Objects/unicodetype_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique character type descriptors */" print >>fp, "const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {" for item in table: print >>fp, " {%d, %d, %d, %d, %d, %d}," % item print >>fp, "};" print >>fp # split decomposition index table index1, index2, shift = splitbins(index) print >>fp, "/* type indexes */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) | fad27aee11e8ece649b72c8ad9513f46882d23ba /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fad27aee11e8ece649b72c8ad9513f46882d23ba/makeunicodedata.py |
def __init__(self, filename): | def __init__(self, filename, expand=1): | def __init__(self, filename): file = open(filename) table = [None] * 65536 while 1: s = file.readline() if not s: break s = string.split(string.strip(s), ";") char = string.atoi(s[0], 16) table[char] = s | fad27aee11e8ece649b72c8ad9513f46882d23ba /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fad27aee11e8ece649b72c8ad9513f46882d23ba/makeunicodedata.py |
If optional arg trace is true (default false), progress info is printed to sys.stderr. | If optional arg trace is non-zero (default zero), progress info is printed to sys.stderr. The higher the value, the more info you'll get. | def splitbins(t, trace=0): """t, trace=0 -> (t1, t2, shift). Split a table to save space. t is a sequence of ints. This function can be useful to save space if many of the ints are the same. t1 and t2 are lists of ints, and shift is an int, chosen to minimize the combined size of t1 and t2 (in C code), and where for each i in range(len(t)), t[i] == t2[(t1[i >> shift] << shift) + (i & mask)] where mask is a bitmask isolating the last "shift" bits. If optional arg trace is true (default false), progress info is printed to sys.stderr. """ import sys if trace: def dump(t1, t2, shift, bytes): print >>sys.stderr, "%d+%d bins at shift %d; %d bytes" % ( len(t1), len(t2), shift, bytes) print >>sys.stderr, "Size of original table:", len(t)*getsize(t), \ "bytes" n = len(t)-1 # last valid index maxshift = 0 # the most we can shift n and still have something left if n > 0: while n >> 1: n >>= 1 maxshift += 1 del n bytes = sys.maxint # smallest total size so far t = tuple(t) # so slices can be dict keys for shift in range(maxshift + 1): t1 = [] t2 = [] size = 2**shift bincache = {} for i in range(0, len(t), size): bin = t[i:i+size] index = bincache.get(bin) if index is None: index = len(t2) bincache[bin] = index t2.extend(bin) t1.append(index >> shift) # determine memory size b = len(t1)*getsize(t1) + len(t2)*getsize(t2) if trace: dump(t1, t2, shift, b) if b < bytes: best = t1, t2, shift bytes = b t1, t2, shift = best if trace: print >>sys.stderr, "Best:", dump(t1, t2, shift, bytes) if __debug__: # exhaustively verify that the decomposition is correct mask = ~((~0) << shift) # i.e., low-bit mask of shift bits for i in xrange(len(t)): assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)] return best | fad27aee11e8ece649b72c8ad9513f46882d23ba /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fad27aee11e8ece649b72c8ad9513f46882d23ba/makeunicodedata.py |
if trace: | if trace > 1: | def dump(t1, t2, shift, bytes): print >>sys.stderr, "%d+%d bins at shift %d; %d bytes" % ( len(t1), len(t2), shift, bytes) | fad27aee11e8ece649b72c8ad9513f46882d23ba /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fad27aee11e8ece649b72c8ad9513f46882d23ba/makeunicodedata.py |
maketables() | maketables(1) | def dump(t1, t2, shift, bytes): print >>sys.stderr, "%d+%d bins at shift %d; %d bytes" % ( len(t1), len(t2), shift, bytes) | fad27aee11e8ece649b72c8ad9513f46882d23ba /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/fad27aee11e8ece649b72c8ad9513f46882d23ba/makeunicodedata.py |
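The splitbins() docstring above pins down the invariant t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]. A self-contained toy split at one fixed shift, verified the same way the script does; the real function tries every shift and keeps the smallest encoding:

    t = [0, 0, 0, 0, 5, 5, 5, 5, 0, 0, 0, 0]
    shift = 2
    size = 2 ** shift
    t1, t2, seen = [], [], {}
    for i in range(0, len(t), size):
        bin = tuple(t[i:i + size])
        if bin not in seen:
            seen[bin] = len(t2)   # bins start at multiples of size
            t2.extend(bin)
        t1.append(seen[bin] >> shift)
    mask = size - 1
    for i in range(len(t)):
        assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]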
def readfp(self): | def readfp(self, fp): | def readfp(self): """Read a single mime.types-format file.""" map = self.types_map while 1: line = f.readline() if not line: break words = line.split() for i in range(len(words)): if words[i][0] == '#': del words[i:] break if not words: continue type, suffixes = words[0], words[1:] for suff in suffixes: map['.' + suff] = type | c019ecb7fe7b44fa339defd071510ec2b0728775 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/c019ecb7fe7b44fa339defd071510ec2b0728775/mimetypes.py |
line = f.readline() | line = fp.readline() | def readfp(self): """Read a single mime.types-format file.""" map = self.types_map while 1: line = f.readline() if not line: break words = line.split() for i in range(len(words)): if words[i][0] == '#': del words[i:] break if not words: continue type, suffixes = words[0], words[1:] for suff in suffixes: map['.' + suff] = type | c019ecb7fe7b44fa339defd071510ec2b0728775 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/c019ecb7fe7b44fa339defd071510ec2b0728775/mimetypes.py |
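With the fp parameter, readfp() can parse any file-like object in mime.types format rather than reading from an undefined f. A usage sketch; the MimeTypes class wrapper and the example mapping are assumptions, not part of the record:

    import mimetypes
    from StringIO import StringIO

    db = mimetypes.MimeTypes()
    db.readfp(StringIO("text/x-example exm\n"))
    print db.guess_type("notes.exm")   # -> ('text/x-example', None)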
>>> pik = pickle.dumps(x, 0) >>> dis(pik) | >>> pkl = pickle.dumps(x, 0) >>> dis(pkl) | def dis(pickle, out=None, indentlevel=4): """Produce a symbolic disassembly of a pickle. 'pickle' is a file-like object, or string, containing a (at least one) pickle. The pickle is disassembled from the current position, through the first STOP opcode encountered. Optional arg 'out' is a file-like object to which the disassembly is printed. It defaults to sys.stdout. Optional arg indentlevel is the number of blanks by which to indent a new MARK level. It defaults to 4. """ markstack = [] indentchunk = ' ' * indentlevel for opcode, arg, pos in genops(pickle): if pos is not None: print >> out, "%5d:" % pos, line = "%s %s%s" % (opcode.code, indentchunk * len(markstack), opcode.name) markmsg = None if markstack and markobject in opcode.stack_before: assert markobject not in opcode.stack_after markpos = markstack.pop() if markpos is not None: markmsg = "(MARK at %d)" % markpos if arg is not None or markmsg: # make a mild effort to align arguments line += ' ' * (10 - len(opcode.name)) if arg is not None: line += ' ' + repr(arg) if markmsg: line += ' ' + markmsg print >> out, line if markobject in opcode.stack_after: assert markobject not in opcode.stack_before markstack.append(pos) | 570283584af6a9aff47d2341d6154055572aaff5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/570283584af6a9aff47d2341d6154055572aaff5/pickletools.py |
>>> pik = pickle.dumps(x, 1) >>> dis(pik) | >>> pkl = pickle.dumps(x, 1) >>> dis(pkl) | def dis(pickle, out=None, indentlevel=4): """Produce a symbolic disassembly of a pickle. 'pickle' is a file-like object, or string, containing a (at least one) pickle. The pickle is disassembled from the current position, through the first STOP opcode encountered. Optional arg 'out' is a file-like object to which the disassembly is printed. It defaults to sys.stdout. Optional arg indentlevel is the number of blanks by which to indent a new MARK level. It defaults to 4. """ markstack = [] indentchunk = ' ' * indentlevel for opcode, arg, pos in genops(pickle): if pos is not None: print >> out, "%5d:" % pos, line = "%s %s%s" % (opcode.code, indentchunk * len(markstack), opcode.name) markmsg = None if markstack and markobject in opcode.stack_before: assert markobject not in opcode.stack_after markpos = markstack.pop() if markpos is not None: markmsg = "(MARK at %d)" % markpos if arg is not None or markmsg: # make a mild effort to align arguments line += ' ' * (10 - len(opcode.name)) if arg is not None: line += ' ' + repr(arg) if markmsg: line += ' ' + markmsg print >> out, line if markobject in opcode.stack_after: assert markobject not in opcode.stack_before markstack.append(pos) | 570283584af6a9aff47d2341d6154055572aaff5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/570283584af6a9aff47d2341d6154055572aaff5/pickletools.py |
49: p PUT 4 52: s SETITEM 53: b BUILD 54: a APPEND 55: g GET 1 58: a APPEND 59: . STOP | 49: s SETITEM 50: b BUILD 51: a APPEND 52: g GET 1 55: a APPEND 56: . STOP | def dis(pickle, out=None, indentlevel=4): """Produce a symbolic disassembly of a pickle. 'pickle' is a file-like object, or string, containing a (at least one) pickle. The pickle is disassembled from the current position, through the first STOP opcode encountered. Optional arg 'out' is a file-like object to which the disassembly is printed. It defaults to sys.stdout. Optional arg indentlevel is the number of blanks by which to indent a new MARK level. It defaults to 4. """ markstack = [] indentchunk = ' ' * indentlevel for opcode, arg, pos in genops(pickle): if pos is not None: print >> out, "%5d:" % pos, line = "%s %s%s" % (opcode.code, indentchunk * len(markstack), opcode.name) markmsg = None if markstack and markobject in opcode.stack_before: assert markobject not in opcode.stack_after markpos = markstack.pop() if markpos is not None: markmsg = "(MARK at %d)" % markpos if arg is not None or markmsg: # make a mild effort to align arguments line += ' ' * (10 - len(opcode.name)) if arg is not None: line += ' ' + repr(arg) if markmsg: line += ' ' + markmsg print >> out, line if markobject in opcode.stack_after: assert markobject not in opcode.stack_before markstack.append(pos) | 570283584af6a9aff47d2341d6154055572aaff5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/570283584af6a9aff47d2341d6154055572aaff5/pickletools.py |
__test__ = {'dissassembler_test': _dis_test, | __test__ = {'disassembler_test': _dis_test, | def dis(pickle, out=None, indentlevel=4): """Produce a symbolic disassembly of a pickle. 'pickle' is a file-like object, or string, containing a (at least one) pickle. The pickle is disassembled from the current position, through the first STOP opcode encountered. Optional arg 'out' is a file-like object to which the disassembly is printed. It defaults to sys.stdout. Optional arg indentlevel is the number of blanks by which to indent a new MARK level. It defaults to 4. """ markstack = [] indentchunk = ' ' * indentlevel for opcode, arg, pos in genops(pickle): if pos is not None: print >> out, "%5d:" % pos, line = "%s %s%s" % (opcode.code, indentchunk * len(markstack), opcode.name) markmsg = None if markstack and markobject in opcode.stack_before: assert markobject not in opcode.stack_after markpos = markstack.pop() if markpos is not None: markmsg = "(MARK at %d)" % markpos if arg is not None or markmsg: # make a mild effort to align arguments line += ' ' * (10 - len(opcode.name)) if arg is not None: line += ' ' + repr(arg) if markmsg: line += ' ' + markmsg print >> out, line if markobject in opcode.stack_after: assert markobject not in opcode.stack_before markstack.append(pos) | 570283584af6a9aff47d2341d6154055572aaff5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/570283584af6a9aff47d2341d6154055572aaff5/pickletools.py |
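These doctest fixes rename pik to pkl and correct the 'disassembler_test' key. The disassembler itself can be driven the same way:

    import pickle, pickletools

    pkl = pickle.dumps({"spam": 1}, 0)
    pickletools.dis(pkl)   # prints one line per opcode, MARK levels indented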
if sys.maxint > 0x7fffffff: self.assertRaises(ValueError, len, r) else: self.assertEqual(len(r), sys.maxint) | self.assertEqual(len(r), sys.maxint) | def test_xrange(self): self.assertEqual(list(xrange(3)), [0, 1, 2]) self.assertEqual(list(xrange(1, 5)), [1, 2, 3, 4]) self.assertEqual(list(xrange(0)), []) self.assertEqual(list(xrange(-3)), []) self.assertEqual(list(xrange(1, 10, 3)), [1, 4, 7]) self.assertEqual(list(xrange(5, -5, -3)), [5, 2, -1, -4]) | f4d8f390536416c083c687206be8e2687e26c9a3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/f4d8f390536416c083c687206be8e2687e26c9a3/test_xrange.py |
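The updated test expects len() of a maximal xrange to succeed on every build instead of raising ValueError on 64-bit platforms. Mirroring that expectation under a patched interpreter:

    import sys

    r = xrange(sys.maxint)
    assert len(r) == sys.maxint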
return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite, colnames_type_suite, adaptation_suite, date_suite)) | return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite, colnames_type_suite, adaptation_suite, bin_suite, date_suite)) | def suite(): sqlite_type_suite = unittest.makeSuite(SqliteTypeTests, "Check") decltypes_type_suite = unittest.makeSuite(DeclTypesTests, "Check") colnames_type_suite = unittest.makeSuite(ColNamesTests, "Check") adaptation_suite = unittest.makeSuite(ObjectAdaptationTests, "Check") date_suite = unittest.makeSuite(DateTimeTests, "Check") return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite, colnames_type_suite, adaptation_suite, date_suite)) | 762fbd34858f7df608e6da8079bf648bc7d3d8cc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/762fbd34858f7df608e6da8079bf648bc7d3d8cc/types.py |
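The fix simply threads the already-built bin_suite into the combined TestSuite. The composition pattern in isolation, with a stand-in test class and a made-up Check-prefixed test:

    import unittest

    class BinTests(unittest.TestCase):
        def CheckRoundTrip(self):
            self.assertEqual(buffer("ab")[:], "ab")

    bin_suite = unittest.makeSuite(BinTests, "Check")
    unittest.TextTestRunner().run(unittest.TestSuite((bin_suite,)))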
def remap_element_names(root, name_map): queue = [] for child in root.childNodes: if child.nodeType == ELEMENT: queue.append(child) while queue: node = queue.pop() tagName = node.tagName if name_map.has_key(tagName): name, attrs = name_map[tagName] node._node.name = name for attr, value in attrs.items(): node.setAttribute(attr, value) for child in node.childNodes: if child.nodeType == ELEMENT: queue.append(child) | def remap_element_names(root, name_map): queue = [] for child in root.childNodes: if child.nodeType == ELEMENT: queue.append(child) while queue: node = queue.pop() tagName = node.tagName if name_map.has_key(tagName): name, attrs = name_map[tagName] node._node.name = name for attr, value in attrs.items(): node.setAttribute(attr, value) for child in node.childNodes: if child.nodeType == ELEMENT: queue.append(child) | 82ebc27357891ec342b8602fb28189751b8d06e6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/82ebc27357891ec342b8602fb28189751b8d06e6/docfixer.py |
|
def fixup_table_structures(doc, fragment): # must be done after remap_element_names(), or the tables won't be found for table in find_all_elements(fragment, "table"): fixup_table(doc, table) | 82ebc27357891ec342b8602fb28189751b8d06e6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/82ebc27357891ec342b8602fb28189751b8d06e6/docfixer.py |
"sectionauthor", "seealso", | "sectionauthor", "seealso", "itemize", | def move_elements_by_name(doc, source, dest, name, sep=None): nodes = [] for child in source.childNodes: if child.nodeType == ELEMENT and child.tagName == name: nodes.append(child) for node in nodes: source.removeChild(node) dest.appendChild(node) if sep: dest.appendChild(doc.createTextNode(sep)) | 82ebc27357891ec342b8602fb28189751b8d06e6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/82ebc27357891ec342b8602fb28189751b8d06e6/docfixer.py |
"index", "indexii", "indexiii", "indexiv", "setindexsubitem", | "setindexsubitem", | def move_elements_by_name(doc, source, dest, name, sep=None): nodes = [] for child in source.childNodes: if child.nodeType == ELEMENT and child.tagName == name: nodes.append(child) for node in nodes: source.removeChild(node) dest.appendChild(node) if sep: dest.appendChild(doc.createTextNode(sep)) | 82ebc27357891ec342b8602fb28189751b8d06e6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/82ebc27357891ec342b8602fb28189751b8d06e6/docfixer.py |
"moduleauthor", "indexterm", | "moduleauthor", "indexterm", "leader", | def move_elements_by_name(doc, source, dest, name, sep=None): nodes = [] for child in source.childNodes: if child.nodeType == ELEMENT and child.tagName == name: nodes.append(child) for node in nodes: source.removeChild(node) dest.appendChild(node) if sep: dest.appendChild(doc.createTextNode(sep)) | 82ebc27357891ec342b8602fb28189751b8d06e6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/82ebc27357891ec342b8602fb28189751b8d06e6/docfixer.py |
module_name = entry.getAttribute("name") | module_name = entry.getAttribute("module") | def fixup_refmodindexes_chunk(container): # node is probably a <para>; let's see how often it isn't: if container.tagName != PARA_ELEMENT: bwrite("--- fixup_refmodindexes_chunk(%s)\n" % container) module_entries = find_all_elements(container, "module") if not module_entries: return index_entries = find_all_elements_from_set(container, REFMODINDEX_ELEMENTS) removes = [] for entry in index_entries: children = entry.childNodes if len(children) != 0: bwrite("--- unexpected number of children for %s node:\n" % entry.tagName) ewrite(entry.toxml() + "\n") continue found = 0 module_name = entry.getAttribute("name") for node in module_entries: if len(node.childNodes) != 1: continue this_name = node.childNodes[0].data if this_name == module_name: found = 1 node.setAttribute("index", "yes") if found: removes.append(entry) for node in removes: container.removeChild(node) | 82ebc27357891ec342b8602fb28189751b8d06e6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/82ebc27357891ec342b8602fb28189751b8d06e6/docfixer.py |
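The docfixer change reads the element's "module" attribute, which is what the markup actually carries, instead of a nonexistent "name". A small getAttribute() demo with minidom; the element name comes from the record, the document itself is made up:

    from xml.dom.minidom import parseString

    doc = parseString('<refmodindex module="os"/>')
    print doc.documentElement.getAttribute("module")   # -> os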
remap_element_names(fragment, { "tableii": ("table", {"cols": "2"}), "tableiii": ("table", {"cols": "3"}), "tableiv": ("table", {"cols": "4"}), "lineii": ("row", {}), "lineiii": ("row", {}), "lineiv": ("row", {}), "refmodule": ("module", {"link": "link"}), }) | def convert(ifp, ofp): p = esistools.ExtendedEsisBuilder() p.feed(ifp.read()) doc = p.document fragment = p.fragment normalize(fragment) simplify(doc, fragment) handle_labels(doc, fragment) handle_appendix(doc, fragment) fixup_trailing_whitespace(doc, { "abstract": "\n", "title": "", "chapter": "\n\n", "section": "\n\n", "subsection": "\n\n", "subsubsection": "\n\n", "paragraph": "\n\n", "subparagraph": "\n\n", }) cleanup_root_text(doc) cleanup_trailing_parens(fragment, ["function", "method", "cfunction"]) cleanup_synopses(doc, fragment) fixup_descriptors(doc, fragment) fixup_verbatims(fragment) normalize(fragment) fixup_paras(doc, fragment) fixup_sectionauthors(doc, fragment) remap_element_names(fragment, { "tableii": ("table", {"cols": "2"}), "tableiii": ("table", {"cols": "3"}), "tableiv": ("table", {"cols": "4"}), "lineii": ("row", {}), "lineiii": ("row", {}), "lineiv": ("row", {}), "refmodule": ("module", {"link": "link"}), }) fixup_table_structures(doc, fragment) fixup_rfc_references(doc, fragment) fixup_signatures(doc, fragment) add_node_ids(fragment) fixup_refmodindexes(fragment) fixup_bifuncindexes(fragment) # d = {} for gi in p.get_empties(): d[gi] = gi if d.has_key("rfc"): del d["rfc"] knownempty = d.has_key # try: write_esis(fragment, ofp, knownempty) except IOError, (err, msg): # Ignore EPIPE; it just means that whoever we're writing to stopped # reading. The rest of the output would be ignored. All other errors # should still be reported, if err != errno.EPIPE: raise | 82ebc27357891ec342b8602fb28189751b8d06e6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/82ebc27357891ec342b8602fb28189751b8d06e6/docfixer.py |
|
def test_varsized_array(self): array = (c_int * 20)(20, 21, 22, 23, 24, 25, 26, 27, 28, 29) varsize_array = (c_int * 1).from_address(addressof(array)) self.failUnlessEqual(varsize_array[0], 20) self.failUnlessEqual(varsize_array[1], 21) self.failUnlessEqual(varsize_array[2], 22) self.failUnlessEqual(varsize_array[3], 23) self.failUnlessEqual(varsize_array[4], 24) self.failUnlessEqual(varsize_array[5], 25) self.failUnlessEqual(varsize_array[6], 26) self.failUnlessEqual(varsize_array[7], 27) self.failUnlessEqual(varsize_array[8], 28) self.failUnlessEqual(varsize_array[9], 29) self.failUnlessEqual(varsize_array[-1], 20) self.failUnlessRaises(IndexError, lambda: varsize_array[-2]) self.failUnlessRaises(MemoryError, lambda: varsize_array[:]) varsize_array[0] = 100 varsize_array[1] = 101 varsize_array[2] = 102 varsize_array[3] = 103 varsize_array[4] = 104 varsize_array[5] = 105 varsize_array[6] = 106 varsize_array[7] = 107 varsize_array[8] = 108 varsize_array[9] = 109 for i in range(10): self.failUnlessEqual(varsize_array[i], i + 100) self.failUnlessEqual(array[i], i + 100) self.failUnlessEqual(varsize_array[0:10], range(100, 110)) self.failUnlessEqual(varsize_array[1:9], range(101, 109)) self.failUnlessEqual(varsize_array[1:-1], []) varsize_array[0:10] = range(1000, 1010) self.failUnlessEqual(varsize_array[0:10], range(1000, 1010)) varsize_array[1:9] = range(1001, 1009) self.failUnlessEqual(varsize_array[1:9], range(1001, 1009)) def test_vararray_is_sane(self): array = (c_int * 15)(20, 21, 22, 23, 24, 25, 26, 27, 28, 29) varsize_array = (c_int * 1).from_address(addressof(array)) varsize_array[:] = [1, 2, 3, 4, 5] self.failUnlessEqual(array[:], [1, 2, 3, 4, 5, 25, 26, 27, 28, 29, 0, 0, 0, 0, 0]) self.failUnlessEqual(varsize_array[0:10], [1, 2, 3, 4, 5, 25, 26, 27, 28, 29]) array[:5] = [10, 11, 12, 13, 14] self.failUnlessEqual(array[:], [10, 11, 12, 13, 14, 25, 26, 27, 28, 29, 0, 0, 0, 0, 0]) self.failUnlessEqual(varsize_array[0:10], [10, 11, 12, 13, 14, 25, 26, 27, 28, 29]) | def test_varsized_array(self): array = (c_int * 20)(20, 21, 22, 23, 24, 25, 26, 27, 28, 29) | ce049a0aeffff6a37ebb05269e69c81b342c248a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/ce049a0aeffff6a37ebb05269e69c81b342c248a/test_varsize_struct.py |
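The removed tests reinterpreted an array's memory through (c_int * 1).from_address(), including out-of-bounds indexing that the current semantics reject. The aliasing step itself is still well defined:

    from ctypes import c_int, addressof

    arr = (c_int * 4)(10, 11, 12, 13)
    alias = (c_int * 1).from_address(addressof(arr))   # same memory, new type
    assert alias[0] == 10
    alias[0] = 99
    assert arr[0] == 99   # writes through the alias land in the original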
|
"""Read and parse a list of filenames.""" | """Read and parse a filename or a list of filenames. Files that cannot be opened are silently ignored; this is designed so that you can specifiy a list of potential configuration file locations (e.g. current directory, user's home directory, systemwide directory), and all existing configuration files in the list will be read. A single filename may also be given. """ | def read(self, filenames): """Read and parse a list of filenames.""" if type(filenames) is type(''): filenames = [filenames] for file in filenames: fp = open(file) self.__read(fp) fp.close() | 6a8d84b0c1508d0a3f10f716f3faf3863b41f758 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/6a8d84b0c1508d0a3f10f716f3faf3863b41f758/ConfigParser.py |
for file in filenames: fp = open(file) self.__read(fp) | for filename in filenames: try: fp = open(filename) except IOError: continue self.__read(fp, filename) | def read(self, filenames): """Read and parse a list of filenames.""" if type(filenames) is type(''): filenames = [filenames] for file in filenames: fp = open(file) self.__read(fp) fp.close() | 6a8d84b0c1508d0a3f10f716f3faf3863b41f758 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/6a8d84b0c1508d0a3f10f716f3faf3863b41f758/ConfigParser.py |
def __read(self, fp): | def __read(self, fp, fpname): | def __read(self, fp): """Parse a sectioned setup file. | 6a8d84b0c1508d0a3f10f716f3faf3863b41f758 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/6a8d84b0c1508d0a3f10f716f3faf3863b41f758/ConfigParser.py |
raise MissingSectionHeaderError(fp.name, lineno, `line`) | raise MissingSectionHeaderError(fpname, lineno, `line`) | def __read(self, fp): """Parse a sectioned setup file. | 6a8d84b0c1508d0a3f10f716f3faf3863b41f758 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/6a8d84b0c1508d0a3f10f716f3faf3863b41f758/ConfigParser.py |
e = ParsingError(fp.name) | e = ParsingError(fpname) | def __read(self, fp): """Parse a sectioned setup file. | 6a8d84b0c1508d0a3f10f716f3faf3863b41f758 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/6a8d84b0c1508d0a3f10f716f3faf3863b41f758/ConfigParser.py |
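With the per-file IOError swallowed, read() can be handed a whole list of candidate locations and will parse whichever exist. A usage sketch; the paths are illustrative only:

    import os
    import ConfigParser

    cp = ConfigParser.ConfigParser()
    cp.read(["/etc/myapp.conf", os.path.expanduser("~/.myapprc"), "myapp.conf"])
    print cp.sections()   # whatever was found; missing files are skipped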
'recv', 'recvfrom', 'send', 'sendall', 'sendto', 'setblocking', | 'sendall', 'setblocking', | def getfqdn(name=''): """Get fully qualified domain name from name. An empty argument is interpreted as meaning the local host. First the hostname returned by gethostbyaddr() is checked, then possibly existing aliases. In case no FQDN is available, hostname is returned. """ name = name.strip() if not name or name == '0.0.0.0': name = gethostname() try: hostname, aliases, ipaddrs = gethostbyaddr(name) except error: pass else: aliases.insert(0, hostname) for name in aliases: if '.' in name: break else: name = hostname return name | c689918c941d366716c9a630414fb6703581b425 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/c689918c941d366716c9a630414fb6703581b425/socket.py |
def __getattr__(self, name): | def _dummy(*args): | def __getattr__(self, name): raise error(9, 'Bad file descriptor') | c689918c941d366716c9a630414fb6703581b425 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/c689918c941d366716c9a630414fb6703581b425/socket.py |
__slots__ = ["_sock"] | __slots__ = ["_sock", "send", "recv", "sendto", "recvfrom"] | def __getattr__(self, name): raise error(9, 'Bad file descriptor') | c689918c941d366716c9a630414fb6703581b425 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/c689918c941d366716c9a630414fb6703581b425/socket.py |
assert sre.split("(b)|(:+)", ":a:b::c") == \ ['', None, ':', 'a', None, ':', '', 'b', None, '', None, '::', 'c'] | def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1) | 067bebfe2da916d1e681db100be4c9a99ff36694 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/067bebfe2da916d1e681db100be4c9a99ff36694/test_sre.py |
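bump_num in the context above is a replacement callable: re.sub() hands it each match object and splices in whatever string it returns, as shown below.

    import re

    def bump_num(matchobj):
        return str(int(matchobj.group(0)) + 1)

    print re.sub(r"\d+", bump_num, "45 spam 90")   # -> 46 spam 91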
|
def updatecache(filename): """Update a cache entry and return its list of lines. If something's wrong, print a message, discard the cache entry, and return an empty list.""" if cache.has_key(filename): del cache[filename] if not filename or filename[0] + filename[-1] == '<>': return [] fullname = filename try: stat = os.stat(fullname) except os.error, msg: # Try looking through the module search path basename = os.path.split(filename)[1] for dirname in sys.path: fullname = os.path.join(dirname, basename) try: stat = os.stat(fullname) break except os.error: pass else: # No luck | 12f21ae07f56611926ec06bd92681e771d29968f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/12f21ae07f56611926ec06bd92681e771d29968f/linecache.py |
fullname = os.path.join(dirname, basename) | def updatecache(filename): """Update a cache entry and return its list of lines. If something's wrong, print a message, discard the cache entry, and return an empty list.""" if cache.has_key(filename): del cache[filename] if not filename or filename[0] + filename[-1] == '<>': return [] fullname = filename try: stat = os.stat(fullname) except os.error, msg: # Try looking through the module search path basename = os.path.split(filename)[1] for dirname in sys.path: fullname = os.path.join(dirname, basename) try: stat = os.stat(fullname) break except os.error: pass else: # No luck | 12f21ae07f56611926ec06bd92681e771d29968f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/12f21ae07f56611926ec06bd92681e771d29968f/linecache.py |
|
stat = os.stat(fullname) break except os.error: | fullname = os.path.join(dirname, basename) except (TypeError, AttributeError): | def updatecache(filename): """Update a cache entry and return its list of lines. If something's wrong, print a message, discard the cache entry, and return an empty list.""" if cache.has_key(filename): del cache[filename] if not filename or filename[0] + filename[-1] == '<>': return [] fullname = filename try: stat = os.stat(fullname) except os.error, msg: # Try looking through the module search path basename = os.path.split(filename)[1] for dirname in sys.path: fullname = os.path.join(dirname, basename) try: stat = os.stat(fullname) break except os.error: pass else: # No luck | 12f21ae07f56611926ec06bd92681e771d29968f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/12f21ae07f56611926ec06bd92681e771d29968f/linecache.py |
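updatecache() falls back to searching sys.path for the bare filename, so getline() can resolve a module source given only its basename. A quick check:

    import linecache

    first = linecache.getline("os.py", 1)   # found via the sys.path fallback
    print repr(first)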
l = 2.0 * radius * sin(w2*self._invradian) | l = 2.0 * radius * sin(w2*self._invradian) | def circle(self, radius, extent = None): """ Draw a circle with given radius. The center is radius units left of the turtle; extent determines which part of the circle is drawn. If not given, the entire circle is drawn. | 2b88f63a3c78755ff19d8e535fbe93a2f16a87d3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/8546/2b88f63a3c78755ff19d8e535fbe93a2f16a87d3/turtle.py |
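circle() advances along chords; with _invradian assumed to be the degrees-to-radians factor pi/180, each chord has length l = 2*r*sin(w2*pi/180) for a half-step angle w2 in degrees. The arithmetic stand-alone, with made-up radius and step count:

    from math import pi, sin

    radius, steps = 50.0, 36
    w2 = 0.5 * (360.0 / steps)            # half the per-step turn, in degrees
    l = 2.0 * radius * sin(w2 * pi / 180.0)
    print l                               # chord length per step, ~8.72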