rem: stringlengths 1 to 322k
add: stringlengths 0 to 2.05M
context: stringlengths 4 to 228k
meta: stringlengths 156 to 215
"""<?xml version="1.0" encoding="UTF8"?>
"""<?xml version="1.0" encoding="UTF-8"?>
def testRdfDescription2 (self): self.filt("""<?xml version="1.0" encoding="ISO-8859-1"?>
05259048a9462460c5b04c5e6408e09a0d3fee62 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/05259048a9462460c5b04c5e6408e09a0d3fee62/test_xmlrewriter.py
"""<?xml version="1.0" encoding="UTF8"?>
"""<?xml version="1.0" encoding="UTF-8"?>
def testRdfDescription3 (self): self.filt("""<?xml version="1.0" encoding="ISO-8859-1"?>
05259048a9462460c5b04c5e6408e09a0d3fee62 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/05259048a9462460c5b04c5e6408e09a0d3fee62/test_xmlrewriter.py
"""compress the string s. Note that compression state is saved outside of this function in the compression object.
"""Compress the string s. Note that compression state is saved outside of this function in the compression object.
def filter (self, data, **attrs): """compress the string s. Note that compression state is saved outside of this function in the compression object. """ if not attrs.has_key('compressobj'): return data compobj = attrs['compressobj'] if compobj: header = compobj['header'] if header: compobj['header'] = '' wc.log.debug(wc.LOG_FILTER, 'writing gzip header') compobj['size'] += len(data) compobj['crc'] = zlib.crc32(data, compobj['crc']) data = "%s%s" % (header, compobj['compressor'].compress(data)) return data
1f510b23d6657be52eedb9e46ccf0a72426d3846 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1f510b23d6657be52eedb9e46ccf0a72426d3846/Compress.py
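The Compress.py diff above keeps a running CRC32 and uncompressed size next to a raw-deflate compressor so the filter can frame its output as gzip. A minimal self-contained sketch of that framing, written for Python 3 rather than the Python 2 in the dataset; the header bytes and function names here are illustrative, and only the dict keys mirror the filter's compobj:

```python
import struct
import zlib

def make_compobj():
    """State dict with the same keys the filter's compobj uses."""
    return {
        # minimal 10-byte gzip header: magic, deflate, no flags, mtime 0, XFL, OS unknown
        "header": b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff",
        "compressor": zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS),
        "crc": zlib.crc32(b""),
        "size": 0,
    }

def compress_chunk(obj, data):
    """Emit the header once, then raw-deflate data while updating crc and size."""
    header, obj["header"] = obj["header"], b""
    obj["size"] += len(data)
    obj["crc"] = zlib.crc32(data, obj["crc"])
    return header + obj["compressor"].compress(data)

def compress_finish(obj):
    """Flush the compressor and append the gzip trailer (CRC32 and length)."""
    trailer = struct.pack("<LL", obj["crc"] & 0xffffffff, obj["size"] & 0xffffffff)
    return obj["compressor"].flush() + trailer

obj = make_compobj()
stream = compress_chunk(obj, b"hello ") + compress_chunk(obj, b"world") + compress_finish(obj)
assert zlib.decompress(stream, 16 + zlib.MAX_WBITS) == b"hello world"
```

The trailer carries the CRC32 and uncompressed length, which is why the filter has to update both on every chunk it compresses.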
if docontinue and serverpool.http_versions.get(addr, 1.1) < 1.1: self.client.error(417, i18n._("Expectation failed"), i18n._("Server does not understand HTTP/1.1")) return if expect:
if docontinue: if serverpool.http_versions.get(addr, 1.1) < 1.1: self.client.error(417, i18n._("Expectation failed"), i18n._("Server does not understand HTTP/1.1")) return elif expect:
def server_connected (self, server): assert self.state == 'connect' if not self.client.connected: # The client has aborted, so let's return this server # connection to the pool server.reuse() return self.server = server addr = (self.ipaddr, self.port) # check expectations expect = self.headers.get('Expect', '').lower().strip() docontinue = expect.startswith('100-continue') or \ expect.startswith('0100-continue') if docontinue and serverpool.http_versions.get(addr, 1.1) < 1.1: self.client.error(417, i18n._("Expectation failed"), i18n._("Server does not understand HTTP/1.1")) return if expect: self.client.error(417, i18n._("Expectation failed"), i18n._("Unsupported expectation `%s'")%expect) return # ok, assign server object self.state = 'response' # At this point, we tell the server that we are the client. # Once we get a response, we transfer to the real client. self.server.client_send_request(self.method, self.hostname, self.document, self.headers, self.content, self, self.nofilter, self.url)
6f8a985770ab84146e873a41bd1e467c06573139 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/6f8a985770ab84146e873a41bd1e467c06573139/ClientServerMatchmaker.py
ext.extra_compile_args.extend(extra)
for opt in extra: if opt not in ext.extra_compile_args: ext.extra_compile_args.append(opt)
def build_extensions (self): # For gcc 3.x we can add -std=gnu99 to get rid of warnings. extra = [] if self.compiler.compiler_type == 'unix': option = "-std=gnu99" if cc_supports_option(self.compiler.compiler, option): extra.append(option) # First, sanity-check the 'extensions' list self.check_extensions_list(self.extensions) for ext in self.extensions: ext.extra_compile_args.extend(extra) self.build_extension(ext)
42c190bdea8e45da09d2080c246a484dd931dd86 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/42c190bdea8e45da09d2080c246a484dd931dd86/setup.py
if part==COMPLETE: return [DATA, ""]
def filter_tag (self, tag, attrs): #debug(NIGHTMARE, "rule %s filter_tag" % self.title) part = self.replace[0] #debug(NIGHTMARE, "original tag", `tag`, "attrs", attrs) #debug(NIGHTMARE, "replace", num_part(part), "with", `self.replace[1]`) if part==TAGNAME: return (STARTTAG, self.replace[1], attrs) if part==TAG: return (DATA, self.replace[1]) if part==ENCLOSED: return (STARTTAG, tag, attrs) if part==COMPLETE: return [DATA, ""] newattrs = {} # look for matching tag attributes for attr,val in attrs.items(): ro = self.attrs.get(attr) if ro: mo = ro.search(val) if mo: if part==ATTR: # replace complete attr if self.replace[1]: newattrs[self.replace[1][0]] = self.replace[1][1] else: # part has to be ATTRVAL # Python has named submatches, and we can use them # the name 'replace' replaces the value, # all other names are given as format strings dict = mo.groupdict() if dict.has_key('replace'): newattrs[attr] = dict['replace'] else: newattrs[attr] = self.replace[1] % dict continue # nothing matched, just append the attribute as is newattrs[attr] = val #debug(NIGHTMARE, "filtered tag", tag, "attrs", newattrs) return (STARTTAG, tag, newattrs)
e079d7fba7adbcde6712c2c20b7bfdb8267ed33a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e079d7fba7adbcde6712c2c20b7bfdb8267ed33a/RewriteRule.py
if self.replace[1]: newattrs[self.replace[1][0]] = self.replace[1][1] else:
for f in self.replace[1].split(): if '=' in self.replace[1]: k,v = f.split('=') newattrs[k] = v else: newattrs[self.replace[1]] = None elif part==ATTRVAL:
def filter_tag (self, tag, attrs): #debug(NIGHTMARE, "rule %s filter_tag" % self.title) part = self.replace[0] #debug(NIGHTMARE, "original tag", `tag`, "attrs", attrs) #debug(NIGHTMARE, "replace", num_part(part), "with", `self.replace[1]`) if part==TAGNAME: return (STARTTAG, self.replace[1], attrs) if part==TAG: return (DATA, self.replace[1]) if part==ENCLOSED: return (STARTTAG, tag, attrs) if part==COMPLETE: return [DATA, ""] newattrs = {} # look for matching tag attributes for attr,val in attrs.items(): ro = self.attrs.get(attr) if ro: mo = ro.search(val) if mo: if part==ATTR: # replace complete attr if self.replace[1]: newattrs[self.replace[1][0]] = self.replace[1][1] else: # part has to be ATTRVAL # Python has named submatches, and we can use them # the name 'replace' replaces the value, # all other names are given as format strings dict = mo.groupdict() if dict.has_key('replace'): newattrs[attr] = dict['replace'] else: newattrs[attr] = self.replace[1] % dict continue # nothing matched, just append the attribute as is newattrs[attr] = val #debug(NIGHTMARE, "filtered tag", tag, "attrs", newattrs) return (STARTTAG, tag, newattrs)
e079d7fba7adbcde6712c2c20b7bfdb8267ed33a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e079d7fba7adbcde6712c2c20b7bfdb8267ed33a/RewriteRule.py
if self.replace[0]==ATTR: val = self.replace[0][0]+'="'+self.replace[0][1]+'"' else: val = self.replace[1] s += '>'+xmlify(val)+"</replace>\n"
s += '>'+xmlify(self.replace[1])+"</replace>\n"
def toxml (self): s = UrlRule.toxml(self) if self.tag!='a': s += '\n tag="%s"' % self.tag if not (self.attrs or self.replace or self.enclosed): return s+"/>\n" s += ">\n" for key,val in self.attrs.items(): s += "<attr" if key!='href': s += ' name="%s"' % key if val: s += ">"+xmlify(val)+"</attr>\n" else: s += "/>\n" if self.enclosed: s += "<enclosed>"+xmlify(self.enclosed)+"</enclosed>\n" if not self.replace[0]==COMPLETE or self.replace[1]: s += "<replace" if self.replace[0]!=COMPLETE: s += ' part="%s"' % num_part(self.replace[0]) if self.replace[1]: if self.replace[0]==ATTR: val = self.replace[0][0]+'="'+self.replace[0][1]+'"' else: val = self.replace[1] s += '>'+xmlify(val)+"</replace>\n" else: s += "/>\n" return s + "</rewrite>"
e079d7fba7adbcde6712c2c20b7bfdb8267ed33a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e079d7fba7adbcde6712c2c20b7bfdb8267ed33a/RewriteRule.py
print "XXX", currule
def _form_selrule (index): """ Select a rule. """ try: index = int(index) global currule currule = [r for r in curfolder.rules if r.oid == index][0] print "XXX", currule # fill ruletype flags for rt in rulenames: ruletype[rt] = (currule.name == rt) # XXX this side effect is bad :( # fill part flags if currule.name == u"htmlrewrite": global curparts curparts = {} for i, part in enumerate(partvalnames): curparts[part] = (currule.part == i) elif currule.name == u"xmlrewrite": global curreplacetypes curreplacetypes = {} for name, num in replacetypenums.items(): curreplacetypes[name] = (currule.replacetypenum == num) elif currule.name == u"header": global curfilterstage, curheaderaction curfilterstage = { u'both': currule.filterstage == u'both', u'request': currule.filterstage == u'request', u'response': currule.filterstage == u'response', } curheaderaction = { u'add': currule.action == u'add', u'replace': currule.action == u'replace', u'remove': currule.action == u'remove', } except (ValueError, IndexError, OverflowError): error['ruleindex'] = True
f64bbf8f838e5c6511f6c72e64a915bf832022e7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/f64bbf8f838e5c6511f6c72e64a915bf832022e7/filterconfig_html.py
self.request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, self.request)
request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, request)
def process_request (self): """read request, split it up and filter it""" # One newline ends request i = self.recv_buffer.find('\r\n') if i < 0: return # self.read(i) is not including the newline self.request = self.read(i) # basic request checking (more will be done below) try: self.method, self.url, protocol = self.request.split() except ValueError: self.error(400, _("Can't parse request")) return if not self.allow.method(self.method): self.error(405, _("Method Not Allowed")) return # fix broken url paths self.url = wc.url.url_norm(self.url)[0] if not self.url: self.error(400, _("Empty URL")) return self.protocol = wc.proxy.fix_http_version(protocol) self.http_ver = wc.proxy.get_http_version(self.protocol) # build request self.request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, self.request) # filter request attrs = wc.filter.get_filterattrs(self.url, wc.filter.STAGE_REQUEST) self.request = wc.filter.applyfilter(self.request, "finish", attrs) # final request checking if not self.fix_request(): return wc.log.info(wc.LOG_ACCESS, '%s - %s - %s', self.addr[0], time.ctime(time.time()), self.request) self.state = 'headers'
0b5979499cb4681b782053f64a61e20d99ddb896 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0b5979499cb4681b782053f64a61e20d99ddb896/HttpClient.py
self.request = wc.filter.applyfilter(self.request, "finish", attrs)
self.request = wc.filter.applyfilter(request, "finish", attrs)
def process_request (self): """read request, split it up and filter it""" # One newline ends request i = self.recv_buffer.find('\r\n') if i < 0: return # self.read(i) is not including the newline self.request = self.read(i) # basic request checking (more will be done below) try: self.method, self.url, protocol = self.request.split() except ValueError: self.error(400, _("Can't parse request")) return if not self.allow.method(self.method): self.error(405, _("Method Not Allowed")) return # fix broken url paths self.url = wc.url.url_norm(self.url)[0] if not self.url: self.error(400, _("Empty URL")) return self.protocol = wc.proxy.fix_http_version(protocol) self.http_ver = wc.proxy.get_http_version(self.protocol) # build request self.request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, self.request) # filter request attrs = wc.filter.get_filterattrs(self.url, wc.filter.STAGE_REQUEST) self.request = wc.filter.applyfilter(self.request, "finish", attrs) # final request checking if not self.fix_request(): return wc.log.info(wc.LOG_ACCESS, '%s - %s - %s', self.addr[0], time.ctime(time.time()), self.request) self.state = 'headers'
0b5979499cb4681b782053f64a61e20d99ddb896 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0b5979499cb4681b782053f64a61e20d99ddb896/HttpClient.py
data = wc.filter.applyfilter("", "finish", attrs)
data = wc.filter.applyfilter(data, "finish", attrs)
def process_content (self): """read and filter client request content""" data = self.read(self.bytes_remaining) if self.bytes_remaining is not None: # Just pass everything through to the server # NOTE: It's possible to have 'chunked' encoding here, # and then the current system of counting bytes remaining # won't work; we have to deal with chunks self.bytes_remaining -= len(data) is_closed = False for decoder in self.decoders: data = decoder.decode(data) if not is_closed: is_closed = decoder.closed for stage in FilterStages: attrs = wc.filter.get_filterattrs(self.url, stage, clientheaders=self.clientheaders, headers=self.headers) data = wc.filter.applyfilter(data, "filter", attrs) self.content += data underflow = self.bytes_remaining is not None and \ self.bytes_remaining < 0 if underflow: wc.log.warn(wc.LOG_PROXY, "client received %d bytes more than content-length", -self.bytes_remaining) if is_closed or self.bytes_remaining <= 0: for stage in FilterStages: attrs = wc.filter.get_filterattrs(self.url, stage, clientheaders=self.clientheaders, headers=self.headers) data = wc.filter.applyfilter("", "finish", attrs) self.content += data if self.content and not self.headers.has_key('Content-Length'): self.headers['Content-Length'] = "%d\r" % len(self.content) # We're done reading content self.state = 'receive' is_local = self.hostname in \ wc.proxy.dns_lookups.resolver.localhosts and \ self.port in (wc.configuration.config['port'], wc.configuration.config['sslport']) if is_local: is_public_doc = self.allow.public_document(self.document) if wc.configuration.config['adminuser'] and \ not wc.configuration.config['adminpass']: if is_local and is_public_doc: self.handle_local(is_public_doc=is_public_doc) else: # ignore request, must init admin password self.headers['Location'] = \ "http://%s:%d/adminpass.html\r" % \ (self.socket.getsockname()[0], wc.configuration.config['port']) self.error(302, _("Moved Temporarily")) elif is_local: # this is a direct proxy call self.handle_local(is_public_doc=is_public_doc) else: self.server_request()
0b5979499cb4681b782053f64a61e20d99ddb896 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0b5979499cb4681b782053f64a61e20d99ddb896/HttpClient.py
return (2L<<n-1)-1
return (1L << (32 - n)) - 1
def suffix2mask (n): "return a mask of n bits as a long integer" return (2L<<n-1)-1
179c526dadd676176a6723da8f3b6afbeee4f864 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/179c526dadd676176a6723da8f3b6afbeee4f864/ip.py
return int(math.log(mask+1, 2))
return 32 - int(math.log(mask+1, 2))
def mask2suffix (mask): """return suff for given bit mask""" return int(math.log(mask+1, 2))
179c526dadd676176a6723da8f3b6afbeee4f864 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/179c526dadd676176a6723da8f3b6afbeee4f864/ip.py
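The two ip.py fixes above make suffix2mask and mask2suffix proper inverses for 32-bit IPv4 addresses: suffix2mask(n) now yields the host-part mask of a /n prefix, and mask2suffix subtracts the recovered bit count from 32. A small sketch under that assumption, using bit_length() here instead of the dataset's int(math.log(mask + 1, 2)) to sidestep float truncation:

```python
def suffix2mask(n):
    """Return the host-part mask (the low 32 - n bits set) for a /n prefix."""
    return (1 << (32 - n)) - 1

def mask2suffix(mask):
    """Invert suffix2mask: recover the prefix length n from a host-part mask."""
    return 32 - ((mask + 1).bit_length() - 1)

assert suffix2mask(16) == 0x0000FFFF
assert all(mask2suffix(suffix2mask(n)) == n for n in range(33))
```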
hosts = ["192.168.1.1/16"] hostmap = hosts2map(hosts) print hostmap print map2hosts(hostmap)
hosts, nets = hosts2map([ "192.168.2.1", "192.168.2.1/32", "192.168.2.1/31", "192.168.2.1/30", "192.168.2.1/29", "192.168.2.1/28", "192.168.2.1/27", "192.168.2.1/26", "192.168.2.1/25", "192.168.2.1/24", "192.168.2.1/23", "192.168.2.1/22", "192.168.2.1/21", "192.168.2.1/20", "192.168.2.1/19", "192.168.2.1/18", "192.168.2.1/17", "192.168.2.1/16", "192.168.2.1/15", "192.168.2.1/14", "192.168.2.1/13", "192.168.2.1/12", "192.168.2.1/11", "192.168.2.1/10", "192.168.2.1/9", "192.168.2.1/8", "192.168.2.1/7", "192.168.2.1/6", "192.168.2.1/5", "192.168.2.1/4", "192.168.2.1/3", "192.168.2.1/2", "192.168.2.1/1", "127.0.0.1/8" ]) for host in hosts: print "host: %s" % (host) for net, mask in nets: print "net: %s %s => %s/%s" % (net, mask, num2dq(net), mask2suffix(mask)) maps = map2hosts([hosts, nets]) for map in maps: print "map: %s" % (map)
def _test (): hosts = ["192.168.1.1/16"] hostmap = hosts2map(hosts) print hostmap print map2hosts(hostmap)
179c526dadd676176a6723da8f3b6afbeee4f864 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/179c526dadd676176a6723da8f3b6afbeee4f864/ip.py
if buf.len >= self.minimal_size_bytes:
if buf.tell() >= self.minimal_size_bytes:
def filter (self, data, **attrs): """feed data to recognizer""" if not attrs.has_key('mimerecognizer_buf'): return data buf = attrs['mimerecognizer_buf'] buf.write(data) if buf.len >= self.minimal_size_bytes: return self.recognize(buf, attrs) return ''
0b7fc560693771de1ffdef9c3491de2a81d2f28d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0b7fc560693771de1ffdef9c3491de2a81d2f28d/MimeRecognizer.py
if not self.allow.scheme(self.scheme): wc.log.warn(wc.LOG_PROXY, "%s forbidden scheme %r encountered", self, self.scheme)
if not self.allow.is_allowed(self.method, self.scheme, self.port): wc.log.warn(wc.LOG_PROXY, "Unallowed request %s", self.url)
def fix_request (self): # refresh with filtered request data self.method, self.url, self.protocol = self.request.split() # enforce a maximum url length if len(self.url) > 2048: wc.log.error(wc.LOG_PROXY, "%s request url length %d chars is too long", self, len(self.url)) self.error(400, _("URL too long"), txt=_('URL length limit is %d bytes.') % 2048) return False if len(self.url) > 255: wc.log.warn(wc.LOG_PROXY, "%s request url length %d chars is very long", self, len(self.url)) # and unquote again self.url = wc.url.url_norm(self.url)[0] self.scheme, self.hostname, self.port, self.document = \ wc.url.url_split(self.url) # fix missing trailing / if not self.document: self.document = '/' # some clients send partial URI's without scheme, hostname # and port to clients, so we have to handle this if not self.scheme: self.scheme = "https" if not self.allow.scheme(self.scheme): wc.log.warn(wc.LOG_PROXY, "%s forbidden scheme %r encountered", self, self.scheme) self.error(403, _("Forbidden")) return False # request is ok return True
3f48a0b18b6b39a0a0a09266b96fc04edc715522 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/3f48a0b18b6b39a0a0a09266b96fc04edc715522/SslClient.py
wc.log.error(wc.LOG_PROXY, "%s got %s status %d %r",
wc.log.debug(wc.LOG_PROXY, "%s got %s status %d %r",
def server_response (self, server, response, status, headers): """ Follow redirects, and finish on errors. For HTTP status 2xx continue. """ self.server = server assert self.server.connected wc.log.debug(wc.LOG_PROXY, '%s server_response %r', self, response) version, status, msg = \ wc.http.parse_http_response(response, self.args[0]) # XXX check version wc.log.debug(wc.LOG_PROXY, '%s response %s %d %s', self, version, status, msg) if status in (302, 301): self.isredirect = True elif not (200 <= status < 300): wc.log.error(wc.LOG_PROXY, "%s got %s status %d %r", self, version, status, msg) self.finish() if headers.has_key('Transfer-Encoding'): # XXX don't look at value, assume chunked encoding for now wc.log.debug(wc.LOG_PROXY, '%s Transfer-encoding %r', self, headers['Transfer-encoding']) unchunker = wc.proxy.decoder.UnchunkStream.UnchunkStream(self) self.decoders.append(unchunker)
3810696c039c27969f92cf38c95ee96c6da66e10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/3810696c039c27969f92cf38c95ee96c6da66e10/HttpProxyClient.py
"""execute given script"""
""" Execute given script. """
def execute (pythonw, script, args): """execute given script""" cargs = " ".join(args) _in, _out = os.popen4("%s %s %s" % (pythonw, script, cargs)) line = _out.readline() while line: print line line = _out.readline() _in.close() _out.close()
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""fix install and config paths in the config file"""
""" Fix install and config paths in the config file. """
def fix_configdata (): """fix install and config paths in the config file""" name = "_webcleaner2_configdata.py" conffile = os.path.join(sys.prefix, "Lib", "site-packages", name) lines = [] for line in file(conffile): if line.startswith("install_") or line.startswith("config_"): lines.append(fix_install_path(line)) else: lines.append(line) f = file(conffile, "w") f.write("".join(lines)) f.close()
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
if line.startswith("install_") or line.startswith("config_"):
if line.startswith("install_") or \ line.startswith("config_") or \ line.startswith("template_"):
def fix_configdata (): """fix install and config paths in the config file""" name = "_webcleaner2_configdata.py" conffile = os.path.join(sys.prefix, "Lib", "site-packages", name) lines = [] for line in file(conffile): if line.startswith("install_") or line.startswith("config_"): lines.append(fix_install_path(line)) else: lines.append(line) f = file(conffile, "w") f.write("".join(lines)) f.close()
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""Replace placeholders written by bdist_wininst with those specified in win_path_scheme."""
""" Replace placeholders written by bdist_wininst with those specified in win_path_scheme. """
def fix_install_path (line): """Replace placeholders written by bdist_wininst with those specified in win_path_scheme.""" key, eq, val = line.split() # unescape string (do not use eval()) val = val[1:-1].replace("\\\\", "\\") for d in win_path_scheme.keys(): # look for placeholders to replace oldpath, newpath = win_path_scheme[d] oldpath = "%s%s" % (os.sep, oldpath) if oldpath in val: val = val.replace(oldpath, newpath) val = os.path.join(sys.prefix, val) return "%s = %r%s" % (key, val, os.linesep)
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""install shortcuts and NT service"""
""" Install shortcuts and NT service. """
def do_install (): """install shortcuts and NT service""" fix_configdata() import wc # initialize i18n wc.init_i18n() install_shortcuts() install_certificates() install_service() restart_service() open_browser_config()
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""create_shortcut(target, description, filename[, arguments[, \ workdir[, iconpath[, iconindex]]]]) file_created(path) - register 'path' so that the uninstaller removes it directory_created(path) - register 'path' so that the uninstaller removes it get_special_folder_location(csidl_string)
""" create_shortcut(target, description, filename[, arguments[, \ workdir[, iconpath[, iconindex]]]]) file_created(path) - register 'path' so that the uninstaller removes it directory_created(path) - register 'path' so that the uninstaller removes it get_special_folder_location(csidl_string)
def install_shortcuts (): """create_shortcut(target, description, filename[, arguments[, \ workdir[, iconpath[, iconindex]]]]) file_created(path) - register 'path' so that the uninstaller removes it directory_created(path) - register 'path' so that the uninstaller removes it get_special_folder_location(csidl_string) """ try: prg = get_special_folder_path("CSIDL_COMMON_PROGRAMS") except OSError: try: prg = get_special_folder_path("CSIDL_PROGRAMS") except OSError, reason: # give up - cannot install shortcuts print _("Cannot install shortcuts: %s") % reason sys.exit() lib_dir = distutils.sysconfig.get_python_lib(plat_specific=1) dest_dir = os.path.join(prg, "WebCleaner") try: os.mkdir(dest_dir) directory_created(dest_dir) except OSError: pass target = os.path.join(sys.prefix, "RemoveWebCleaner.exe") path = os.path.join(dest_dir, "Uninstall WebCleaner.lnk") arguments = "-u " + os.path.join(sys.prefix, "WebCleaner-wininst.log") create_shortcut(target, _("Uninstall WebCleaner"), path, arguments) file_created(path)
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""generate SSL certificates for SSL gateway functionality"""
""" Generate SSL certificates for SSL gateway functionality. """
def install_certificates (): """generate SSL certificates for SSL gateway functionality""" pythonw = os.path.join(sys.prefix, "pythonw.exe") import wc script = os.path.join(wc.ScriptDir, "webcleaner-certificates") execute(pythonw, script, ["install"])
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""return status of NT service"""
""" Return status of NT service. """
def state_nt_service (name): """return status of NT service""" try: return win32serviceutil.QueryServiceStatus(name)[1] except pywintypes.error, msg: print _("Service status error: %s") % str(msg) return None
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""install WebCleaner as NT service"""
""" Install WebCleaner as NT service. """
def install_service (): """install WebCleaner as NT service""" import wc import wc.win32start oldargs = sys.argv print _("Installing %s service...") % wc.AppName sys.argv = ['webcleaner', 'install'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) sys.argv = oldargs
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""restart WebCleaner NT service"""
""" Restart WebCleaner NT service. """
def restart_service (): """restart WebCleaner NT service""" stop_service() start_service()
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""stop WebCleaner NT service (if it is running)"""
""" Stop WebCleaner NT service (if it is running). """
def stop_service (): """stop WebCleaner NT service (if it is running)""" import wc import wc.win32start print _("Stopping %s proxy...") % wc.AppName oldargs = sys.argv state = state_nt_service(wc.AppName) while state==win32service.SERVICE_START_PENDING: time.sleep(1) state = state_nt_service(wc.AppName) if state==win32service.SERVICE_RUNNING: sys.argv = ['webcleaner', 'stop'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) state = state_nt_service(wc.AppName) while state==win32service.SERVICE_STOP_PENDING: time.sleep(1) state = state_nt_service(wc.AppName) sys.argv = oldargs
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""start WebCleaner NT service"""
""" Start WebCleaner NT service. """
def start_service (): """start WebCleaner NT service""" import wc import wc.win32start print _("Starting %s proxy...") % wc.AppName oldargs = sys.argv sys.argv = ['webcleaner', 'start'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) sys.argv = oldargs
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""stop and remove the installed NT service"""
""" Stop and remove the installed NT service. """
def do_remove (): """stop and remove the installed NT service""" import wc # initialize i18n wc.init_i18n() stop_service() remove_service() remove_certificates() remove_tempfiles()
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""generate SSL certificates for SSL gateway functionality"""
""" Generate SSL certificates for SSL gateway functionality. """
def remove_certificates (): """generate SSL certificates for SSL gateway functionality""" import wc pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(wc.ScriptDir, "webcleaner-certificates") execute(pythonw, script, ["remove"])
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""remove log files and magic(1) cache file"""
""" Remove log files and magic(1) cache file. """
def remove_tempfiles (): """remove log files and magic(1) cache file""" import wc remove_file(os.path.join(wc.ConfigDir, "magic.mime.mgc")) remove_file(os.path.join(wc.ConfigDir, "webcleaner.log")) remove_file(os.path.join(wc.ConfigDir, "webcleaner-access.log"))
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
"""Remove a single file if it exists. Errors are printed to stdout"""
""" Remove a single file if it exists. Errors are printed to stdout. """
def remove_file (fname): """Remove a single file if it exists. Errors are printed to stdout""" if os.path.exists(fname): try: os.remove(fname) except OSError, msg: print _("Could not remove %r: %s") % (fname, str(msg))
81119c56714873e5e119efdaaf15811708e582a2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/81119c56714873e5e119efdaaf15811708e582a2/install-webcleaner.py
curfolder.oid = len(config['folderrules'])
if not config['folderrules']: curfolder.oid = 0 else: curfolder.oid = config['folderrules'][-1].oid+1
def _form_newfolder (foldername): if not foldername: error['newfolder'] = True return fd, filename = tempfile.mkstemp(".zap", "local_", ConfigDir, text=True) # select the new folder global curfolder curfolder = _FolderRule(title=foldername, desc="", disable=0, filename=filename) _register_rule(curfolder) _generate_sids(prefix="lc") curfolder.oid = len(config['folderrules']) curfolder.write() config['folderrules'].append(curfolder) _recalc_up_down(config['folderrules']) info['newfolder'] = True
d85d22191778f4e74c1c0eb992e31192d4dcd655 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d85d22191778f4e74c1c0eb992e31192d4dcd655/filterconfig_html.py
curfolder.rules.remove(rule)
rules = curfolder.rules rules.remove(rule) for i in range(rule.oid, len(rules)): rules[i].oid = i curfolder.write()
def _form_removerule (rule): # XXX error handling curfolder.rules.remove(rule) global currule currule = None curfolder.write() info['removerule'] = True
d85d22191778f4e74c1c0eb992e31192d4dcd655 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d85d22191778f4e74c1c0eb992e31192d4dcd655/filterconfig_html.py
curfolder.write()
def _form_removerule (rule): # XXX error handling curfolder.rules.remove(rule) global currule currule = None curfolder.write() info['removerule'] = True
d85d22191778f4e74c1c0eb992e31192d4dcd655 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d85d22191778f4e74c1c0eb992e31192d4dcd655/filterconfig_html.py
def create_tcp_socket (self, sockinfo):
def create_tcp_socket (self):
def create_tcp_socket (self, sockinfo): """create tcp socket, connect to it and return socket object""" host = self.get('TCPAddr', 'localhost') port = int(self['TCPSocket']) sockinfo = get_sockinfo(host, port=port) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.connect(sockinfo[0][4]) except socket.error: sock.close() raise return sock
fbe82f45d1f7dc122479c95f1f25e79f5b82c94f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/fbe82f45d1f7dc122479c95f1f25e79f5b82c94f/VirusFilter.py
"charset": wc.ConfigCharset,
"charset": wc.configuration.ConfigCharset,
def write_filters (res, filename): if os.path.exists(filename): remove(filename) zapfile = file(filename, 'w') d = { "charset": wc.ConfigCharset, "title_en": wc.XmlUtils.xmlquote("AdZapper filters"), "title_de": wc.XmlUtils.xmlquote("AdZapper Filter"), "desc_en": wc.XmlUtils.xmlquote("Automatically generated by adzap2wc.py from %s on %s"%(ADZAPPER_URL, date)), "desc_de": wc.XmlUtils.xmlquote("Automatisch erzeugt von adzap2wc.py aus %s am %s"%(ADZAPPER_URL, date)), } zapfile.write("""<?xml version="1.0" encoding="%(charset)s"?>
10fbec766c6fdc09bb7beaaafbcf1e82e476506d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/10fbec766c6fdc09bb7beaaafbcf1e82e476506d/adzap2wc.py
p.feed("""<!------>""")
s = """< a>""" for c in s: p.feed(c)
def _broken (): p = HtmlPrinter() p.feed("""<!------>""") p.flush()
4096614ec2fab547e4cf4f2979eaaf05b83ca734 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4096614ec2fab547e4cf4f2979eaaf05b83ca734/htmllib.py
data_url = "& "C:\\foo.mht!${PATH}/"+ "EXPLOIT.CHM::"+
data_url = "& "C:\\foo.mht!${PATH}/"+ \ "EXPLOIT.CHM::"+ \
def testITSVuln (self): """Microsoft Internet Explorer ITS Protocol Zone Bypass Vulnerability""" # To avoid virus alarms we obfuscate the exploit URL. This # code is harmless. data_url = "&#109;s-its:mhtml:file://"+ "C:\\foo.mht!${PATH}/"+ "EXPLOIT.CHM::"+ "/exploit.htm" self.filt("""<object data="%s">""" % data_url, """<object data="ms-its:mhtml:file:/C:/foo.mht">""")
cf09ec498cba49bec5c366a13955af7a91d58577 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cf09ec498cba49bec5c366a13955af7a91d58577/test_rewriter.py
for ro in self.mimes: if ro.match(mime): self.mime_cache[mime] = True else: self.mime_cache[mime] = False
self.mime_cache[mime] = \ [ro for ro in self.mimes if ro.match(mime)]
def applies_to_mime (self, mime): """ Ask if this filter applies to a mime type. """ if mime not in self.mime_cache: if not self.mimes: self.mime_cache[mime] = True elif mime is None: self.mime_cache[mime] = False else: for ro in self.mimes: if ro.match(mime): self.mime_cache[mime] = True else: self.mime_cache[mime] = False return self.mime_cache[mime]
3d81ef47481267e015965da390e62c30be79dfe1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/3d81ef47481267e015965da390e62c30be79dfe1/Filter.py
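The Filter.py change above fixes a caching bug: the old loop overwrote mime_cache[mime] on every pattern, so only the last regex decided the result, while the fixed code caches the list of matching patterns and relies on its truthiness. A hedged sketch of the same idea with assumed class scaffolding (the MimeFilter name and constructor are illustrative), using any() for the truthiness test:

```python
import re

class MimeFilter:
    """Illustrative stand-in for the filter class; only the caching logic matters."""
    def __init__(self, mime_patterns):
        self.mimes = [re.compile(p) for p in mime_patterns]
        self.mime_cache = {}

    def applies_to_mime(self, mime):
        """Ask if this filter applies to a mime type, memoizing the answer."""
        if mime not in self.mime_cache:
            if not self.mimes:
                self.mime_cache[mime] = True
            elif mime is None:
                self.mime_cache[mime] = False
            else:
                # any() plays the role of the fixed code's list of matching patterns
                self.mime_cache[mime] = any(ro.match(mime) for ro in self.mimes)
        return self.mime_cache[mime]

f = MimeFilter([r"text/html", r"text/xml"])
assert f.applies_to_mime("text/html") and not f.applies_to_mime("image/png")
```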
if not data: return debug(HURT_ME_PLENTY, 'Proxy: read', len(data), '<=', self)
def handle_read (self): if not self.connected: # It's been closed (presumably recently) return
385bab61b974de70f2da2c471b99a104afebcd77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/385bab61b974de70f2da2c471b99a104afebcd77/Connection.py
raise wc.filter.FilterProxyError((406, _("Not acceptable"),
raise wc.filter.FilterProxyError(406, _("Not acceptable"),
def size_error (self): """ Raise an exceptionto cause a 406 HTTP return code. """ wc.log.warn(wc.LOG_FILTER, "Virus filter size exceeded.") raise wc.filter.FilterProxyError((406, _("Not acceptable"), _("Maximum data size (%s) exceeded") % \ wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES)))
4ea7be40c57fde2a9f47c265aaf1c51027a15dc2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4ea7be40c57fde2a9f47c265aaf1c51027a15dc2/VirusFilter.py
wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES)))
wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES))
def size_error (self): """ Raise an exceptionto cause a 406 HTTP return code. """ wc.log.warn(wc.LOG_FILTER, "Virus filter size exceeded.") raise wc.filter.FilterProxyError((406, _("Not acceptable"), _("Maximum data size (%s) exceeded") % \ wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES)))
4ea7be40c57fde2a9f47c265aaf1c51027a15dc2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4ea7be40c57fde2a9f47c265aaf1c51027a15dc2/VirusFilter.py
print "XXX new rule", rule
def _form_newrule (rtype, lang): if rtype not in rulenames: error['newrule'] = True return # add new rule rule = _GetRuleFromName(rtype) rule.parent = curfolder rule.titles[lang] = _("No title") # compile data and register rule.compile_data() if config['development']: prefix = u"wc" else: prefix = u"lc" _generate_sids(prefix) curfolder.append_rule(rule) _recalc_up_down(curfolder.rules) curfolder.write() _reinit_filters() # select new rule _form_selrule(rule.oid) info['newrule'] = True print "XXX new rule", rule
d9adc60830d00bc259a35e334a4db2e994f2072d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d9adc60830d00bc259a35e334a4db2e994f2072d/filterconfig_html.py
"unsupported content encoding in %r", encoding)
"unsupported content encoding in %r", cenc)
def server_set_encoding_headers (server, filename=None): """ Set encoding headers. """ rewrite = server.is_rewrite() bytes_remaining = get_content_length(server.headers) to_remove = sets.Set() if server.headers.has_key('Transfer-Encoding'): to_remove.add('Transfer-Encoding') tencs = server.headers['Transfer-Encoding'].lower() for tenc in tencs.split(","): tenc = tenc.strip() if ";" in tenc: tenc = tenc.split(";", 1)[0] if not tenc or tenc == 'identity': continue if tenc == 'chunked': server.decoders.append(UnchunkStream.UnchunkStream(server)) elif tenc in ('x-gzip', 'gzip'): server.decoders.append(GunzipStream.GunzipStream(server)) elif tenc == 'deflate': server.decoders.append(DeflateStream.DeflateStream(server)) else: wc.log.warn(wc.LOG_PROXY, "unsupported transfer encoding in %r", tencs) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'Transfer-Encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None if rewrite: to_remove.add('Content-Length') remove_headers(server.headers, to_remove) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r' if not rewrite: # only decompress on rewrite return bytes_remaining to_remove = sets.Set() #if server.protocol == "HTTP/1.1": # # To make pipelining possible, enable chunked encoding. # server.headers['Transfer-Encoding'] = "chunked\r" # server.encoders.append(ChunkStream.ChunkStream(server)) # Compressed content (uncompress only for rewriting modules) if server.headers.has_key('Content-Encoding'): to_remove.add('Content-Encoding') cencs = server.headers['Content-Encoding'].lower() for cenc in cencs.split(","): cenc = cenc.strip() if ";" in cenc: cenc = cenc.split(";", 1)[0] if not cenc or cenc == 'identity': continue if filename is not None and \ (filename.endswith(".gz") or filename.endswith(".tgz")): continue # note: do not gunzip .gz files if cenc in ('gzip', 'x-gzip'): server.decoders.append(GunzipStream.GunzipStream()) elif cenc == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: wc.log.warn(wc.LOG_PROXY, "unsupported content encoding in %r", encoding) # remove no-transform cache control if server.headers.get('Cache-Control', '').lower() == 'no-transform': to_remove.add('Cache-Control') # add warning server.headers['Warning'] = "214 Transformation applied\r" remove_headers(server.headers, to_remove) return bytes_remaining
e03289976f0bcc678d46da69aa8f94ace230fdf2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e03289976f0bcc678d46da69aa8f94ace230fdf2/Headers.py
if data and self.statuscode != 407:
if data and self.statuscode != 407 and hasattr(self.client, "server_content"):
def flush (self): """ Flush data of decoders (if any) and filters and write it to the client. return True if flush was successful. """ assert None == wc.log.debug(wc.LOG_PROXY, "%s HttpServer.flush", self) if not self.statuscode and self.method != 'CONNECT': wc.log.warn(wc.LOG_PROXY, "%s flush without status", self) return True data = self.flush_coders(self.decoders) try: for stage in FilterStages: data = wc.filter.applyfilter(stage, data, "finish", self.attrs) except wc.filter.FilterWait, msg: assert None == wc.log.debug(wc.LOG_PROXY, "%s FilterWait %s", self, msg) # the filter still needs some data # to save CPU time make connection unreadable for a while self.set_unreadable(1.0) return False except wc.filter.FilterRating, msg: assert None == wc.log.debug(wc.LOG_PROXY, "%s FilterRating from content %s", self, msg) self._show_rating_deny(str(msg)) return True data = self.flush_coders(self.encoders, data=data) # the client might already have closed if not self.client: return if self.defer_data: self.defer_data = False self.client.server_response(self, self.response, self.statuscode, self.headers) if not self.client: return if data and self.statuscode != 407: self.client.server_content(data) return True
2587cda0d9e6af8d79e58281a1df4172573b051c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2587cda0d9e6af8d79e58281a1df4172573b051c/HttpServer.py
return p.flush()
return p.getoutput()
def filter (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] p.feed(data) return p.flush()
cdd629b0e83c1aa1f79e7ba238d04956ce9feecc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cdd629b0e83c1aa1f79e7ba238d04956ce9feecc/Rewriter.py
def finish (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] # note: feed even if data is empty p.feed(data) return p.flush(finish=True)
cdd629b0e83c1aa1f79e7ba238d04956ce9feecc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cdd629b0e83c1aa1f79e7ba238d04956ce9feecc/Rewriter.py
return p.flush(finish=True)
p.flush() p.tagbuf2data() return p.getoutput()
def finish (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] # note: feed even if data is empty p.feed(data) return p.flush(finish=True)
cdd629b0e83c1aa1f79e7ba238d04956ce9feecc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cdd629b0e83c1aa1f79e7ba238d04956ce9feecc/Rewriter.py
return unicode(htmlentitydefs.entitydefs.get(ent, s))
entdef = htmlentitydefs.entitydefs.get(ent) if entdef is None: return s return entdef.decode("iso8859-1")
def _resolve_html_entity (mo): """resolve html entity, helper function for resolve_html_entities""" ent = mo.group("entity") s = mo.group() return unicode(htmlentitydefs.entitydefs.get(ent, s))
1311d3e1434fb8dc1177f52dee970e17f22ef1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1311d3e1434fb8dc1177f52dee970e17f22ef1ca/__init__.py
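The __init__.py fix above decodes the Latin-1 byte strings stored in Python 2's htmlentitydefs.entitydefs instead of calling unicode() on them, which would fail for non-ASCII entities such as &uuml;. A rough Python 3 equivalent, where html.entities.entitydefs already holds text; the regex and the sample string are assumptions, only the resolve_html_entities name comes from the docstring:

```python
import re
from html.entities import entitydefs

_entity_re = re.compile(r"&(?P<entity>[A-Za-z]+);")

def _resolve_html_entity(mo):
    """Helper for resolve_html_entities: replace a known entity, keep unknown ones."""
    ent = mo.group("entity")
    return entitydefs.get(ent, mo.group())

def resolve_html_entities(s):
    """Resolve named HTML entities such as &amp; and &uuml; in s."""
    return _entity_re.sub(_resolve_html_entity, s)

print(resolve_html_entities("M&uuml;ller &amp; Co"))  # Müller & Co
```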
"""send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n'
"""serve JavaScript files""" self.server.log.write("server got request path %r\n"%self.path) if not jsfiles.has_key(self.path): data = "HTTP/1.1 404 Oops\r\n" body = "" else: data = 'HTTP/1.1 200 OK\r\n' body = jsfiles[self.path]
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
e2d4a29e84025b0de5df75617136d4f35f55fdca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e2d4a29e84025b0de5df75617136d4f35f55fdca/TestScriptSrc.py
data += "Transfer-Encoding: chunked\r\n"
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
e2d4a29e84025b0de5df75617136d4f35f55fdca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e2d4a29e84025b0de5df75617136d4f35f55fdca/TestScriptSrc.py
data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n"
data += body
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
e2d4a29e84025b0de5df75617136d4f35f55fdca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e2d4a29e84025b0de5df75617136d4f35f55fdca/TestScriptSrc.py
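The TestScriptSrc.py rows above strip out a handler that emitted a chunked body whose size line was padded with leading zeros (the 'chunked-leading-zeros' case further down). An assumed minimal decoder, not part of the test suite, showing why such padding is harmless when the size line is parsed as hexadecimal:

```python
def dechunk(data):
    """Decode an HTTP/1.1 chunked body; tolerant of zero-padded chunk sizes."""
    body, rest = b"", data
    while True:
        size_line, rest = rest.split(b"\r\n", 1)
        size = int(size_line.split(b";")[0], 16)  # drop chunk extensions, parse hex size
        if size == 0:
            return body                           # last chunk; trailers are ignored here
        body, rest = body + rest[:size], rest[size + 2:]  # skip the CRLF after the chunk data

assert dechunk(b"0000000005\r\nhello\r\n0\r\n\r\n") == b"hello"
```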
class ChunkRequest (HttpRequest): def check_response (self, response): """check for 200 status and correct body data length""" if response.status!=200: return (self.VIOLATION, "Invalid HTTP status %r"%response.status) body = response.read() if len(body) != ChunkRequestHandler.body_length: return (self.VIOLATION, "Expected %d bytes in the body, but got %d bytes instead:\n%r" %\ (ChunkRequestHandler.body_length, len(body), body)) return (self.SUCCESS, "Ok")
class TestScriptSrc (StandardTest): """All these tests work with a _default_ filter configuration. If you change any of the *.zap filter configs, tests can fail..."""
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
e2d4a29e84025b0de5df75617136d4f35f55fdca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e2d4a29e84025b0de5df75617136d4f35f55fdca/TestScriptSrc.py
def name (self): return 'chunked-leading-zeros'
def init (self): wc.config = wc.Configuration() disable_rating_rules(wc.config) wc.config['filters'] = ['Rewriter',] wc.config.init_filter_modules() initlog(os.path.join("test", "logging.conf")) self.headers = WcMessage() self.headers['Content-Type'] = "text/html" self.log = sys.stdout self.serverthread = HttpServer.startServer(self.log, handler_class=JSRequestHandler) def shutdown (self): """Stop server, close log""" HttpServer.stopServer(self.log) def filt (self, data, result, name=""): attrs = get_filterattrs(name, [FILTER_RESPONSE_MODIFY], headers=self.headers) filtered = "" try: filtered += applyfilter(FILTER_RESPONSE_MODIFY, data, 'filter', attrs) except FilterException, msg: pass i = 1 while 1: try: filtered += applyfilter(FILTER_RESPONSE_MODIFY, "", 'finish', attrs) break except FilterException, msg: proxy_poll(timeout=max(0, run_timers())) i+=1 if i==100: raise FilterException("Slow") self.assertEqual(filtered, result)
def name (self): return 'chunked-leading-zeros'
e2d4a29e84025b0de5df75617136d4f35f55fdca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e2d4a29e84025b0de5df75617136d4f35f55fdca/TestScriptSrc.py
class TestChunkedEncoding (ProxyTest): def __init__ (self, methodName='runTest'): ProxyTest.__init__(self, methodName=methodName) request = ChunkRequest() self.addTest(request, handler_class=ChunkRequestHandler)
def testScriptSrc1 (self): self.filt( """<script src="http://localhost:%d/1.js"></script> </html>""" % HttpServer.defaultconfig['port'], """<script type="text/javascript"> <!-- %s//--> </script> </html>""" % jsfiles['/1.js']) def testScriptSrc2 (self): self.filt( """<script src="http://localhost:%d/1.js"> </script> </html>""" % HttpServer.defaultconfig['port'], """<script type="text/javascript"> <!-- %s//--> </script> </html>""" % jsfiles['/1.js']) def testScriptSrc3 (self): """missing </script>""" self.filt( """<script src="http://localhost:%d/3.js"/> <script type="JavaScript"> <!-- a = 1 //--> </script> </html>""" % HttpServer.defaultconfig['port'], """ <script type="JavaScript"> <!-- a = 1 //--> </script> </html>""") def testRecursionSrc (self): self.filt( """<script language="JavaScript"> <!-- document.write('<SCR'+'IPT LANGUAGE="JavaScript1.1" ' ); document.write('SRC="http://localhost:%d/2.js">'); document.write('</SCR'+'IPT>'); //--> </script> </td> </tr> </table>""" % HttpServer.defaultconfig['port'], """ </td> </tr> </table>""")
def name (self): return 'chunked-leading-zeros'
e2d4a29e84025b0de5df75617136d4f35f55fdca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e2d4a29e84025b0de5df75617136d4f35f55fdca/TestScriptSrc.py
unittest.main(defaultTest='TestChunkedEncoding')
unittest.main(defaultTest='TestScriptSrc')
def __init__ (self, methodName='runTest'): ProxyTest.__init__(self, methodName=methodName) request = ChunkRequest() self.addTest(request, handler_class=ChunkRequestHandler)
e2d4a29e84025b0de5df75617136d4f35f55fdca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e2d4a29e84025b0de5df75617136d4f35f55fdca/TestScriptSrc.py
suite = unittest.makeSuite(TestChunkedEncoding, 'test')
suite = unittest.makeSuite(TestScriptSrc, 'test')
def __init__ (self, methodName='runTest'): ProxyTest.__init__(self, methodName=methodName) request = ChunkRequest() self.addTest(request, handler_class=ChunkRequestHandler)
e2d4a29e84025b0de5df75617136d4f35f55fdca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e2d4a29e84025b0de5df75617136d4f35f55fdca/TestScriptSrc.py
return self.recognize(buf)
return self.recognize(buf, attrs)
def finish (self, data, **attrs): """feed data to recognizer""" if not attrs.has_key('mimerecognizer_buf'): return data buf = attrs['mimerecognizer_buf'] buf.write(data) return self.recognize(buf)
528e381abada85d187d95d575ece7406b045d305 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/528e381abada85d187d95d575ece7406b045d305/MimeRecognizer.py
if mime != attrs['mime']:
if not attrs['mime'].startswith(mime):
def recognize (self, buf, attrs): # note: recognizing a mime type fixes exploits like # CVE-2002-0025 and CVE-2002-0024 try: mime = wc.magic.classify(buf) if mime != attrs['mime']: wc.log.warn(wc.LOG_FILTER, "Adjusting MIME %r -> %r", attrs['mime'], mime) attrs['headers']['data']['Content-Type'] = "%s\r" % mime except StandardError, msg: wc.log.exception(wc.LOG_FILTER, "Mime recognize error") data = buf.getvalue() buf.close() del attrs['mimerecognizer_buf'] return data
528e381abada85d187d95d575ece7406b045d305 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/528e381abada85d187d95d575ece7406b045d305/MimeRecognizer.py
del attrs['mimerecognizer_buf']
def recognize (self, buf, attrs): # note: recognizing a mime type fixes exploits like # CVE-2002-0025 and CVE-2002-0024 try: mime = wc.magic.classify(buf) if mime != attrs['mime']: wc.log.warn(wc.LOG_FILTER, "Adjusting MIME %r -> %r", attrs['mime'], mime) attrs['headers']['data']['Content-Type'] = "%s\r" % mime except StandardError, msg: wc.log.exception(wc.LOG_FILTER, "Mime recognize error") data = buf.getvalue() buf.close() del attrs['mimerecognizer_buf'] return data
528e381abada85d187d95d575ece7406b045d305 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/528e381abada85d187d95d575ece7406b045d305/MimeRecognizer.py
lib_dir = distutils.get_python_lib(plat_specific=1)
lib_dir = distutils.sysconfig.get_python_lib(plat_specific=1)
def install_shortcuts (): """create_shortcut(target, description, filename[, arguments[, \ workdir[, iconpath[, iconindex]]]]) file_created(path) - register 'path' so that the uninstaller removes it directory_created(path) - register 'path' so that the uninstaller removes it get_special_folder_location(csidl_string) """ try: prg = get_special_folder_path("CSIDL_COMMON_PROGRAMS") except OSError: try: prg = get_special_folder_path("CSIDL_PROGRAMS") except OSError, reason: # give up - cannot install shortcuts print "cannot install shortcuts: %s" % reason sys.exit() lib_dir = distutils.get_python_lib(plat_specific=1) dest_dir = os.path.join(prg, "WebCleaner") try: os.mkdir(dest_dir) directory_created(dest_dir) except OSError: pass target = os.path.join(sys.prefix, "RemoveWebCleaner.exe") path = os.path.join(dest_dir, "Uninstall WebCleaner.lnk") arguments = "-u " + os.path.join(sys.prefix, "WebCleaner-wininst.log") create_shortcut(target, "Uninstall WebCleaner", path, arguments) file_created(path)
9e8b76be5466419a2a36f0f2928e543824bbf067 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9e8b76be5466419a2a36f0f2928e543824bbf067/install-webcleaner.py
script = os.path.join(script_dir, "webcleaner-certificates")
script = os.path.join(wc.ScriptDir, "webcleaner-certificates")
def install_certificates (): """generate SSL certificates for SSL gateway functionality""" pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(script_dir, "webcleaner-certificates") execute(pythonw, script, ["install"])
9e8b76be5466419a2a36f0f2928e543824bbf067 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9e8b76be5466419a2a36f0f2928e543824bbf067/install-webcleaner.py
script = os.path.join(script_dir, "webcleaner-certificates")
script = os.path.join(wc.ScriptDir, "webcleaner-certificates")
def remove_certificates (): """generate SSL certificates for SSL gateway functionality""" pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(script_dir, "webcleaner-certificates") execute(pythonw, script, ["remove"])
9e8b76be5466419a2a36f0f2928e543824bbf067 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9e8b76be5466419a2a36f0f2928e543824bbf067/install-webcleaner.py
wc.log.warn(wc.LOG_JS,
wc.log.debug(wc.LOG_JS,
def js_end_element (self, item): """ Parse generated html for scripts. """ wc.log.debug(wc.LOG_JS, "%s js_end_element buf %r", self, self.htmlparser.tagbuf) if len(self.htmlparser.tagbuf)<2: # syntax error, ignore wc.log.warn(wc.LOG_JS, "JS syntax error, self.tagbuf %r", self.htmlparser.tagbuf) return if self.js_src: wc.log.debug(wc.LOG_JS, "JS src, self.tagbuf %r", self.htmlparser.tagbuf) del self.htmlparser.tagbuf[-1] if len(self.htmlparser.tagbuf)<2: # syntax error, ignore wc.log.warn(wc.LOG_JS, "JS end, self.tagbuf %s", self.htmlparser.tagbuf) return if len(self.htmlparser.tagbuf) > 2 and \ self.htmlparser.tagbuf[-3][0] == \ wc.filter.html.STARTTAG and \ self.htmlparser.tagbuf[-3][1] == 'script': del self.htmlparser.tagbuf[-1] if len(self.htmlparser.tagbuf)<2 or \ self.htmlparser.tagbuf[-1][0] != \ wc.filter.html.DATA or \ self.htmlparser.tagbuf[-2][0] != \ wc.filter.html.STARTTAG or \ self.htmlparser.tagbuf[-2][1] != 'script': # syntax error, ignore return js_ok, js_lang = wc.js.get_js_data(self.htmlparser.tagbuf[-2][2]) if not js_ok: # no JavaScript, add end tag and ignore self.htmlparser.tagbuf.append(item) return ver = wc.js.get_js_ver(js_lang) # get script data script = self.htmlparser.tagbuf[-1][1].strip() # remove html comments script = wc.js.remove_html_comments(script) if not script: # again, ignore an empty script del self.htmlparser.tagbuf[-1] del self.htmlparser.tagbuf[-1] return # put correctly quoted script data into buffer script = wc.js.clean(script, jscomments=self.jscomments) self.htmlparser.tagbuf[-1][1] = script # execute script self.jsScript(script, ver, item)
3c57546dd52f9655e1e20fb1db48f0f105d8d4e3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/3c57546dd52f9655e1e20fb1db48f0f105d8d4e3/JSFilter.py
res = {}
res = []
def parse_adzapper_file (filename): res = {} is_comment = re.compile('^\s*(#.*)?$').match content = False # skip content until __DATA__ marker for line in open(filename): if not content: content = line.startswith('__DATA__') elif not is_comment(line): parse_adzapper_line(line.strip(), res) return res
5fa0e81bf922324d404b7f57f5fdb223b9c2790a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5fa0e81bf922324d404b7f57f5fdb223b9c2790a/adzap2wc.py
adclass, pattern = line.split(None, 1) res.setdefault(adclass.lower(), []).append(pattern) def write_filters (ads):
res.append(line.split(None, 1)) def write_filters (res):
def parse_adzapper_line (line, res): adclass, pattern = line.split(None, 1) res.setdefault(adclass.lower(), []).append(pattern)
5fa0e81bf922324d404b7f57f5fdb223b9c2790a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5fa0e81bf922324d404b7f57f5fdb223b9c2790a/adzap2wc.py
for adclass, pattern in res.items(): pattern = convert_adzapper_pattern(pattern) if adclass=='pass': write_allow(pattern) elif adclass='print':
for adclass, pattern in res:
    if adclass=='NOZAP':
        continue
    elif adclass=='PASS':
        pattern = convert_adzapper_pattern(pattern)
        write_allow(zapfile, adclass, pattern)
    elif adclass=='PRINT':
def write_filters (ads):
    filename = os.path.join("config", "adzapper.zap")
    if os.path.exists(filename):
        remove(filename)
    zapfile = file(filename, 'w')
    d = {"title": xmlify("AdZapper filters"),
         "desc": xmlify("Automatically generated on %s" % date),
        }
    zapfile.write("""<?xml version="1.0"?>
5fa0e81bf922324d404b7f57f5fdb223b9c2790a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5fa0e81bf922324d404b7f57f5fdb223b9c2790a/adzap2wc.py
pattern = re.sub(r"[^.]*[^?]", pattern, "[^/]*")
pattern = re.sub(r"([^.])\*([^?])", r"\1[^/]*\2", pattern)
def convert_adzapper_pattern (pattern):
    pattern = pattern.replace(".", "\\.")
    pattern = pattern.replace("?", "\\?")
    pattern = pattern.replace("**", ".*?")
    pattern = re.sub(r"[^.]*[^?]", pattern, "[^/]*")
    return pattern
5fa0e81bf922324d404b7f57f5fdb223b9c2790a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5fa0e81bf922324d404b7f57f5fdb223b9c2790a/adzap2wc.py
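A quick check of the corrected substitution from the record above (re.sub(r"([^.])\*([^?])", r"\1[^/]*\2", pattern)); the input pattern and URL are invented for the example.

import re

def convert_adzapper_pattern(pattern):
    # fixed version from the record above: escape literals, then map
    # "**" to ".*?" and a single "*" to "[^/]*"
    pattern = pattern.replace(".", "\\.")
    pattern = pattern.replace("?", "\\?")
    pattern = pattern.replace("**", ".*?")
    pattern = re.sub(r"([^.])\*([^?])", r"\1[^/]*\2", pattern)
    return pattern

regex = convert_adzapper_pattern("http://ads.example.com/*.gif")
assert regex == r"http://ads\.example\.com/[^/]*\.gif"
assert re.match(regex, "http://ads.example.com/banner.gif")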
replace = re.sub(r"$(\d)", replace, r"\\1")
replace = re.sub(r"\$(\d)", r"\\1", replace)
def convert_adzapper_replace (replace):
    # replace Perl back references with Python ones
    replace = re.sub(r"$(\d)", replace, r"\\1")
    return replace
5fa0e81bf922324d404b7f57f5fdb223b9c2790a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5fa0e81bf922324d404b7f57f5fdb223b9c2790a/adzap2wc.py
def write_allow (zapfile, pattern):
    title = "AdZapper PASS filter"
    desc = "Automatically generated, you should not edit this filter."
    scheme, host, path, query, fragment = urlparse.urlsplit(pattern)
    d = locals()
    for key, value in d:
        d[key] = xmlify(value)
def write_allow (zapfile, adclass, pattern): d = get_rule_dict(adclass, pattern)
def write_allow (zapfile, pattern):
    title = "AdZapper PASS filter"
    desc = "Automatically generated, you should not edit this filter."
    scheme, host, path, query, fragment = urlparse.urlsplit(pattern)
    d = locals()
    for key, value in d:
        d[key] = xmlify(value)
    zapfile.write("""<allow title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s"/>
5fa0e81bf922324d404b7f57f5fdb223b9c2790a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5fa0e81bf922324d404b7f57f5fdb223b9c2790a/adzap2wc.py
scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s"/>
url="%(url)s"
def write_allow (zapfile, pattern):
    title = "AdZapper PASS filter"
    desc = "Automatically generated, you should not edit this filter."
    scheme, host, path, query, fragment = urlparse.urlsplit(pattern)
    d = locals()
    for key, value in d:
        d[key] = xmlify(value)
    zapfile.write("""<allow title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s"/>
5fa0e81bf922324d404b7f57f5fdb223b9c2790a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5fa0e81bf922324d404b7f57f5fdb223b9c2790a/adzap2wc.py
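The write_allow record above splits each converted pattern with urlparse.urlsplit to fill the attributes of the generated allow element; the sketch below shows that decomposition on an invented pattern, and works on Python 2 (urlparse, as in the records) and Python 3.

try:
    from urlparse import urlsplit        # Python 2, as used in the module above
except ImportError:
    from urllib.parse import urlsplit    # Python 3 equivalent

# hypothetical converted adzapper pattern
scheme, host, path, query, fragment = urlsplit(r"http://www\.example\.com/ads/[^/]*\.gif")
assert scheme == "http"
assert host == r"www\.example\.com"
assert path == r"/ads/[^/]*\.gif"
assert query == "" and fragment == ""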
title = "AdZapper %s filter" % adclass desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value)
d = get_rule_dict(adclass, pattern)
def write_block (zapfile, adclass, pattern, replacement=None):
    title = "AdZapper %s filter" % adclass
    desc = "Automatically generated, you should not edit this filter."
    scheme, host, path, query, fragment = urlparse.urlsplit(pattern)
    d = locals()
    for key, value in d:
        d[key] = xmlify(value)
    zapfile.write("""<block title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s" """ % d)
    if replacement:
        zapfile.write(">%(replacement)s</block>" % d)
    else:
        zapfile.write("/>")
    zapfile.write("\n")
5fa0e81bf922324d404b7f57f5fdb223b9c2790a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5fa0e81bf922324d404b7f57f5fdb223b9c2790a/adzap2wc.py
scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s" """ % d) if replacement: zapfile.write(">%(replacement)s</block>" % d)
url="%(url)s" """ % d) if replacement is not None: zapfile.write(">%s</block>" % xmlify(replacement))
def write_block (zapfile, adclass, pattern, replacement=None):
    title = "AdZapper %s filter" % adclass
    desc = "Automatically generated, you should not edit this filter."
    scheme, host, path, query, fragment = urlparse.urlsplit(pattern)
    d = locals()
    for key, value in d:
        d[key] = xmlify(value)
    zapfile.write("""<block title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s" """ % d)
    if replacement:
        zapfile.write(">%(replacement)s</block>" % d)
    else:
        zapfile.write("/>")
    zapfile.write("\n")
5fa0e81bf922324d404b7f57f5fdb223b9c2790a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5fa0e81bf922324d404b7f57f5fdb223b9c2790a/adzap2wc.py
f = file(proxyconf_file())
f = file(proxyconf_file(), 'w')
def write_proxyconf (self):
    """write proxy configuration"""
    f = file(proxyconf_file())
    f.write("""<?xml version="1.0"?>
d5245fe6916d96fbd7e23dec5cc460cf979205bc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d5245fe6916d96fbd7e23dec5cc460cf979205bc/__init__.py
"""
""")
def write_proxyconf (self):
    """write proxy configuration"""
    f = file(proxyconf_file())
    f.write("""<?xml version="1.0"?>
d5245fe6916d96fbd7e23dec5cc460cf979205bc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d5245fe6916d96fbd7e23dec5cc460cf979205bc/__init__.py
if self.allowedhosts:
if self['allowedhosts']:
def write_proxyconf (self):
    """write proxy configuration"""
    f = file(proxyconf_file())
    f.write("""<?xml version="1.0"?>
d5245fe6916d96fbd7e23dec5cc460cf979205bc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d5245fe6916d96fbd7e23dec5cc460cf979205bc/__init__.py
from glob import glob
def read_filterconf (self):
    """read filter rules"""
    from glob import glob
    # filter configuration
    for f in filterconf_files():
        ZapperParser().parse(f, self)
    for f in self['rules']:
        f.sort()
    self['rules'].sort()
    filter.rules.FolderRule.recalc_oids(self['rules'])
d5245fe6916d96fbd7e23dec5cc460cf979205bc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d5245fe6916d96fbd7e23dec5cc460cf979205bc/__init__.py
if self.scheme != 'https':
if scheme != 'https':
def is_allowed (self, method, scheme, port):
    if not self.method(method):
        wc.log.warn(wc.LOG_PROXY, "illegal method %s", method)
        return False
    if scheme not in self.schemes:
        wc.log.warn(wc.LOG_PROXY, "illegal scheme %s", scheme)
        return False
    if method == 'CONNECT':
        # CONNECT method sanity
        if port not in self.connect_ports:
            wc.log.warn(wc.LOG_PROXY, "illegal CONNECT port %d", port)
            return False
        if self.scheme != 'https':
            wc.log.warn(wc.LOG_PROXY, "illegal CONNECT scheme %d", scheme)
            return False
    else:
        # all other methods
        if port not in self.http_ports:
            wc.log.warn(wc.LOG_PROXY, "illegal port %d", port)
            return False
    return True
681d37a2d61f13067f3a8870599c19392f8de763 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/681d37a2d61f13067f3a8870599c19392f8de763/Allowed.py
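A trimmed-down, standalone sketch of the CONNECT sanity checks performed by is_allowed above; the method/scheme whitelists are omitted and the concrete port lists are assumptions, not WebCleaner's actual defaults.

connect_ports = [443]          # CONNECT may only target HTTPS ports
http_ports = [80, 8080]        # everything else must use plain HTTP ports

def is_allowed(method, scheme, port):
    if method == 'CONNECT':
        return port in connect_ports and scheme == 'https'
    return port in http_ports

assert is_allowed('CONNECT', 'https', 443)
assert not is_allowed('CONNECT', 'http', 443)    # wrong scheme for CONNECT
assert not is_allowed('GET', 'http', 443)        # port not in http_ports
assert is_allowed('GET', 'http', 80)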
p.feed("<hTml>")
def _test():
    p = HtmlPrinter()
    #p.feed("<hTml>")
    p.feed("<a href>")
    #p.feed("<a href=''>")
    #p.feed('<a href="">')
    #p.feed("<a href='a'>")
    #p.feed('<a href="a">')
    p.feed("<a href=a>")
    #p.feed("<a href='\"'>")
    #p.feed("<a href=\"'\">")
    #p.feed("<a href=' '>")
    #p.feed("<a href=a href=b>")
    #p.feed("<a/>")
    #p.feed("<a href/>")
    #p.feed("<a href=a />")
    #p.feed("</a>")
    #p.feed("<?bla foo?>")
    #p.feed("<?bla?>")
    #p.feed("<!-- - comment -->")
    #p.feed("<!---->")
    #p.feed("<!DOCTYPE \"vla foo>")
    p.flush()
77ac994009378cc133855202839a3176171dd82d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/77ac994009378cc133855202839a3176171dd82d/htmllib.py
p.feed("<a href=''>") p.feed('<a href="">') p.feed("<a href='a'>") p.feed('<a href="a">')
def _test():
    p = HtmlPrinter()
    #p.feed("<hTml>")
    p.feed("<a href>")
    #p.feed("<a href=''>")
    #p.feed('<a href="">')
    #p.feed("<a href='a'>")
    #p.feed('<a href="a">')
    p.feed("<a href=a>")
    #p.feed("<a href='\"'>")
    #p.feed("<a href=\"'\">")
    #p.feed("<a href=' '>")
    #p.feed("<a href=a href=b>")
    #p.feed("<a/>")
    #p.feed("<a href/>")
    #p.feed("<a href=a />")
    #p.feed("</a>")
    #p.feed("<?bla foo?>")
    #p.feed("<?bla?>")
    #p.feed("<!-- - comment -->")
    #p.feed("<!---->")
    #p.feed("<!DOCTYPE \"vla foo>")
    p.flush()
77ac994009378cc133855202839a3176171dd82d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/77ac994009378cc133855202839a3176171dd82d/htmllib.py
p.feed("<a href='\"'>") p.feed("<a href=\"'\">") p.feed("<a href=' '>") p.feed("<a href=a href=b>") p.feed("<a/>") p.feed("<a href/>") p.feed("<a href=a />") p.feed("</a>") p.feed("<?bla foo?>") p.feed("<?bla?>") p.feed("<!-- - comment -->") p.feed("<!---->") p.feed("<!DOCTYPE \"vla foo>")
def _test():
    p = HtmlPrinter()
    #p.feed("<hTml>")
    p.feed("<a href>")
    #p.feed("<a href=''>")
    #p.feed('<a href="">')
    #p.feed("<a href='a'>")
    #p.feed('<a href="a">')
    p.feed("<a href=a>")
    #p.feed("<a href='\"'>")
    #p.feed("<a href=\"'\">")
    #p.feed("<a href=' '>")
    #p.feed("<a href=a href=b>")
    #p.feed("<a/>")
    #p.feed("<a href/>")
    #p.feed("<a href=a />")
    #p.feed("</a>")
    #p.feed("<?bla foo?>")
    #p.feed("<?bla?>")
    #p.feed("<!-- - comment -->")
    #p.feed("<!---->")
    #p.feed("<!DOCTYPE \"vla foo>")
    p.flush()
77ac994009378cc133855202839a3176171dd82d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/77ac994009378cc133855202839a3176171dd82d/htmllib.py
p.feed("")
p.feed("<img bo\\\nrder=0>")
def _broken ():
    p = HtmlPrinter()
    p.feed("")
    p.flush()
77ac994009378cc133855202839a3176171dd82d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/77ac994009378cc133855202839a3176171dd82d/htmllib.py
_test()
_broken()
def _broken ():
    p = HtmlPrinter()
    p.feed("")
    p.flush()
77ac994009378cc133855202839a3176171dd82d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/77ac994009378cc133855202839a3176171dd82d/htmllib.py
if not challenge.startswith('NTLMSSP\x00'):
if "," in challenge: chal, remainder = challenge.split(",", 1) else: chal, remainder = challenge, "" chal = base64.decodestring(chal.strip()) if not chal.startswith('NTLMSSP\x00'): res['type'] = 0
def parse_ntlm_challenge (challenge):
    """parse both type0 and type2 challenges"""
    res = {}
    if not challenge.startswith('NTLMSSP\x00'):
        return res, challenge
    res['nonce'] = challenge[24:32]
    return res, challenge[40:]
9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4/ntlm.py
res['nonce'] = challenge[24:32] return res, challenge[40:]
res['nonce'] = chal[24:32]
res['type'] = 2
return res, remainder.strip()
def parse_ntlm_challenge (challenge):
    """parse both type0 and type2 challenges"""
    res = {}
    if not challenge.startswith('NTLMSSP\x00'):
        return res, challenge
    res['nonce'] = challenge[24:32]
    return res, challenge[40:]
9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4/ntlm.py
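A hedged sketch of the type-2 challenge layout assumed by the parser above: the "NTLMSSP\0" signature in bytes 0-7, the message type at byte 8, and the 8-byte server nonce at offsets 24-32. The blob below is hand-built to exercise those offsets; the padding and nonce values are invented, not a real server response.

import base64

blob = (b'NTLMSSP\x00'                         # signature, bytes 0-7
        + b'\x02' + b'\x00' * 15               # type 2 marker plus padding up to offset 24
        + b'\x01\x23\x45\x67\x89\xab\xcd\xef'  # 8-byte nonce at offsets 24-32
        + b'\x00' * 8)                         # trailing bytes, total length 40
challenge = base64.b64encode(blob).decode('ascii')

decoded = base64.b64decode(challenge)
assert decoded.startswith(b'NTLMSSP\x00')
assert decoded[24:32] == b'\x01\x23\x45\x67\x89\xab\xcd\xef'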
pass
res = {}
if "," in credentials:
    creds, remainder = credentials.split(",", 1)
else:
    creds, remainder = credentials, ""
creds = base64.decodestring(creds.strip())
if not creds.startswith('NTLMSSP\x00'):
    return res, remainder.strip()
type = creds[8]
if type==1:
    res['type'] = 1
    domain_len = int(creds[16:18])
    domain_off = int(creds[20:22])
    host_len = int(creds[24:26])
    host_off = int(creds[28:30])
    res['host'] = creds[host_off:host_off+host_len]
    res['domain'] = creds[domain_off:domain_off+domain_len]
elif type==3:
    res['type'] = 3
    lm_res_len = int(creds[12:14])
else:
    return res, remainder.strip()
return res, remainder.strip()
def parse_ntlm_credentials (credentials):
    """parse both type1 and type3 credentials"""
    # XXX
    pass
9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4/ntlm.py
def create_message2 (flags="\x82\x01"):
def create_message2 ():
def create_message2 (flags="\x82\x01"):
    protocol = 'NTLMSSP\x00' #name
    type = '\x02'
    msglen = '\x28'
    nonce = "%08f" % (random.random()*10)
    assert nonce not in nonces
    nonces[nonce] = None
    zero2 = '\x00' * 2
    zero7 = '\x00' * 7
    zero8 = '\x00' * 8
    return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4/ntlm.py
nonce = "%08f" % (random.random()*10)
zero2 = '\x00'*2
flags="\x82\x01"
nonce = "%08d" % (random.random()*100000000)
def create_message2 (flags="\x82\x01"):
    protocol = 'NTLMSSP\x00' #name
    type = '\x02'
    msglen = '\x28'
    nonce = "%08f" % (random.random()*10)
    assert nonce not in nonces
    nonces[nonce] = None
    zero2 = '\x00' * 2
    zero7 = '\x00' * 7
    zero8 = '\x00' * 8
    return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4/ntlm.py
zero2 = '\x00' * 2
zero7 = '\x00' * 7
zero8 = '\x00' * 8
return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
zero8 = '\x00'*8
return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(flags)s%(zero2)s%(nonce)s%(zero8)s" % locals()
def create_message2 (flags="\x82\x01"):
    protocol = 'NTLMSSP\x00' #name
    type = '\x02'
    msglen = '\x28'
    nonce = "%08f" % (random.random()*10)
    assert nonce not in nonces
    nonces[nonce] = None
    zero2 = '\x00' * 2
    zero7 = '\x00' * 7
    zero8 = '\x00' * 8
    return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4/ntlm.py
protocol = 'NTLMSSP\000'
type = '\003\000'
head = protocol + type + '\000\000'
protocol = 'NTLMSSP\x00'
type = '\x03'
head = protocol + type + '\x00'*3
def create_message3 (nonce, domain, username, host, flags="\x82\x01", lm_hashed_pw=None, nt_hashed_pw=None, ntlm_mode=0):
    protocol = 'NTLMSSP\000' #name
    type = '\003\000' #type 3
    head = protocol + type + '\000\000'
    domain_rec = record(domain)
    user_rec = record(username)
    host_rec = record(host)
    additional_rec = record('')
    if lm_hashed_pw:
        lm_rec = record(ntlm_procs.calc_resp(lm_hashed_pw, nonce))
    else:
        lm_rec = record('')
    if nt_hashed_pw:
        nt_rec = record(ntlm_procs.calc_resp(nt_hashed_pw, nonce))
    else:
        nt_rec = record('')
    # length of the head and five infos for LM, NT, Domain, User, Host
    domain_offset = len(head) + 5 * 8
    # and unknown record info and flags' length
    if ntlm_mode == 0:
        domain_offset = domain_offset + 8 + len(flags)
    # create info fields
    domain_rec.create_record_info(domain_offset)
    user_rec.create_record_info(domain_rec.next_offset)
    host_rec.create_record_info(user_rec.next_offset)
    lm_rec.create_record_info(host_rec.next_offset)
    nt_rec.create_record_info(lm_rec.next_offset)
    additional_rec.create_record_info(nt_rec.next_offset)
    # data part of the message 3
    data_part = domain_rec.data + user_rec.data + host_rec.data + lm_rec.data + nt_rec.data
    # build message 3
    m3 = head + lm_rec.record_info + nt_rec.record_info + \
         domain_rec.record_info + user_rec.record_info + host_rec.record_info
    # Experimental feature !!!
    if ntlm_mode == 0:
        m3 += additional_rec.record_info + flags
    m3 += data_part
    # Experimental feature !!!
    if ntlm_mode == 0:
        m3 += additional_rec.data
    return m3
9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4/ntlm.py
def parse_message2 (msg2):
    msg2 = base64.decodestring(msg2)
    nonce = msg2[24:32]
    return nonce
def parse_message2 (msg2):
    msg2 = base64.decodestring(msg2)
    # protocol = msg2[0:7]
    # msg_type = msg2[7:9]
    nonce = msg2[24:32]
    return nonce
9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4/ntlm.py
def debug_message1 (msg): m_ = base64.decodestring(msg) m_hex = utils.str2hex(m_) res = '==============================================================\n' res += 'NTLM Message 1 report:\n' res += '---------------------------------\n' res += 'Base64: %s\n' % msg res += 'String: %s\n' % utils.str2prn_str(m_) res += 'Hex: %s\n' % m_hex cur = 0 res += '---------------------------------\n' cur_len = 12 res += 'Header %d/%d:\n%s\n\n' % (cur, cur_len, m_hex[0:24]) res += '%s\nmethod name 0/8\n%s res += '0x%s%s res += '%s cur += cur_len res += '---------------------------------\n' cur_len = 4 res += 'Flags %d/%d\n' % (cur, cur_len) res += flags(m_[cur: cur + cur_len]) cur += cur_len res += '---------------------------------\n' cur_len = len(m_) - cur res += 'Rest of the message %d/%d:\n' % (cur, cur_len) res += unknown_part(m_[cur: cur + cur_len]) res += '\nEnd of message 1 report.\n' return res def debug_message2 (msg): m_ = base64.decodestring(msg) m_hex = utils.str2hex(m_) res = '==============================================================\n' res += 'NTLM Message 2 report:\n' res += '---------------------------------\n' res += 'Base64: %s\n' % msg res += 'String: %s\n' % utils.str2prn_str(m_) res += 'Hex: %s\n' % m_hex cur = 0 res += '---------------------------------\n' cur_len = 12 res += 'Header %d/%d:\n%s\n\n' % (cur, cur_len, m_hex[0:24]) res += '%s\nmethod name 0/8\n%s res += '0x%s%s res += '%s cur += cur_len res += '---------------------------------\n' cur_len = 8 res += 'Lengths and Positions %d/%d\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2]) cur_len = 8 res += 'Domain ??? %d/%d\n' % (cur, cur_len) dom = item(m_[cur:cur+cur_len]) res += dom['string'] cur += cur_len res += '---------------------------------\n' cur_len = 4 res += 'Flags %d/%d\n' % (cur, cur_len) res += flags(m_[cur: cur + cur_len]) cur += cur_len res += '---------------------------------\n' cur_len = 8 res += 'NONCE %d/%d\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2]) cur += cur_len res += '---------------------------------\n' cur_len = dom['offset'] - cur res += 'Unknown data %d/%d:\n' % (cur, cur_len) res += unknown_part(m_[cur: cur + cur_len]) cur += cur_len res += '---------------------------------\n' cur_len = dom['len1'] res += 'Domain ??? 
%d/%d:\n' % (cur, cur_len) res += 'Hex: %s\n' % m_hex[cur * 2: (cur + cur_len) * 2] res += 'String: %s\n\n' % utils.str2prn_str(m_[cur : cur + cur_len]) cur += cur_len res += '---------------------------------\n' cur_len = len(m_) - cur res += 'Rest of the message %d/%d:\n' % (cur, cur_len) res += unknown_part(m_[cur: cur + cur_len]) res += '\nEnd of message 2 report.\n' return res def debug_message3 (msg): m_ = base64.decodestring(msg) m_hex = utils.str2hex(m_) res = '==============================================================\n' res += 'NTLM Message 3 report:\n' res += '---------------------------------\n' res += 'Base64: %s\n' % msg res += 'String: %s\n' % utils.str2prn_str(m_) res += 'Hex: %s\n' % m_hex cur = 0 res += '---------------------------------\n' cur_len = 12 res += 'Header %d/%d:\n%s\n\n' % (cur, cur_len, m_hex[0:24]) res += '%s\nmethod name 0/8\n%s res += '0x%s%s res += '%s cur += cur_len res += '---------------------------------\n' cur_len = 48 res += 'Lengths and Positions %d/%d\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2]) cur_len = 8 res += 'LAN Manager response %d/%d\n' % (cur, cur_len) lmr = item(m_[cur:cur+cur_len]) res += lmr['string'] cur += cur_len cur_len = 8 res += 'NT response %d/%d\n' % (cur, cur_len) ntr = item(m_[cur:cur+cur_len]) res += ntr['string'] cur += cur_len cur_len = 8 res += 'Domain string %d/%d\n' % (cur, cur_len) dom = item(m_[cur:cur+cur_len]) res += dom['string'] cur += cur_len cur_len = 8 res += 'User string %d/%d\n' % (cur, cur_len) username = item(m_[cur:cur+cur_len]) res += username['string'] cur += cur_len cur_len = 8 res += 'Host string %d/%d\n' % (cur, cur_len) host = item(m_[cur:cur+cur_len]) res += host['string'] cur += cur_len cur_len = 8 res += 'Unknow item record %d/%d\n' % (cur, cur_len) unknown = item(m_[cur:cur+cur_len]) res += unknown['string'] cur += cur_len res += '---------------------------------\n' cur_len = 4 res += 'Flags %d/%d\n' % (cur, cur_len) res += flags(m_[cur: cur + cur_len]) cur += cur_len res += '---------------------------------\n' cur_len = dom['len1'] + user['len1'] + host['len1'] res += 'Domain, User, Host strings %d/%d\n%s\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2], utils.str2prn_str(m_[cur:cur + cur_len])) cur_len = dom['len1'] res += '%s\n' % m_hex[cur * 2: (cur + cur_len) * 2] res += 'Domain name %d/%d:\n' % (cur, cur_len) res += '%s\n\n' % (utils.str2prn_str(m_[cur: (cur + cur_len)])) cur += cur_len cur_len = user['len1'] res += '%s\n' % m_hex[cur * 2: (cur + cur_len) * 2] res += 'User name %d/%d:\n' % (cur, cur_len) res += '%s\n\n' % (utils.str2prn_str(m_[cur: (cur + cur_len)])) cur += cur_len cur_len = host['len1'] res += '%s\n' % m_hex[cur * 2: (cur + cur_len) * 2] res += 'Host name %d/%d:\n' % (cur, cur_len) res += '%s\n\n' % (utils.str2prn_str(m_[cur: (cur + cur_len)])) cur += cur_len res += '---------------------------------\n' cur_len = lmr['len1'] res += 'LAN Manager response %d/%d\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2]) cur += cur_len res += '---------------------------------\n' cur_len = ntr['len1'] res += 'NT response %d/%d\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2]) cur += cur_len res += '---------------------------------\n' cur_len = len(m_) - cur res += 'Rest of the message %d/%d:\n' % (cur, cur_len) res += unknown_part(m_[cur: cur + cur_len]) res += '\nEnd of message 3 report.\n' return res
def unknown_part (bin_str):
    res = 'Hex : %s\n' % utils.str2hex(bin_str, ' ')
    res += 'String : %s\n' % utils.str2prn_str(bin_str, ' ')
    res += 'Decimal: %s\n' % utils.str2dec(bin_str, ' ')
    return res
9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9b3b7bd7d9bd500538e8ae441a173f0dcd4087d4/ntlm.py
return end
return end-size
def size_number (text):
    base = which_base(text)
    if base == 0:
        return 0
    length = len(text)
    size = size_base(base)
    end = size+1
    while end < length and text[end] in _hex[:base]:
        end += 1
    return end
581a22995be393676dd88ab5c6098b2afb554f13 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/581a22995be393676dd88ab5c6098b2afb554f13/convert.py
end = size_number(text)
end = start+size_number(text)
def convert (text):
    base = which_base(text)
    start = size_base(base)
    end = size_number(text)
    return base10(text[start:end], base)
581a22995be393676dd88ab5c6098b2afb554f13 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/581a22995be393676dd88ab5c6098b2afb554f13/convert.py
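The expected behaviour of convert(), taken from the module's own self-test further down, can be reproduced for the common cases with plain int(); the helper below is only an illustrative stand-in, not the module's which_base/size_base/base10 implementation.

# Illustrative only: rough stand-in for convert(), covering the "0x.."/"\x.."
# hex and "0.."/"\.." octal prefixes shown in the self-test
def convert_literal(text):
    if text[:2].lower() in ('0x', '\\x'):
        return int(text[2:], 16)
    if text[:1] in ('0', '\\'):
        return int(text[1:], 8)
    return int(text)

assert convert_literal("0xFF") == 255
assert convert_literal("\\xFF") == 255
assert convert_literal("077") == 63
assert convert_literal("\\77") == 63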
if __name__ == '__main__':
    print "---"
    print "base10(\"FF\",16) = ", 255, "\tgot ", base10("FF",16)
    print "base10(\"77\", 8) = ", 63, "\tgot ", base10("77",8)
    print "---"
    print "convert(\"0xFF\" ) = ", 255, "\tgot ", convert("0xFF")
    print "convert(\"\\xFF\" ) = ", 255, "\tgot ", convert("\\xFF")
    print "convert(\"077\" ) = ", 63, "\tgot ", convert("077")
    print "convert(\"\\77\" ) = ", 63, "\tgot ", convert("\\77")
    print "convert(\"\\177E\" ) = ", 127, "\tgot ", convert("\\177E"), "The E is not used"
    print "---"
    print "size_number(\"100FFF\") = ", 3, "\tgot", size_number("100qwerty")
    print "size_number(\"\\7799\" ) = ", 3, "\tgot", size_number("\\77FF")
    print "size_number(\"\\XFFG\" ) = ", 3, "\tgot", size_number("\\XFFG")
    print "---"
    print "index_number(\"0XF\" ) = ", 0, "\tgot", index_number("0XF")
    print "index_number(\"\\XF\" ) = ", 0, "\tgot", index_number("\\XF")
    print "index_number(\"FF\\FFGG\" ) = ", -1, "\tgot", index_number("FF\\FFGG")
    print "index_number(\"FF\\7\" ) = ", 2, "\tgot", index_number("FF\\7")
    print "index_number(\"FFF\\XFFGG\" ) = ", 3, "\tgot", index_number("FFF\\XFFGG")
    print "index_number(\"\\\\\\XFFGG\" ) = ", 2, "\tgot", index_number("FF\\XFFGG")
    print "---"
    print "little2 ","1 ",little2(chr( 1)+chr(0))
    print "little2 ","16 ",little2(chr(16)+chr(0))
    print "---"
    print "big2","1 ",big2(chr(0)+chr(1))
    print "big2","16 ",big2(chr(0)+chr(16))
    print "---"
    print "little4","2147483649",little4(chr(1)+chr(0)+chr(0)+chr(128))
    print "big4 ","2147483649",big4(chr(128)+chr(0)+chr(0)+chr(1))
def local4 (number):
    if sys.byteorder == 'big':
        return big4(number)
    return little4(number)
581a22995be393676dd88ab5c6098b2afb554f13 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/581a22995be393676dd88ab5c6098b2afb554f13/convert.py
debug(PROXY, "%s closed, got empty data")
debug(PROXY, "%s closed, got empty data", self)
def handle_read (self):
    """read data from connection, put it into recv_buffer and call process_read"""
    assert self.connected
6cb6160e29079160b9225f2587e7402e042a40f6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/6cb6160e29079160b9225f2587e7402e042a40f6/Connection.py
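The fix above supplies the missing argument for the "%s" placeholder. The project's debug(PROXY, ...) helper is its own wrapper, but the same lazy-formatting pattern with the standard logging module looks like this sketch (connection repr is hypothetical):

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("proxy")

connection = "<Connection 127.0.0.1:12345>"   # hypothetical repr of self
# arguments are only interpolated when the DEBUG level is enabled,
# which is why every "%s" needs a matching argument
log.debug("%s closed, got empty data", connection)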
debug(FILTER, "blocked url %s", url)
debug(FILTER, "blocked url %s: %s", url, str(blocked))
def doit (self, data, **args):
    # note: data is the complete request
    method, url, httpver = data.split()
    debug(FILTER, "block filter working on url %s", `url`)
    if self.allowed(url):
        return data
    blocked = self.strict_whitelist or self.blocked(url)
    if blocked:
        debug(FILTER, "blocked url %s", url)
        if isinstance(blocked, basestring):
            doc = blocked # index 3, not 2!
        elif is_image(url):
            doc = self.block_image
        else:
            # XXX hmmm, what about CGI images?
            # make HTTP HEAD request?
            doc = self.block_url
        port = config['port']
        if method=='CONNECT':
            return 'CONNECT https://localhost:%d%s HTTP/1.1'%(port, doc)
        return 'GET http://localhost:%d%s HTTP/1.1'%(port, doc)
    return data
f32d2a1749d0cb0f35bb52a4e34151db130f20f9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/f32d2a1749d0cb0f35bb52a4e34151db130f20f9/Blocker.py
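A small sketch of the request-line rewrite performed by doit above, which redirects a blocked URL to the proxy's local block page; the port and the /blocked.html path are invented for the example.

def rewrite_blocked(method, doc, port):
    # mirror the two rewrite branches from the record above
    if method == 'CONNECT':
        return 'CONNECT https://localhost:%d%s HTTP/1.1' % (port, doc)
    return 'GET http://localhost:%d%s HTTP/1.1' % (port, doc)

assert rewrite_blocked('GET', '/blocked.html', 8080) == \
    'GET http://localhost:8080/blocked.html HTTP/1.1'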