Each row below shows the removed lines (rem), the added lines (add), the surrounding source context, and a meta line with the source commit and file path.
except IOError, msg:
except IOError:
def iswriteable (fname):
    if os.path.isdir(fname) or os.path.islink(fname):
        return False
    try:
        if os.path.exists(fname):
            f = file(fname, 'a')
            f.close()
            return True
        else:
            f = file(fname, 'w')
            f.close()
            os.remove(fname)
            return True
    except IOError, msg:
        pass
    return False
cf22ff7ad41237a9a9236b001f6e9cf7d9e8a101 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cf22ff7ad41237a9a9236b001f6e9cf7d9e8a101/__init__.py
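The pair above replaces Python 2's `except IOError, msg:` with the bare `except IOError:` form that also parses under later Python versions. A minimal modern sketch of the same writability probe (standard library only; the helper name is mine):

import os

def is_writable(path):
    """Return True if `path` can be opened for writing.

    Same idea as iswriteable() above: directories and symlinks are
    rejected, an existing file is opened in append mode so its content
    is preserved, and a probe file created for a nonexistent path is
    removed again.
    """
    if os.path.isdir(path) or os.path.islink(path):
        return False
    try:
        if os.path.exists(path):
            with open(path, "a"):
                pass
        else:
            with open(path, "w"):
                pass
            os.remove(path)
        return True
    except OSError:
        return False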
def reload_config (signum, frame):
def reload_config (*dummy):
    """reload configuration function with dummy params for (signum, frame)
    from the signal handler prototype
    """
def startfunc (handle=None):
    # init logging
    initlog(os.path.join(ConfigDir, "logging.conf"))
    # we run single-threaded, decrease check interval
    sys.setcheckinterval(500)
    # support reload on posix systems
    if os.name=='posix':
        import signal
        signal.signal(signal.SIGHUP, reload_config)
    # drop privileges
    os.chdir("/")
    # for web configuration, we cannot drop privileges
    #if os.geteuid()==0:
    #    import pwd, grp
    #    try:
    #        pentry = pwd.getpwnam("nobody")
    #        pw_uid = 2
    #        nobody = pentry[pw_uid]
    #        gentry = grp.getgrnam("nogroup")
    #        gr_gid = 2
    #        nogroup = gentry[gr_gid]
    #        os.setgid(nogroup)
    #        os.setuid(nobody)
    #    except KeyError:
    #        warn(WC, "could not drop root privileges, user nobody "+\
    #             "and/or group nogroup not found")
    #        pass
    # read configuration
    global config
    config = Configuration()
    config.init_filter_modules()
    # start the proxy
    import wc.proxy
    wc.proxy.mainloop(handle=handle)
cf22ff7ad41237a9a9236b001f6e9cf7d9e8a101 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cf22ff7ad41237a9a9236b001f6e9cf7d9e8a101/__init__.py
def parse (self, fp, config):
    self.config = config
def parse (self, fp, _config):
    self.config = _config
def parse (self, fp, config):
    self.config = config
    super(WConfigParser, self).parse(fp)
    self.config['configfile'] = self.filename
    self.config['filters'].sort()
cf22ff7ad41237a9a9236b001f6e9cf7d9e8a101 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cf22ff7ad41237a9a9236b001f6e9cf7d9e8a101/__init__.py
self['noproxyfor'] = {}
self['allowedhosts'] = {}
self['noproxyfor'] = [{}, [], {}]
self['allowedhosts'] = [{}, [], {}]
def reset (self):
    """Reset to default values"""
    self['port'] = 8080
    self['proxyuser'] = ""
    self['proxypass'] = ""
    self['parentproxy'] = ""
    self['parentproxyport'] = 3128
    self['parentproxyuser'] = ""
    self['parentproxypass'] = ""
    self['logfile'] = ""
    self['strict_whitelist'] = 0
    self['debuglevel'] = 0
    self['rules'] = []
    self['filters'] = []
    self['filterlist'] = [[],[],[],[],[],[],[],[],[],[]]
    self['colorize'] = 0
    self['noproxyfor'] = {}
    self['allowedhosts'] = {}
    self['starttime'] = time.time()
    self['requests'] = {'valid':0, 'error':0, 'blocked':0}
    self['local_sockets_only'] = 0
    self['localip'] = socket.gethostbyname(socket.gethostname())
    self['mime_content_rewriting'] = []
    self['headersave'] = 100
    self['showerrors'] = None
ae88298bade403afcc970bbd5a69ed1d59e2329e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ae88298bade403afcc970bbd5a69ed1d59e2329e/__init__.py
control = AnsiControl.get(ctype, '')+";"
control = AnsiControl.get(control, '')+";"
def esc_ansicolor (color):
    """convert a named color definition to an escaped ANSI color"""
    control = ''
    if ";" in color:
        control, color = color.split(";", 1)
        control = AnsiControl.get(ctype, '')+";"
    cnum = AnsiColor.get(color, '0')
    return AnsiEsc % (control+cnum)
a1f7a34a58f45535a944503b67de31088fa2cef0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a1f7a34a58f45535a944503b67de31088fa2cef0/ansicolor.py
n = 0
n = 0L
def addr2bin (addr):
    if type(addr) == type(0):
        return addr
    bytes = addr.split('.')
    if len(bytes) != 4:
        raise ValueError, 'bad IP address'
    n = 0
    for byte in bytes:
        n = n<<8 | int(byte)
    return n
fc9b88bb917a0ca81a8b61f8ad145d77419f879c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/fc9b88bb917a0ca81a8b61f8ad145d77419f879c/Lib.py
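The `n = 0L` change presumably forces long arithmetic so the shifts cannot overflow a plain machine int on old Python 2 interpreters; modern Python integers are unbounded, so the distinction disappears. A small sketch of the same conversion using the standard library (the helper name is mine):

import socket

def addr_to_int(addr):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value.

    socket.inet_aton also validates the address and raises OSError for
    malformed input, replacing the manual length check above.
    """
    if isinstance(addr, int):
        return addr
    return int.from_bytes(socket.inet_aton(addr), "big")

assert addr_to_int("127.0.0.1") == 0x7F000001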
wc.log.error(wc.LOG_JS, "JS error at %s", self.url)
wc.log.error(wc.LOG_JS, msg)
wc.log.debug(wc.LOG_JS, "JS error at %s", self.url)
wc.log.debug(wc.LOG_JS, msg.rstrip())
def js_process_error (self, msg):
    """
    Process javascript syntax error.
    """
    wc.log.error(wc.LOG_JS, "JS error at %s", self.url)
    wc.log.error(wc.LOG_JS, msg)
ba10e217fe651dff2925c726fd473acac6e976ec /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ba10e217fe651dff2925c726fd473acac6e976ec/JSFilter.py
print >>log, wc.i18n._("adding new filter %s"), filename
print >>log, wc.i18n._("adding new filter %s")%filename
def update_filter (wconfig, dryrun=False, log=None):
    """Update the given configuration object with .zap files found at
    baseurl. If dryrun is True, only print out the changes but do nothing
    throws IOError on error
    """
    chg = False
    baseurl = wconfig['baseurl']+"filter/"
    url = baseurl+"filter-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, "error fetching %s:"%url, msg
        return chg
    # remember all local config files
    filemap = {}
    for filename in wc.filterconf_files(wconfig.filterdir):
        filemap[os.path.basename(filename)] = filename
    # read md5sums
    for line in page.read().splitlines():
        if "<" in line:
            print >>log, "error fetching", url
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        assert filename.endswith('.zap')
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if filemap.has_key(filename):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, wc.i18n._("filter %s not changed, ignoring")%filename
                continue
            print >>log, wc.i18n._("updating filter %s")%filename
        else:
            print >>log, wc.i18n._("adding new filter %s"), filename
        # parse new filter
        url = baseurl+filename
        page = open_url(url)
        p = wc.ZapperParser(fullname, wconfig, compile_data=False)
        p.parse(fp=page)
        page.close()
        if wconfig.merge_folder(p.folder, dryrun=dryrun, log=log):
            chg = True
    url = baseurl+"extern-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, wc.i18n._("error fetching %s:")%url, msg
        return chg
    lines = page.read().splitlines()
    page.close()
    for line in lines:
        if "<" in line:
            print >>log, wc.i18n._("error fetching %s:")%url, wc.i18n._("invalid content")
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        # XXX UNIX-generated md5sum filenames with subdirs are not portable
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if os.path.exists(fullname):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, wc.i18n._("extern filter %s not changed, ignoring")%filename
                continue
            print >>log, wc.i18n._("updating extern filter %s")%filename
        else:
            print >>log, wc.i18n._("adding new extern filter %s")%filename
        chg = True
        if not dryrun:
            url = baseurl+filename
            try:
                page = open_url(url)
            except IOError, msg:
                print >>log, wc.i18n._("error fetching %s:")%url, msg
                continue
            data = page.read()
            if not data:
                print >>log, wc.i18n._("error fetching %s:")%url, \
                      wc.i18n._("got no data")
                continue
            f = file(fullname, 'wb')
            f.write(data)
            f.close()
    return chg
16819cf87bbfc6e69ec569eb0e9526bca620ca26 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/16819cf87bbfc6e69ec569eb0e9526bca620ca26/update.py
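Two things stand out in this row: the fixed line swaps a comma for the `%` operator, so the filename is interpolated into the message instead of being printed after a literal `%s`; and the whole updater revolves around comparing md5 checksums of local filter files against a published list. A sketch of that comparison with today's hashlib (function names are mine; the 2006 code built the hex string by hand from md5.new()):

import hashlib

def file_md5(path):
    """Hex digest of a file, read in chunks so large files stay cheap."""
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            h.update(chunk)
    return h.hexdigest()

def needs_update(path, expected_md5):
    """True when the local copy differs from the published checksum."""
    return file_md5(path) != expected_md5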
version = "0.14",
version = "0.15",
def create_conf_file(self, directory, data=[]):
    data.insert(0, "# this file is automatically created by setup.py")
    filename = os.path.join(directory, self.config_file)
    # add metadata
    metanames = dir(self.metadata) + \
                ['fullname', 'contact', 'contact_email']
    for name in metanames:
        method = "get_" + name
        cmd = "%s = %s" % (name, `getattr(self.metadata, method)()`)
        data.append(cmd)
    util.execute(write_file, (filename, data),
                 "creating %s" % filename,
                 self.verbose>=1, self.dry_run)
5e1a663977cfe9c4df879543fa3560e73858171f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5e1a663977cfe9c4df879543fa3560e73858171f/setup.py
val = getattr(self, attr)[len(self.root):]
cutoff = len(self.root)
if self.root.endswith(os.sep):
    cutoff -= 1
val = getattr(self, attr)[cutoff:]
def run (self):
    super(MyInstall, self).run()
    # we have to write a configuration file because we need the
    # <install_data> directory (and other stuff like author, url, ...)
    data = []
    for d in ['purelib', 'platlib', 'lib', 'headers', 'scripts', 'data']:
        attr = 'install_%s'%d
        if self.root:
            # cut off root path prefix
            val = getattr(self, attr)[len(self.root):]
        else:
            val = getattr(self, attr)
        if attr=="install_data":
            base = os.path.join(val, 'share', 'webcleaner')
            data.append('config_dir = %r' % \
                        os.path.normcase(os.path.join(base, 'config')))
            data.append('template_dir = %r' % \
                        os.path.normcase(os.path.join(base, 'templates')))
        data.append("%s = %r" % (attr, val))
    from pprint import pformat
    data.append('outputs = %s' % pformat(self.get_outputs()))
    self.distribution.create_conf_file(self.install_lib, data)
590a828071d2dc4b9e53aa7e1bcf915ad6d705fe /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/590a828071d2dc4b9e53aa7e1bcf915ad6d705fe/setup.py
context=None, target_language=None, default=None):
def translate (self, domain, msgid, mapping=None,
               context=None, target_language=None, default=None):
    """Interpolates and translate TAL expression."""
    _msg = self.gettext(msgid)
    wc.log.debug(wc.LOG_TAL, "TRANSLATED %r %r", msgid, _msg)
    return wc.webgui.TAL.TALInterpreter.interpolate(_msg, mapping)
5f13f232088408097e4d2517a71797db42dfb5f3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5f13f232088408097e4d2517a71797db42dfb5f3/webconfig.py
['webcleaner.bat', 'filtertest', 'filtertest.html']),
['webcleaner.bat']),
def create_conf_file(self, directory, data=[]):
    data.insert(0, "# this file is automatically created by setup.py")
    filename = os.path.join(directory, self.config_file)
    # add metadata
    metanames = dir(self.metadata) + \
                ['fullname', 'contact', 'contact_email']
    for name in metanames:
        method = "get_" + name
        cmd = "%s = %s" % (name, `getattr(self.metadata, method)()`)
        data.append(cmd)
    util.execute(write_file, (filename, data),
                 "creating %s" % filename,
                 self.verbose>=1, self.dry_run)
91836b953780c6dfbf4c8ccc65f64489d8d3e819 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/91836b953780c6dfbf4c8ccc65f64489d8d3e819/setup.py
fp = file(os.path.join(os.getcwd(), "templates", "classic", "macros", "rules.html"))
fp = file(os.path.join(os.getcwd(), "test", "html", "taltest.html"))
def get_context ():
    # init and return TALES context
    context = simpleTALES.Context()
    context.addGlobal("parameter", "hullabulla")
    return context
9b44bcbaa665bda45f87c1c62d39b859d2325f65 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9b44bcbaa665bda45f87c1c62d39b859d2325f65/test_simpletal.py
headers = WcMessage()
headers = wc.http.header.WcMessage()
def get_wc_client_headers (host):
    """
    Get default webcleaner proxy request headers.
    """
    headers = WcMessage()
    headers['Host'] = '%s\r' % host
    headers['Accept-Encoding'] = 'gzip;q=1.0, deflate;q=0.9, identity;q=0.5\r'
    headers['Connection'] = 'Keep-Alive\r'
    headers['Keep-Alive'] = 'timeout=300\r'
    headers['User-Agent'] = 'Calzilla/6.0\r'
    return headers
904784a9ac73c74eef871f586fa40a1bff5e9e51 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/904784a9ac73c74eef871f586fa40a1bff5e9e51/Headers.py
for key, value in nonces.items():
for nonce, value in nonces.items():
def check_nonces ():
    # deprecate old nonces
    for key, value in nonces.items():
        noncetime = time.time() - value
        if noncetime > max_noncesecs:
            del nonces[nonce]
a9ecfa743f3d77d6172bb8694a2020e54d66ad71 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a9ecfa743f3d77d6172bb8694a2020e54d66ad71/ntlm.py
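The fix renames the loop variable so the deletion targets the key actually being iterated; the removed version referenced an undefined name `nonce` while looping over `key`. A self-contained sketch of the corrected expiry loop (the lifetime constant is my placeholder; the original value lives elsewhere in ntlm.py):

import time

MAX_NONCE_SECS = 2 * 60 * 60  # assumed lifetime for illustration

def check_nonces(nonces):
    """Drop nonces older than MAX_NONCE_SECS.

    Iterating over a list copy of the items keeps deletion during
    iteration safe in any Python version.
    """
    now = time.time()
    for nonce, created in list(nonces.items()):
        if now - created > MAX_NONCE_SECS:
            del nonces[nonce]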
reaminder = remainder.strip()
remainder = remainder.strip()
def parse_ntlm_challenge (challenge):
    """parse both type0 and type2 challenges"""
    if "," in challenge:
        chal, remainder = challenge.split(",", 1)
    else:
        chal, remainder = challenge, ""
    chal = chal.strip()
    reaminder = remainder.strip()
    if not chal:
        # empty challenge (type0) encountered
        res = {'type': NTLMSSP_INIT}
    else:
        msg = base64.decodestring(chal)
        res = parse_message2(msg)
        if not res:
            warn(AUTH, "invalid NTLM challenge %s", `msg`)
    return res, remainder
a9ecfa743f3d77d6172bb8694a2020e54d66ad71 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a9ecfa743f3d77d6172bb8694a2020e54d66ad71/ntlm.py
def _kind (self,full_type,endian):
    if endian == 'local':
        kind = full_type
    else:
        kind = full_type[2:]
fa8a059590f5621889e8f0481dc5d1c6f0c2d5c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/fa8a059590f5621889e8f0481dc5d1c6f0c2d5c8/magic.py
kind = "string"
if "c" in kind[7:]: kind = "stringnocase" else: kind = "string"
if kind.startswith("ldate-"):
elif kind.startswith("ldate-"):
elif kind == "string" and (result[pos] in string.ascii_letters or result[pos] in string.digits): data.append(ord(result[pos])*1L) pos +=1
def _data (self,kind,result):
    pos = 0
    data = list('')
    while pos < len(result):
        if convert.is_c_escape(result[pos:]):
            # \0 is not a number it is the null string
            if result[pos+1] == '0':
                data.append(result[pos])
                data.append(0L)
            # \rnt are special
            else:
                data.append(result[pos:pos+2])
            pos +=2
        elif kind == "string" and (result[pos] in string.ascii_letters
                                   or result[pos] in string.digits):
            data.append(ord(result[pos])*1L)
            pos +=1
        else:
            base = convert.which_base(result[pos:])
fa8a059590f5621889e8f0481dc5d1c6f0c2d5c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/fa8a059590f5621889e8f0481dc5d1c6f0c2d5c8/magic.py
if kind == "string":
if kind.startswith("string"):
def _length (self, kind, data):
    # Calculate the size of the data to read in the file
    if kind == "string":
        replace = ""
        for i in data:
            # except: Too lazy to handle the '\r' and co otherwise
            try:
                replace += chr(i)
            except:
                replace+='*'
        # This is for "\0"
        replace = replace.replace('*\0','*')
        # This is for two "\"
        replace = replace.replace('\\\\','*')
        # This is for the remaining "\{whatever}"
        replace = replace.replace('\\','')
        length = len(replace)
    else:
        length = self.data_size[kind]
    return length
fa8a059590f5621889e8f0481dc5d1c6f0c2d5c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/fa8a059590f5621889e8f0481dc5d1c6f0c2d5c8/magic.py
index += 1
def read_magic (self, magic_file):
    self.magic = []
fa8a059590f5621889e8f0481dc5d1c6f0c2d5c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/fa8a059590f5621889e8f0481dc5d1c6f0c2d5c8/magic.py
if kind == 'string':
    if self._is_null_string(data):
        success = True
    elif len(data) == len(extract):
        success = True
        for index in range(len(data)):
            if ord(extract[index]) != data[index]:
                success = False
if kind.startswith('string'):
    success = (data==value)
def classify (self, f):
    if not self.entries:
        raise StandardError("Not initialised properly")
    # Are we still looking for the ruleset to apply or are we in a rule
    found_rule = False
    # When we found the rule, what is the level that we successfull passed
    in_level = 0
    # If we failed part of the rule there is no point looking for higher level subrule
    allow_next = 0
    # String provided by the successfull rule
    result = ""
fa8a059590f5621889e8f0481dc5d1c6f0c2d5c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/fa8a059590f5621889e8f0481dc5d1c6f0c2d5c8/magic.py
except (Failed, IOError):
except (Failed, IOError), msg:
label = Label(master, text=msg % d, anchor=tk.W, justify=tk.LEFT)
label = tk.Label(master, text=msg % d, anchor=tk.W, justify=tk.LEFT)
def body(self, master):
    d = {"appname": wc.AppName}
    msg = _("""The administrator password protects the web
bced9bf02aea78485c59aec9dfc85f8c8bae7f41 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/bced9bf02aea78485c59aec9dfc85f8c8bae7f41/install-webcleaner.py
label = Label(master, text=_("Password:"))
label = tk.Label(master, text=_("Password:"))
self.pass_entry = Entry(master)
self.pass_entry = tk.Entry(master)
title = _("%s administrator password") % wc.AppName PasswordDialog(root, title=title)
title = _("%s administrator password") % wc.AppName PasswordDialog(root, title=title)
def apply(self):
    password = self.pass_entry.get()
    if password:
        save_adminpassword(password)
    else:
        print _("Not saving empty password.")
bced9bf02aea78485c59aec9dfc85f8c8bae7f41 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/bced9bf02aea78485c59aec9dfc85f8c8bae7f41/install-webcleaner.py
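All three fixes in this group are the same namespacing slip: the Tkinter module is imported under the alias `tk`, so the bare names Label and Entry raise NameError. A tiny self-contained sketch of the corrected usage (Python 3 spelling; the widget layout is my own choice):

import tkinter as tk  # the 2006 code imports this as "import Tkinter as tk"

def build_password_row(master):
    """With the module aliased to tk, widget classes must be qualified
    as tk.Label / tk.Entry, which is exactly what the fixes above do."""
    label = tk.Label(master, text="Password:")
    entry = tk.Entry(master, show="*")
    label.grid(row=0, column=0)
    entry.grid(row=0, column=1)
    return entry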
wc.log.debug(wc.LOG_DNS, "%s switching to TCP", self)
def handle_timeout (self):
    # The DNS server hasn't responded to us, or we've lost the
    # packet somewhere, so let's try it again, unless the retry
    # count is too large. Each time we retry, we increase the
    # timeout (see send_dns_request).
    if not self.callback:
        return # It's already handled, so ignore this
    wc.log.warn(wc.LOG_DNS, "%s DNS timeout", self)
    if not self.connected:
        self.callback(self.hostname,
                      DnsResponse('error', 'timed out connecting'))
        self.callback = None
        return
    self.retries += 1
    if (not self.tcp and dns_accepts_tcp.get(self.nameserver, True)
        and self.retries == 1):
        # Switch to TCP
        self.TIMEOUT = 20
        self.close()
        self.tcp = True
        self.establish_connection()
    elif self.retries < 5:
        self.send_dns_request()
    elif not self.tcp and self.retries < 12:
        self.send_dns_request()
    else:
        if self.callback:
            self.callback(self.hostname, DnsResponse('error', 'timed out'))
            self.callback = None
        if self.connected:
            self.close()
5a4c5eb0aa5cec34014b47c719bb0a05900adcdf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5a4c5eb0aa5cec34014b47c719bb0a05900adcdf/dns_lookups.py
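The retry policy buried in handle_timeout reads as a small decision table: one UDP timeout triggers a switch to TCP when the server supports it, otherwise the request is resent, with separate caps for TCP and UDP before giving up. A side-effect-free restatement distilled from the code above (the function and its return values are mine, for illustration only):

def next_action(tcp, retries, server_accepts_tcp):
    """Decide what a DNS lookup should do after a timeout."""
    if not tcp and server_accepts_tcp and retries == 1:
        return "switch-to-tcp"   # first UDP timeout: try TCP instead
    if retries < 5:
        return "resend"          # either transport gets a few retries
    if not tcp and retries < 12:
        return "resend"          # UDP is cheap, so it gets more
    return "give-up"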
wc.log.warn(wc.LOG_DNS, '%s was no response to %s',
wc.log.warn(wc.LOG_DNS, 'Wrong response %s to query %s',
def process_read (self):
    if not self.callback:
        self.close()
    # Assume that the entire answer comes in one packet
    if self.tcp:
        if len(self.recv_buffer) < 2:
            return
        header = self.recv_buffer[:2]
        (l,) = struct.unpack("!H", header)
        if len(self.recv_buffer) < 2+l:
            return
        self.read(2) # header
        wire = self.read(l)
        try:
            self.socket.shutdown(1)
        except socket.error:
            pass
    else:
        wire = self.read(1024)
    response = wc.dns.message.from_wire(
        wire, keyring=self.query.keyring, request_mac=self.query.mac)
    wc.log.debug(wc.LOG_DNS, "got DNS response %s", response)
    if not self.query.is_response(response):
        wc.log.warn(wc.LOG_DNS, '%s was no response to %s',
                    response, self.query)
        # Oops, this doesn't answer the right question. This can
        # happen because we're using UDP, and UDP replies might end
        # up in the wrong place: open conn A, send question to A,
        # timeout, send question to A, receive answer, close our
        # object, then open a new conn B, send question to B,
        # but get the OLD answer to A as a reply. This doesn't happen
        # with TCP but then TCP is slower.
5a4c5eb0aa5cec34014b47c719bb0a05900adcdf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5a4c5eb0aa5cec34014b47c719bb0a05900adcdf/dns_lookups.py
pattern = re.sub(r"([^.])\*([^?])", r"\1[^/]*\2", pattern)
pattern = re.sub(r"([^.])\*([^?]|$)", r"\1[^/]*\2", pattern)
def convert_adzapper_pattern (pattern):
    pattern = pattern.replace(".", "\\.")
    pattern = pattern.replace("?", "\\?")
    pattern = pattern.replace("**", ".*?")
    pattern = re.sub(r"([^.])\*([^?])", r"\1[^/]*\2", pattern)
    return pattern
657f0d0ee0e77459d91104d389ffabd1966868e0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/657f0d0ee0e77459d91104d389ffabd1966868e0/adzap2wc.py
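The fixed alternation `([^?]|$)` lets the wildcard rule fire when `*` is the last character of the pattern, which the old `([^?])` missed because there is no character left after the `*` to consume. A runnable restatement of the converter with the fix applied:

import re

def convert_adzapper_pattern(pattern):
    """Translate an adzapper glob into a regular expression,
    with the corrected trailing-wildcard handling."""
    pattern = pattern.replace(".", r"\.")
    pattern = pattern.replace("?", r"\?")
    pattern = pattern.replace("**", ".*?")
    return re.sub(r"([^.])\*([^?]|$)", r"\1[^/]*\2", pattern)

# The trailing wildcard is now translated:
assert convert_adzapper_pattern("ads/*") == r"ads/[^/]*"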
url="%(url)s"
url="%(url)s"/>
def write_allow (zapfile, adclass, pattern):
    #print "%s allow %s" % (adclass, `pattern`)
    d = get_rule_dict(adclass, pattern)
    zapfile.write("""<allow title="%(title)s" desc="%(desc)s" url="%(url)s"
657f0d0ee0e77459d91104d389ffabd1966868e0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/657f0d0ee0e77459d91104d389ffabd1966868e0/adzap2wc.py
url="%(url)s" """ % d)
url="%(url)s""" % d)
def write_block (zapfile, adclass, pattern, replacement=None):
    #print "%s block %s => %s" % (adclass, `pattern`, `replacement`)
    d = get_rule_dict(adclass, pattern)
    zapfile.write("""<block title="%(title)s" desc="%(desc)s" url="%(url)s" """ % d)
    if replacement is not None:
        zapfile.write(">%s</block>" % xmlify(replacement))
    else:
        zapfile.write("/>")
    zapfile.write("\n")
657f0d0ee0e77459d91104d389ffabd1966868e0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/657f0d0ee0e77459d91104d389ffabd1966868e0/adzap2wc.py
zapfile.write(">%s</block>" % xmlify(replacement))
zapfile.write("\">%s</block>" % xmlify(replacement))
zapfile.write("/>")
zapfile.write("\"/>")
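The three fixes to this writer all repair hand-managed quote characters around XML attribute values. A sketch of the same element writer leaning on xml.sax.saxutils instead, so the quote bookkeeping disappears (the parameter list is simplified; the original builds the value dict via get_rule_dict):

from xml.sax.saxutils import quoteattr, escape

def write_block(zapfile, title, desc, url, replacement=None):
    """Write a <block> element. quoteattr() adds the surrounding
    quotes and escapes the attribute value; escape() handles the
    element text, so no closing quote can be forgotten."""
    zapfile.write("<block title=%s desc=%s url=%s" %
                  (quoteattr(title), quoteattr(desc), quoteattr(url)))
    if replacement is not None:
        zapfile.write(">%s</block>" % escape(replacement))
    else:
        zapfile.write("/>")
    zapfile.write("\n")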
newfunc.__doc__ += func.__doc__
if func.__doc__ is not None:
    newfunc.__doc__ += func.__doc__
def newfunc (*args, **kwargs):
    """
    Print deprecated warning and execute original function.
    """
    warnings.warn("Call to deprecated function %s." % func.__name__,
                  category=DeprecationWarning)
    return func(*args, **kwargs)
b786bca2bd1448c567a75ba57e788bfda734efe5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/b786bca2bd1448c567a75ba57e788bfda734efe5/decorators.py
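The guard matters because `func.__doc__` is None for functions without a docstring, and adding None to a string raises TypeError. A self-contained modern version of the decorator this newfunc belongs to (the "Deprecated." prefix is my assumption about the surrounding code):

import functools
import warnings

def deprecated(func):
    """Mark a function as deprecated, warning on every call."""
    @functools.wraps(func)
    def newfunc(*args, **kwargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    doc = "Deprecated. "
    # the fix above: only concatenate when a docstring exists
    if func.__doc__ is not None:
        doc += func.__doc__
    newfunc.__doc__ = doc
    return newfunc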
newfunc.__doc__ += func.__doc__
if func.__doc__ is not None:
    newfunc.__doc__ += func.__doc__
def newfunc (*args, **kwargs):
    """
    Execute function synchronized.
    """
    lock.acquire(True) # blocking
    try:
        return func(*args, **kwargs)
    finally:
        lock.release()
b786bca2bd1448c567a75ba57e788bfda734efe5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/b786bca2bd1448c567a75ba57e788bfda734efe5/decorators.py
newfunc.__doc__ = func.__doc__
if func.__doc__ is not None:
    newfunc.__doc__ = func.__doc__
def newfunc (*args, **kwargs):
    """
    Raise NotImplementedError
    """
    raise NotImplementedError("%s not implemented" % func.__name__)
b786bca2bd1448c567a75ba57e788bfda734efe5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/b786bca2bd1448c567a75ba57e788bfda734efe5/decorators.py
lines = get_file_data(rule.file)
lines = self.get_file_data(rule.file)
def add_blockdomains (self, rule):
    print "blockdomains", rule.file
    lines = get_file_data(rule.file)
    for line in lines:
        line = line.strip()
        if not line or line[0]=='#':
            continue
        self.blocked_domains.append(line)
34bab8a1b090494a897c77a731b72a788ec5d7b9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/34bab8a1b090494a897c77a731b72a788ec5d7b9/Blocker.py
lines = get_file_data(rule.file)
lines = self.get_file_data(rule.file)
def add_blockurls (self, rule):
    print "blockurls", rule.file
    lines = get_file_data(rule.file)
    for line in lines:
        line = line.strip()
        if not line or line[0]=='#':
            continue
        self.blocked_urls.append(line.split("/", 1))
34bab8a1b090494a897c77a731b72a788ec5d7b9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/34bab8a1b090494a897c77a731b72a788ec5d7b9/Blocker.py
if urltuple[1] == _block:
if urlTuple[1] == _block:
def blocked (self, urlTuple):
    # check blocked domains
    for _block in self.blocked_domains:
        debug(NIGHTMARE, "block domain", _block)
        if urltuple[1] == _block:
            return 0
    # check blocked urls
    for _block in self.blocked_urls:
        debug(NIGHTMARE, "block url", _block)
        if urlTuple[1]==_block[0] and urlTuple[2].startswith(_block[1]):
            return 0
    # check block patterns
    for _block in self.block:
        match = 1
        for i in range(len(urlTuple)):
            if _block[i]:
                debug(NIGHTMARE, "block pattern", _block[i].pattern)
                if not _block[i].search(urlTuple[i]):
                    debug(NIGHTMARE, "no match")
                    match = 0
        if match and not self.allowed(urlTuple):
            debug(HURT_ME_PLENTY, "blocked", urlTuple, "with", _block[-1])
            return _block[-1]
    return None
34bab8a1b090494a897c77a731b72a788ec5d7b9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/34bab8a1b090494a897c77a731b72a788ec5d7b9/Blocker.py
p = wc.configuration.ZapperParser(fullname, compile_data=False)
parserclass = wc.configuration.confparse.ZapperParser
p = parserclass(fullname, compile_data=False)
def update_filter (wconfig, dryrun=False, log=None):
    """
    Update the given configuration object with .zap files found at
    baseurl. If dryrun is True, only print out the changes but do nothing.
    @raise: IOError
    """
    print >> log, _("updating filters"), "..."
    chg = False
    baseurl = wconfig['baseurl']+"filter/"
    url = baseurl+"filter-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >> log, _("error fetching %s") % url, msg
        print >> log, "...", _("done")
        return chg
    # remember all local config files
    filemap = {}
    for filename in wc.configuration.filterconf_files(wconfig.filterdir):
        filemap[os.path.basename(filename)] = filename
    # read md5sums
    for line in page.read().splitlines():
        if "<" in line:
            print >> log, _("error fetching %s") % url
            print >> log, "...", _("done")
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        assert filename.endswith('.zap')
        fullname = os.path.join(wconfig.configdir, filename)
        # compare checksums
        if filemap.has_key(filename):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest == md5sum:
                print >> log, \
                      _("filter %s not changed, ignoring") % filename
                continue
            print >> log, _("updating filter %s") % filename
        else:
            print >> log, _("adding new filter %s") % filename
        # parse new filter
        url = baseurl + filename
        page = open_url(url)
        p = wc.configuration.ZapperParser(fullname, compile_data=False)
        p.parse(fp=page)
        page.close()
        # compare version compatibility
        if wconfig['configversion'][0] != p.folder.configversion[0]:
            print >> log, _("Incompatible folder version %s, must be %s") % \
                  (wconfig['configversion'], p.folder.configversion)
        if wconfig.merge_folder(p.folder, dryrun=dryrun, log=log):
            chg = True
    url = baseurl + "extern-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >> log, _("error fetching %s:") % url, msg
        print >> log, "...", _("done")
        return chg
    lines = page.read().splitlines()
    page.close()
    for line in lines:
        if "<" in line:
            print >> log, _("error fetching %s:") % url, \
                  _("invalid content")
            print >> log, "...", _("done")
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        # XXX UNIX-generated md5sum filenames with subdirs are not portable
        fullname = os.path.join(wconfig.configdir, filename)
        # compare checksums
        if os.path.exists(fullname):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest == md5sum:
                print >> log, \
                      _("extern filter %s not changed, ignoring")%filename
                continue
            print >> log, _("updating extern filter %s") % filename
        else:
            print >> log, _("adding new extern filter %s") % filename
        chg = True
        if not dryrun:
            url = baseurl+filename
            try:
                page = open_url(url)
            except IOError, msg:
                print >> log, _("error fetching %s:") % url, msg
                continue
            data = page.read()
            if not data:
                print >> log, _("error fetching %s:") % url, \
                      _("got no data")
                continue
            f = file(fullname, 'wb')
            f.write(data)
            f.close()
    print >> log, "...", _("done")
    return chg
626d2e30cedf765e6ee0dea17a496b4e2a033365 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/626d2e30cedf765e6ee0dea17a496b4e2a033365/update.py
self.macintosh = os.name == 'mac' or \
                 (os.name == 'posix' and sys.platform.startswith('darwin'))
def __init__ (self):
    self.in_winhelp = False # inside object tag calling WinHelp
    # running on MacOS or MacOSX
    self.macintosh = os.name == 'mac' or \
                     (os.name == 'posix' and sys.platform.startswith('darwin'))
f4cc3c8b417b7eb9f4ab026bc1df56540dd8e0f9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/f4cc3c8b417b7eb9f4ab026bc1df56540dd8e0f9/HtmlSecurity.py
if attrs.has_key('content') and self.macintosh:
    if attrs.get('http-equiv', '').lower() == 'refresh':
if attrs.has_key('content'):
    refresh = attrs.get('http-equiv', attrs.get('name', ''))
    if refresh.lower() == 'refresh':
def meta_start (self, attrs, htmlfilter):
    """
    Check <meta> start tag.
    """
    if attrs.has_key('content') and self.macintosh:
        # prevent CVE-2002-0153
        if attrs.get('http-equiv', '').lower() == 'refresh':
            url = attrs['content'].lower()
            if ";" in url:
                url = url.split(";", 1)[1]
            if url.startswith('url='):
                url = url[4:]
            if url.startswith('file:/'):
                msg = "%s %r\n Detected and prevented local file " \
                      "redirection"
                wc.log.warn(wc.LOG_FILTER, msg, htmlfilter, attrs['content'])
                del attrs['content']
f4cc3c8b417b7eb9f4ab026bc1df56540dd8e0f9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/f4cc3c8b417b7eb9f4ab026bc1df56540dd8e0f9/HtmlSecurity.py
if ";" in url: url = url.split(";", 1)[1] if url.startswith('url='): url = url[4:] if url.startswith('file:/'): msg = "%s %r\n Detected and prevented local file " \ "redirection" wc.log.warn(wc.LOG_FILTER, msg, htmlfilter, attrs['content']) del attrs['content']
url = wc.strformat.stripall(url)
for url in url.split(";url="):
    if not url.startswith('http://'):
        msg = "%s %r\n Detected invalid redirection."
        wc.log.warn(wc.LOG_FILTER, msg, htmlfilter, attrs['content'])
        del attrs['content']
        break
title = "[%s] %s" % (self.rule.get_name(), title) sender.setText(title) debug(BRING_IT_ON, "Rule title changed")
tmptitle = "[%s] %s" % (self.rule.get_name(), title) else: tmptitle = title sender.setText(tmptitle)
def onCmdTitle (self, sender, sel, ptr):
    title = sender.getText().strip()
    if not title:
        error(i18n._("empty title"))
        sender.setText(self.rule.title)
        return 1
    self.rule.title = title
    self.getApp().dirty = 1
    if self.rule.get_name()!="folder":
        title = "[%s] %s" % (self.rule.get_name(), title)
    sender.setText(title)
    debug(BRING_IT_ON, "Rule title changed")
    # send message to main window for treelist updating
    win = self.getApp().getMainWindow()
    win.handle(sender, MKUINT(win.ID_TITLE, SEL_COMMAND), ptr)
    return 1
691ab1c0528653379e00187d3fe57140cf58ddad /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/691ab1c0528653379e00187d3fe57140cf58ddad/FXRuleFrame.py
"blocked url %s by rule %s", url, sid) if isinstance(blocked, basestring):
"blocked url %s with %s by rule %s", url, blocked, sid) if isinstance(blocked, str):
def doit (self, data, attrs):
    """
    Investigate request data for a block.
d62ecb76dd0be0d2d392699dfb91ce722ea7ae65 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d62ecb76dd0be0d2d392699dfb91ce722ea7ae65/Blocker.py
scheme = "http" return 'GET %s://localhost:%d%s HTTP/1.1' % (scheme, port, doc)
if not doc.startswith("http://"): doc = "http://localhost:%d%s" % (port, doc) return 'GET %s HTTP/1.1' % doc
def ignorableWhitespace(self, d):
    """handler for ignorable whitespace"""
    self.buffer_append_data([DATA, d])
def ignorableWhitespace(self, d):
    """handler for ignorable whitespace"""
    self.buffer_append_data([DATA, d])
2b2a60f134ab7bf88c4041512cdda049d51436f6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2b2a60f134ab7bf88c4041512cdda049d51436f6/Rewriter.py
if '"' in val: s += " %s='%s'"%(name,val) else: s += ' %s="%s"'%(name,val)
s += ' %s'%name
if val:
    if val.find('"')!=-1:
        s += "='%s'"%val
    else:
        s += '="%s"'%val
def buffer2data(self):
    """Append all tags of the buffer to the data"""
    for n in self.buffer:
        if n[0]==DATA:
            self.data += n[1]
        elif n[0]==COMMENT:
            self.data += "<!--%s-->"%n[1]
        elif n[0]==STARTTAG:
            s = "<"+n[1]
            for name,val in n[2].items():
                if '"' in val:
                    s += " %s='%s'"%(name,val)
                else:
                    s += ' %s="%s"'%(name,val)
            self.data += s+">"
        elif n[0]==ENDTAG:
            self.data += "</%s>"%n[1]
        else:
            error("unknown buffer element %s" % n[0])
    self.buffer = []
2b2a60f134ab7bf88c4041512cdda049d51436f6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2b2a60f134ab7bf88c4041512cdda049d51436f6/Rewriter.py
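The corrected serializer picks the quote character per attribute value: single quotes when the value itself contains a double quote, double quotes otherwise. A standalone sketch of that rule (note the 2006 code still cannot represent values containing both quote kinds, since nothing is escaped):

def format_attrs(attrs):
    """Serialize a tag attribute dict, choosing the quote style per
    value the way the corrected loop above does."""
    s = ""
    for name, val in attrs.items():
        if '"' in val:
            s += " %s='%s'" % (name, val)
        else:
            s += ' %s="%s"' % (name, val)
    return s

assert format_attrs({"alt": 'say "hi"'}) == " alt='say \"hi\"'"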
print "XXX", lang, msg
def add_i18n_context (context, lang):
    # language and i18n
    context_add(context, "lang", lang)
    try:
        translator = wc.get_translator(lang, translatorklass=Translator)
    except IOError, msg:
        print "XXX", lang, msg
        translator = NullTranslator()
    context_add(context, "i18n", translator)
c84bb1673738bd4bce74e6c98005c223ae06ad52 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/c84bb1673738bd4bce74e6c98005c223ae06ad52/webconfig.py
_msg = TALInterpreter.interpolate(_msg, mapping)
return _msg
return wc.webgui.TAL.TALInterpreter.interpolate(_msg, mapping)
def translate (self, domain, msgid, mapping=None,
               context=None, target_language=None, default=None):
    _msg = self.gettext(msgid)
    wc.log.debug(wc.LOG_TAL, "TRANSLATE %s %s %s %s",
                 msgid, _msg, mapping, context)
    _msg = TALInterpreter.interpolate(_msg, mapping)
    return _msg
c84bb1673738bd4bce74e6c98005c223ae06ad52 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/c84bb1673738bd4bce74e6c98005c223ae06ad52/webconfig.py
def _buf_append_data (self, data):
def buf_append_data (self, data):
def _buf_append_data (self, data): """we have to make sure that we have no two following DATA things in the tag buffer. Why? To be 100% sure that an ENCLOSED match really matches enclosed data. """ #self._debug(NIGHTMARE, "buf_append_data") if data[0]==DATA and self.buf and self.buf[-1][0]==DATA: self.buf[-1][1] += data[1] else: self.buf.append(data)
84fbb098dcc217798789e0d75b90b3973f33e5ff /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/84fbb098dcc217798789e0d75b90b3973f33e5ff/HtmlParser.py
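The invariant documented in that docstring is easy to state as a free function: appending a DATA item to a buffer whose last item is also DATA must merge the two, so an ENCLOSED rule matcher sees contiguous text as a single entry. A sketch with the tag-type constant assumed to be an integer (the real value lives elsewhere in the parser):

DATA = 0  # assumed tag-buffer item type for illustration

def buf_append_data(buf, item):
    """Append `item` to `buf`, merging adjacent DATA items."""
    if item[0] == DATA and buf and buf[-1][0] == DATA:
        buf[-1][1] += item[1]
    else:
        buf.append(item)

buf = []
buf_append_data(buf, [DATA, "Hello, "])
buf_append_data(buf, [DATA, "world"])
assert buf == [[DATA, "Hello, world"]]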
// error fetching script from 'http://imadoofus.org/notfound.js'
// error fetching script from u'http://imadoofus.org/notfound.js'
def testScriptSrc4 (self):
    self.filt(
20937dda95e62258129dbde4040c70a91b3aa9d0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/20937dda95e62258129dbde4040c70a91b3aa9d0/test_rewritescript.py
self.client.server_close()
self.client.server_close(self)
def put_response (self, data, protocol, status, msg, headers):
    response = "%s %d %s"%(protocol, status, msg)
    self.client.server_response(self, response, status, headers)
    self.client.server_content(data)
    self.client.server_close()
62d4636ce54feef4b9975dd71b99376f23d7ca21 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/62d4636ce54feef4b9975dd71b99376f23d7ca21/__init__.py
wc.proxy.HEADERS.append((self.url, 1, self.headers.headers))
def process_headers(self):
    # Headers are terminated by a blank line .. now in the regexp,
    # we want to say it's either a newline at the beginning of
    # the document, or it's a lot of headers followed by two newlines.
    # The cleaner alternative would be to read one line at a time
    # until we get to a blank line...
    m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer)
    if not m:
        return
df1de478c20eda8c306e8db15d03c1d2b49b9eb0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/df1de478c20eda8c306e8db15d03c1d2b49b9eb0/HttpServer.py
self.attrs = initStateObjects(self.headers, self.url)
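The regular expression in process_headers matches either a lone blank line at the start of the buffer or a run of header lines followed by one, so everything up to and including the blank line can be consumed in a single step. A runnable demonstration:

import re

# The terminator regex used by process_headers above.
HEADER_END = re.compile(r'^((?:[^\r\n]+\r?\n)*\r?\n)')

buf = "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n<html>"
m = HEADER_END.match(buf)
assert m and m.group(1).endswith("\r\n\r\n")
# Everything after the match is the start of the body.
assert buf[m.end():] == "<html>"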
rating_store = _get_rating_store(_Storage)
rating_store = _get_ratings()
def _calc_ratings_display ():
    """
    Calculate current set of ratings to display.
    """
    global ratings_display, rating_modified
    urls = rating_store.keys()
    urls.sort()
    ratings_display = urls[curindex:curindex+_entries_per_page]
    rating_modified.clear()
    for _url in ratings_display:
        t = _strtime(rating_store[_url].modified)
        rating_modified[_url] = t.replace(u" ", u"&nbsp;")
1f977e01e9b961b22b70c8bdcf7577050a2311bb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1f977e01e9b961b22b70c8bdcf7577050a2311bb/rating_html.py
if url in rating_store:
    rating = rating_store[url]
else:
    rating = _Rating(url, generic)
rating = _Rating(url, generic)
def _form_apply ():
    """
    Store changed ratings.
    """
    if url in rating_store:
        rating = rating_store[url]
    else:
        rating = _Rating(url, generic)
    rating.remove_categories()
    for catname, value in values.items():
        category = _get_category(catname)
        if category.iterable:
            value = [x for x in value if value[x]][0]
        else:
            value = _intrange_from_string(value)
            if value is None:
                error['ratingupdated'] = True
                return
        rating.add_category_value(category, value)
    rating_store[url] = rating
    try:
        rating_store.write()
        info['ratingupdated'] = True
    except:
        error['ratingupdated'] = True
1f977e01e9b961b22b70c8bdcf7577050a2311bb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1f977e01e9b961b22b70c8bdcf7577050a2311bb/rating_html.py
except SSL.WantReadError:
wc.log.debug(wc.LOG_NET, 'data %r', data)
except SSL.WantReadError, err:
    wc.log.debug(wc.LOG_NET, '%s want read error %s', self, err)
def handle_read (self):
    """read data from SSL connection, put it into recv_buffer and
    call process_read"""
    assert self.connected
    wc.log.debug(wc.LOG_PROXY, '%s SslConnection.handle_read', self)
    if len(self.recv_buffer) > wc.proxy.Connection.MAX_BUFSIZE:
        wc.log.warn(wc.LOG_PROXY, '%s read buffer full', self)
        return
    try:
        data = self.socket.read(wc.proxy.Connection.RECV_BUFSIZE)
    except SSL.WantReadError:
        # you _are_ already reading, stupid
        return
    except SSL.WantWriteError:
        # you want to write? here you go
        self.handle_write()
        return
    except SSL.WantX509LookupError, err:
        wc.log.exception(wc.LOG_PROXY, "%s ssl read message", self)
        return
    except SSL.ZeroReturnError, err:
        wc.log.debug(wc.LOG_PROXY, "%s ssl finished successfully", self)
        self.delayed_close()
        return
    except SSL.Error, err:
        wc.log.exception(wc.LOG_PROXY, "read error %s", err)
        self.handle_error('read error')
        return
    if not data:
        # It's been closed, and handle_close has been called
        wc.log.debug(wc.LOG_PROXY, "%s closed, got empty data", self)
        return
    wc.log.debug(wc.LOG_NET, '%s <= read %d', self, len(data))
    wc.log.debug(wc.LOG_NET, 'data %r', data)
    self.recv_buffer += data
    self.process_read()
2c6283b225556a48147643da0a0cb5f0a3536753 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2c6283b225556a48147643da0a0cb5f0a3536753/SslConnection.py
except SSL.WantWriteError:
except SSL.WantWriteError, err:
    wc.log.debug(wc.LOG_NET, '%s want write error %s', self, err)
wc.log.exception(wc.LOG_PROXY, "%s ssl read message", self)
wc.log.exception(wc.LOG_PROXY, "%s ssl read message %s", self, err)
wc.log.debug(wc.LOG_PROXY, "%s ssl finished successfully", self)
wc.log.debug(wc.LOG_PROXY, "%s ssl finished successfully (%s)", self, err)
wc.log.exception(wc.LOG_PROXY, "read error %s", err)
wc.log.exception(wc.LOG_PROXY, "%s read error %s", self, err)
wc.log.debug(wc.LOG_NET, 'data %r', data)
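All six rows in this group refine the same pyOpenSSL read loop: want-read and want-write conditions are transient states, not errors, and a zero-return means the TLS session closed cleanly. The Python 3 ssl module exposes the same distinctions, so the pattern can be sketched without pyOpenSSL (the function shape and buffer handling are mine):

import ssl

def handle_read(conn, recv_buffer, bufsize=4096):
    """Non-blocking SSL read step, returning the updated buffer."""
    try:
        data = conn.recv(bufsize)
    except ssl.SSLWantReadError:
        return recv_buffer          # socket not readable yet; try later
    except ssl.SSLWantWriteError:
        return recv_buffer          # renegotiation wants a write first
    except ssl.SSLZeroReturnError:
        return recv_buffer          # TLS close_notify: clean shutdown
    if not data:
        return recv_buffer          # connection closed
    return recv_buffer + data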
def _debugbuf (self):
    """print debugging information about data buffer status"""
def __str__ (self):
    return "%s in state %s"%(self.__class__.__name__, str(self.state))

def debugbuf (self):
    """print debugging information about buffered data"""
def _debugbuf (self):
    """print debugging information about data buffer status"""
    debug(FILTER, "self.outbuf %r", self.outbuf.getvalue())
    debug(FILTER, "self.tagbuf %r", self.tagbuf)
    debug(FILTER, "self.waitbuf %r", self.waitbuf)
    debug(FILTER, "self.inbuf %r", self.inbuf.getvalue())
528577c1e37e3cb64d12e96674d5cbd325e4bbd0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/528577c1e37e3cb64d12e96674d5cbd325e4bbd0/HtmlParser.py
"""Append all tags of the tag buffer to the output buffer"""
"""append serialized tag items of the tag buffer to the output buffer and clear the tag buffer""" debug(FILTER, "%s tagbuf2data", self)
def tagbuf2data (self):
    """Append all tags of the tag buffer to the output buffer"""
    tagbuf2data(self.tagbuf, self.outbuf)
    self.tagbuf = []
528577c1e37e3cb64d12e96674d5cbd325e4bbd0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/528577c1e37e3cb64d12e96674d5cbd325e4bbd0/HtmlParser.py
waitbuf, self.waitbuf = self.waitbuf, []
self.replay(waitbuf)
if self.state[0]!='parse':
if self.waitbuf:
    waitbuf, self.waitbuf = self.waitbuf, []
    self.replay(waitbuf)
if self.state[0]=='wait':
def feed (self, data):
    """feed some data to the parser"""
    if self.state[0]=='parse':
        # look if we must replay something
        if self.waited > 0:
            self.waited = 0
            waitbuf, self.waitbuf = self.waitbuf, []
            self.replay(waitbuf)
            if self.state[0]!='parse':
                self.inbuf.write(data)
                return
            data = self.inbuf.getvalue() + data
            self.inbuf.close()
            self.inbuf = StringIO()
        if data:
            # only feed non-empty data
            debug(FILTER, "parser feed %r", data)
            super(HtmlParser, self).feed(data)
        else:
            debug(FILTER, "empty parser feed")
            pass
    else:
        # wait state ==> put in input buffer
        debug(FILTER, "parser wait")
        self.inbuf.write(data)
528577c1e37e3cb64d12e96674d5cbd325e4bbd0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/528577c1e37e3cb64d12e96674d5cbd325e4bbd0/HtmlParser.py
debug(FILTER, "parser feed %r", data)
debug(FILTER, "%s parser feed %r", self, data)
debug(FILTER, "empty parser feed")
debug(FILTER, "%s empty parser feed", self)
debug(FILTER, "parser wait") self.inbuf.write(data)
assert False, "parser %s has unknown parser state"%str(self)
def flush (self, finish=False):
    """flush pending data and return the flushed output buffer"""
    debug(FILTER, "parser flush finish=%s", str(finish))
def flush (self):
    """flush pending data"""
    debug(FILTER, "%s flush", self)
def flush (self, finish=False):
    """flush pending data and return the flushed output buffer"""
    debug(FILTER, "parser flush finish=%s", str(finish))
    if self.waited > 100:
        error(FILTER, "waited too long for %s"%self.state[1])
        # tell recursive background downloaders to stop
        if hasattr(self.handler, "finish"):
            self.handler.finish()
        # switch back to parse
        self.state = ('parse',)
        # feeding an empty string will replay() buffered data
        self.feed("")
    elif self.state[0]=='wait':
        # flushing in wait state raises a filter exception
        self.waited += 1
        raise FilterWait("HtmlParser[wait]: waited %d times for %s"%\
                         (self.waited, self.state[1]))
    if finish:
        super(HtmlParser, self).flush()
        self.tagbuf2data()
    data = self.outbuf.getvalue()
    self.outbuf.close()
    self.outbuf = StringIO()
    return data
528577c1e37e3cb64d12e96674d5cbd325e4bbd0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/528577c1e37e3cb64d12e96674d5cbd325e4bbd0/HtmlParser.py
error(FILTER, "waited too long for %s"%self.state[1])
assert self.state[0]=='wait', \
       'parser %s has waited flag set in non-wait state' % str(self)
error(FILTER, "%s waited too long", self)
raise FilterWait("HtmlParser[wait]: waited %d times for %s"%\ (self.waited, self.state[1])) if finish: super(HtmlParser, self).flush() self.tagbuf2data()
raise FilterWait("waited %d at parser %s"%(self.waited, str(self))) super(HtmlParser, self).flush() def getoutput (self): """returns all data in output buffer and clears the output buffer"""
528577c1e37e3cb64d12e96674d5cbd325e4bbd0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/528577c1e37e3cb64d12e96674d5cbd325e4bbd0/HtmlParser.py
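The HtmlParser records above all revolve around the same pattern: the parser is a small state machine ('parse' versus 'wait'); data fed while waiting is buffered and replayed once parsing resumes, and flushing while waiting raises FilterWait so the caller can retry later. A minimal sketch of that discipline, assuming nothing from WebCleaner itself (BufferingParser and its process hook are illustrative names, not the real classes):

from StringIO import StringIO   # Python 2, as in the code above

class FilterWait(Exception):
    """raised when flushing must be retried later"""

class BufferingParser(object):
    def __init__(self):
        self.state = ('parse',)
        self.inbuf = StringIO()
        self.waited = 0

    def feed(self, data):
        if self.state[0] == 'parse':
            # prepend anything buffered while we were waiting
            data = self.inbuf.getvalue() + data
            self.inbuf = StringIO()
            self.process(data)
        elif self.state[0] == 'wait':
            # wait state ==> buffer the input for later replay
            self.inbuf.write(data)
        else:
            # mirrors the assert False added in the record above
            assert False, "unknown parser state %r" % (self.state,)

    def flush(self):
        if self.state[0] == 'wait':
            self.waited += 1
            raise FilterWait("waited %d times" % self.waited)
        self.feed("")   # feeding "" replays buffered data

    def process(self, data):
        pass   # real parsing work would go here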
def filter_tag (self, tag, attrs):
    #debug(NIGHTMARE, "rule %s filter_tag" % self.title)
    part = self.replace[0]
    #debug(NIGHTMARE, "original tag", tag, "attrs", attrs)
    #debug(NIGHTMARE, "replace", num_part(part), "with", self.replace[1])
    if part==TAGNAME:
        return (STARTTAG, self.replace[1], attrs)
    if part==TAG:
        return (DATA, self.replace[1])
    if part==ENCLOSED:
        return (STARTTAG, tag, attrs)
    if part==COMPLETE:
        return [DATA, ""]
    newattrs = {}
    # look for matching tag attributes
    for attr,val in attrs.items():
        ro = self.attrs.get(attr)
        if ro:
            mo = ro.search(val)
            if mo:
                if part==ATTR:
                    if self.replace[1]:
                        newattrs[self.replace[1][0]] = self.replace[1][1]
                else:
                    # part has to be ATTRVAL
                    # Python has named submatches
                    if mo.groupdict().has_key('replace'):
                        newattrs[attr] = mo.groupdict()['replace']
                    else:
                        newattrs[attr] = self.replace[1]
                continue
        # nothing matched, just append the attribute as is
        newattrs[attr] = val
    #debug(NIGHTMARE, "filtered tag", tag, "attrs", newattrs)
    return (STARTTAG, tag, newattrs)
c2342762b03bad110dbaabe5882f08094c04e5bf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/c2342762b03bad110dbaabe5882f08094c04e5bf/RewriteRule.py
return False
chg = False
return chg
def update (config, baseurl, dryrun=False, log=None):
    """Update the given configuration object with .zap files found at
       baseurl. If dryrun is True, only print out the changes but do nothing
       throws IOError on error
    """
    url = baseurl+"filter-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, "error fetching %s:"%url, msg
        return False
    chg = False
    filemap = {}
    for filename in filterconf_files():
        filemap[os.path.basename(filename)] = filename
    lines = page.read().splitlines()
    for line in lines:
        if "<" in line:
            print >>log, "error fetching", url
            return False
        if not line:
            continue
        md5sum, filename = line.split()
        assert filename.endswith('.zap')
        fullname = os.path.join(ConfigDir, filename)
        # compare checksums
        if filemap.has_key(filename):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, "filter", filename, "not changed, ignoring"
                continue
            print >>log, "updating filter", filename
        else:
            print >>log, "adding new filter", filename
        url = baseurl+filename+".gz"
        page = open_url(url)
        p = ZapperParser(fullname)
        p.parse(page, config)
        chg = config.merge_folder(p.folder, dryrun=dryrun, log=log) or chg
    url = baseurl+"extern-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, "error fetching %s:"%url, msg
        return False
    lines = page.read().splitlines()
    for line in lines:
        if "<" in line:
            print >>log, "error fetching", url
            return False
        if not line:
            continue
        md5sum, filename = line.split()
        # XXX UNIX-generated md5sum filenames with subdirs are not portable
        fullname = os.path.join(ConfigDir, filename)
        # compare checksums
        if os.path.exists(fullname):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, "extern filter", filename, "not changed, ignoring"
                continue
            print >>log, "updating extern filter", filename
        else:
            print >>log, "adding new extern filter", filename
        chg = True
        if not dryrun:
            url = baseurl+filename
            try:
                page = open_url(url)
            except IOError, msg:
                print >>log, "error fetching", url
                continue
            f = file(fullname, 'wb')
            f.write(page.read())
            f.close()
    return chg
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
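The checksum test inside update() builds the hex digest by hand from md5.new(data).digest(). For reference, the standard library can produce the same string directly; a small equivalent sketch (hashlib has shipped since Python 2.5):

import hashlib

def hexdigest_of(data):
    # same value as "".join([ "%0.2x"%ord(c) for c in md5.new(data).digest() ])
    return hashlib.md5(data).hexdigest()

# compare against the md5sum column of filter-md5sums.txt:
# if hexdigest_of(file(fullname, 'rb').read()) == md5sum: ... unchanged ...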
for filename in filterconf_files():
for filename in wc.filterconf_files():
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
lines = page.read().splitlines()
for line in lines:
for line in page.read().splitlines():
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
return False
return chg
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
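The `return False` to `return chg` fix matters because update() may already have merged filter changes before the extern-md5sums.txt download fails; returning False would throw that information away. A reduced, self-contained sketch of the pattern (fetch and apply_update are hypothetical stand-ins for open_url and merge_folder):

def fetch(src):
    # hypothetical stand-in for open_url(); may raise IOError
    if src is None:
        raise IOError("unreachable")
    return src

def apply_update(data):
    # hypothetical stand-in for config.merge_folder(); True if changed
    return bool(data)

def update_all(sources):
    chg = False
    for src in sources:
        try:
            data = fetch(src)
        except IOError:
            return chg   # partial failure still reports earlier changes
        chg = apply_update(data) or chg   # accumulate, never overwrite
    return chg

assert update_all(["a", None, "b"]) is True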
fullname = os.path.join(ConfigDir, filename)
fullname = os.path.join(wc.ConfigDir, filename)
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
p = ZapperParser(fullname)
p = wc.ZapperParser(fullname, compile_data=False)
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
return False
lines = page.read().splitlines()
for line in lines:
return chg
for line in page.read().splitlines():
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
return False
return chg
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
fullname = os.path.join(ConfigDir, filename)
fullname = os.path.join(wc.ConfigDir, filename)
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
config = Configuration()
def _test ():
    # read local configuration
    config = Configuration()
    # test base url for all files
    baseurl = "http://localhost/~calvin/webcleaner.sf.net/htdocs/test/"
    update(config, baseurl, dryrun=True)
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
update(config, baseurl, dryrun=True)
update(wc.Configuration(), baseurl, dryrun=True)
ff26d3c4dce7bb18e52370745ecc28f73cd601b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ff26d3c4dce7bb18e52370745ecc28f73cd601b5/update.py
print "Test: oooooops"
print >>sys.stderr, "Test: oooooops"
def _main (): """USAGE: test/run.sh test/filterfile.py <.html file>""" import sys if len(sys.argv)!=2: print _main.__doc__ sys.exit(1) fname = sys.argv[1] if fname=="-": f = sys.stdin else: f = file(fname) from test import initlog, disable_rating_rules initlog("test/logging.conf") import wc wc.config = wc.Configuration() disable_rating_rules(wc.config) wc.config['filters'] = ['Replacer', 'Rewriter', 'BinaryCharFilter'] wc.config.init_filter_modules() from wc.proxy import proxy_poll, run_timers from wc.filter import FilterException, applyfilter, get_filterattrs from wc.filter import FILTER_RESPONSE_MODIFY attrs = get_filterattrs(fname, [FILTER_RESPONSE_MODIFY]) filtered = "" data = f.read(2048) while data: try: filtered += applyfilter(FILTER_RESPONSE_MODIFY, data, 'filter', attrs) except FilterException, msg: pass data = f.read(2048) i = 1 while True: print >>sys.stderr, "Test: finish", i try: filtered += applyfilter(FILTER_RESPONSE_MODIFY, "", 'finish', attrs) break except FilterException, msg: print >>sys.stderr, "Test: finish: exception:", msg proxy_poll(timeout=max(0, run_timers())) i += 1 if i==200: # background downloading if javascript is too slow print "Test: oooooops" break print "Filtered:", filtered
0ad8b6bd007a4be169bf520b877aa87dd2cf3200 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0ad8b6bd007a4be169bf520b877aa87dd2cf3200/filterfile.py
if not (scheme and hostname and port):
if not scheme:
def __init__ (self, client, request, headers, content, nofilter,
              compress, mime=None):
    self.client = client
    self.request = request
    self.headers = headers
    self.compress = compress
    self.content = content
    self.nofilter = nofilter
    self.mime = mime
    debug(BRING_IT_ON, "Proxy:", `self.request`)
    self.method, self.url, protocol = self.request.split()
    scheme, hostname, port, document = spliturl(self.url)
    # some clients send partial URI's without scheme, hostname
    # and port to clients, so we have to handle this
    if not (scheme and hostname and port):
        print >>sys.stderr, "Warning: partial request uri:", self.request
    if not scheme:
        # default scheme is http
        scheme = "http"
    if not hostname:
        # the 'Host' header has to be there
        hostname = self.headers.get('Host')
        if not hostname:
            # we cannot handle the request
            self.client.error(400, i18n._("Incomplete Proxy Request"))
            return
    if not port:
        port = 80
    # fix missing trailing /
    if not document:
        document = '/'
    # fix missing host headers for HTTP/1.1
    if protocol=='HTTP/1.1' and not self.headers.has_key('Host'):
        self.headers['Host'] = hostname
        if port!=80:
            self.headers['Host'] += ":%d"%port
    debug(HURT_ME_PLENTY, "Proxy: splitted url", scheme, hostname, port, document)
    if scheme=='file':
        # a blocked url is a local file:// link
        # this means we should _not_ use this proxy for local
        # file links :)
        mtype = mimetypes.guess_type(self.url)[0]
        config['requests']['valid'] += 1
        config['requests']['blocked'] += 1
        ServerHandleDirectly(self.client,
            'HTTP/1.0 200 OK\r\n',
            'Content-Type: %s\r\n\r\n'%(mtype or 'application/octet-stream'),
            open(document, 'rb').read())
        return
c781e1f42485024a59ee5e27b169bfce026aa971 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/c781e1f42485024a59ee5e27b169bfce026aa971/ClientServerMatchmaker.py
if not scheme:
c781e1f42485024a59ee5e27b169bfce026aa971 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/c781e1f42485024a59ee5e27b169bfce026aa971/ClientServerMatchmaker.py
if not hostname:
if scheme!='file' and not hostname:
c781e1f42485024a59ee5e27b169bfce026aa971 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/c781e1f42485024a59ee5e27b169bfce026aa971/ClientServerMatchmaker.py
if not port:
if scheme!='file' and not port:
c781e1f42485024a59ee5e27b169bfce026aa971 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/c781e1f42485024a59ee5e27b169bfce026aa971/ClientServerMatchmaker.py
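Taken together, the ClientServerMatchmaker records implement a fallback chain for partial request URIs: default the scheme to http, recover the hostname from the Host header, default the port to 80, and skip the host/port checks for file: URLs. A condensed, runnable sketch of that chain (complete_url and the plain-dict headers are simplifying assumptions, not the proxy's real interfaces):

def complete_url(scheme, hostname, port, document, headers):
    """fill in missing URL parts; returns None if the request is hopeless"""
    if not scheme:
        scheme = "http"                  # default scheme is http
    if scheme != 'file' and not hostname:
        hostname = headers.get('Host')   # recover host from the Host header
        if not hostname:
            return None                  # -> 400 Incomplete Proxy Request
    if scheme != 'file' and not port:
        port = 80                        # default HTTP port
    if not document:
        document = '/'                   # fix missing trailing /
    return scheme, hostname, port, document

print(complete_url("", "", 0, "", {'Host': 'example.com'}))
# -> ('http', 'example.com', 80, '/')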
headers.addheader("Via", "1.1 unknown")
headers.addheader("Via", "1.1 unknown\r")
def set_via_header (headers):
    """
    Set "Via:" header.
    """
    headers.addheader("Via", "1.1 unknown")
5c84c67327b9b90462d5c1a09f956adea73bc0f1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5c84c67327b9b90462d5c1a09f956adea73bc0f1/Headers.py
headers.addheader('Warning', warning)
headers.addheader('Warning', warning+"\r")
def remove_warning_headers (headers):
    """
    Remove old warning headers.
    """
    if "Warning" not in headers:
        return
    tokeep = []
    date = wc.http.date.parse_http_date(headers['Date'])
    for warning in headers.getheaders("Warning"):
        warncode, warnagent, warntext, warndate = \
            wc.http.parse_http_warning(warning)
        if warndate is None or warndate == date:
            tokeep.append(warning)
        else:
            wc.log.debug(wc.LOG_PROXY, "delete warning %s from %s",
                         warning, headers)
    del headers['Warning']
    for warning in tokeep:
        headers.addheader('Warning', warning)
5c84c67327b9b90462d5c1a09f956adea73bc0f1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5c84c67327b9b90462d5c1a09f956adea73bc0f1/Headers.py
headers.addheader('Trailer', trailer)
headers.addheader('Trailer', trailer+'\r')
def check_trailer_headers (headers):
    """
    Message header fields listed in the Trailer header field MUST NOT
    include the following header fields:
    . Transfer-Encoding
    . Content-Length
    . Trailer
    """
    if "Trailer" not in headers:
        return
    tokeep = []
    for trailer in headers.getheaders("Trailer"):
        if trailer.lower() not in forbidden_trailer_names:
            tokeep.append(trailer)
    del headers['Trailer']
    for trailer in tokeep:
        headers.addheader('Trailer', trailer)
5c84c67327b9b90462d5c1a09f956adea73bc0f1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5c84c67327b9b90462d5c1a09f956adea73bc0f1/Headers.py
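All three Headers.py records append a trailing "\r" to the values passed to addheader. WebCleaner's header container is not shown here; assuming it stores one "Name: value\n" line per header (as rfc822-style Message objects do), the trailing "\r" is what turns the serialized line into the CRLF ending HTTP requires. A sketch of that assumption:

class Headers(object):
    # minimal stand-in for the header object used above (an assumption,
    # not WebCleaner's actual class): one "Name: value\n" line per header
    def __init__(self):
        self.headers = []
    def addheader(self, name, value):
        self.headers.append("%s: %s\n" % (name, value))
    def __str__(self):
        return "".join(self.headers)

h = Headers()
h.addheader("Via", "1.1 unknown\r")       # trailing \r ...
assert str(h) == "Via: 1.1 unknown\r\n"   # ... yields CRLF on the wire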
urlparts = [ urllib.quote(p) for p in urlparts ]
return urlparse.urlunparse(urlparts)
urlparts = [ urllib.quote(p, '/=&') for p in urlparts ] url = urlparse.urlunparse(urlparts) return url
def norm_url (url): """replace empty paths with / and normalize them""" url = urllib.unquote(url) urlparts = list(urlparse.urlparse(url)) path = urlparts[2].replace('\\', '/') if not path or path=='/': urlparts[2] = '/' else: # XXX this works only under windows and posix?? # collapse redundant path segments urlparts[2] = os.path.normpath(path).replace('\\', '/') if path.endswith('/'): urlparts[2] += '/' urlparts = [ urllib.quote(p) for p in urlparts ] return urlparse.urlunparse(urlparts)
04af75482d54d0ac02ac20e68d0bea74e87779ae /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/04af75482d54d0ac02ac20e68d0bea74e87779ae/__init__.py
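The norm_url change passes '/=&' as urllib.quote's safe-character set, so '=' and '&' survive the unquote/quote round trip instead of coming back percent-encoded (which would corrupt query strings). For example:

import urllib   # Python 2; urllib.parse.quote in Python 3

part = "/cgi/search?a=b&c=d"
print(urllib.quote(part))          # /cgi/search%3Fa%3Db%26c%3Dd
print(urllib.quote(part, '/=&'))   # /cgi/search%3Fa=b&c=d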
toremove = _getlist(form, 'rule_matchurls')
toremove = [u for u in _getlist(form, 'rule_matchurls') if u in currule.matchurls]
def _form_rule_delmatchurls (form):
    toremove = _getlist(form, 'rule_matchurls')
    if toremove:
        for matchurl in toremove:
            currule.matchurls.remove(matchurl)
        currule.compile_matchurls()
        info['rulematchurl'] = True
e379f3e0f4364df5958caae668f731dd8da90a78 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e379f3e0f4364df5958caae668f731dd8da90a78/filterconfig_html.py
toremove = _getlist(form, 'rule_nomatchurls')
toremove = [u for u in _getlist(form, 'rule_nomatchurls') if u in currule.nomatchurls]
def _form_rule_delnomatchurls (form):
    toremove = _getlist(form, 'rule_nomatchurls')
    if toremove:
        for nomatchurl in toremove:
            currule.nomatchurls.remove(nomatchurl)
        currule.compile_nomatchurls()
        info['rulenomatchurl'] = True
e379f3e0f4364df5958caae668f731dd8da90a78 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/e379f3e0f4364df5958caae668f731dd8da90a78/filterconfig_html.py
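Both filterconfig_html.py records add the same guard: filter the removal list against the rule's current list before calling list.remove, which raises ValueError for a missing item (as can happen with stale form data). A minimal illustration:

matchurls = ["http://a/", "http://b/"]
requested = ["http://b/", "http://gone/"]   # e.g. a stale form submission

# unguarded, matchurls.remove("http://gone/") would raise ValueError;
# the guard keeps only entries that are actually present
for u in [u for u in requested if u in matchurls]:
    matchurls.remove(u)
print(matchurls)   # -> ['http://a/']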
"charset": wc.ConfigCharset, "title_en": wc.XmlUtils.xmlquote("%s %s" % (ftype.capitalize(), cat)), "title_de": wc.XmlUtils.xmlquote("%s %s" % (transtypes[ftype]['de'].capitalize(), transcats[cat]['de'].capitalize())), "desc_en": wc.XmlUtils.xmlquote("Automatically generated on %s" % date),
"charset": wc.configuration.ConfigCharset, "title_en": wc.XmlUtils.xmlquote("%s %s" % (ftype.capitalize(), cat)), "title_de": wc.XmlUtils.xmlquote("%s %s" % (transtypes[ftype]['de'].capitalize(), transcats[cat]['de'].capitalize())), "desc_en": wc.XmlUtils.xmlquote( "Automatically generated on %s" % date),
def write_folder (cat, ftype, data, f):
    print "write", cat, "folder"
    d = {
        "charset": wc.ConfigCharset,
        "title_en": wc.XmlUtils.xmlquote("%s %s" % (ftype.capitalize(), cat)),
        "title_de": wc.XmlUtils.xmlquote("%s %s" %
            (transtypes[ftype]['de'].capitalize(),
             transcats[cat]['de'].capitalize())),
        "desc_en": wc.XmlUtils.xmlquote("Automatically generated on %s" % date),
        "desc_de": wc.XmlUtils.xmlquote("Automatisch generiert am %s" % date),
    }
    f.write("""<?xml version="1.0" encoding="%(charset)s"?>
a2607428cab6b55ff28963fb0df9e14e9accf6b5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a2607428cab6b55ff28963fb0df9e14e9accf6b5/bl2wc.py
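write_folder interpolates titles and descriptions into an XML template, so every dynamic value is routed through wc.XmlUtils.xmlquote. That function is not shown here; assuming it does standard XML escaping, its effect is roughly:

from xml.sax.saxutils import escape

def xmlquote(s):
    # presumed behaviour of wc.XmlUtils.xmlquote (an assumption, the
    # real implementation is not shown): standard XML escaping
    return escape(s, {'"': '&quot;'})

print(xmlquote('Ads & "Banners"'))   # Ads &amp; &quot;Banners&quot;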