Columns: rem (string, 1 to 322k chars); add (string, 0 to 2.05M chars); context (string, 4 to 228k chars); meta (string, 156 to 215 chars).
self._debug(NIGHTMARE, "JS: document.write", `data`)
def jsProcessData (self, data): """process data produced by document.write() JavaScript""" self._debug(NIGHTMARE, "JS: document.write", `data`) self.js_output += 1 # parse recursively self.js_html.feed(data)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
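The jsProcessData record above feeds whatever JavaScript emits through document.write() back into a nested HTML parser (self.js_html), so generated markup passes through the same filter rules as the page itself. A minimal sketch of that recursive-feed idea; the class and attribute names are illustrative, not the project's API:

class FilterParser(object):
    """Toy recursive filter: markup produced by document.write()
    is fed into a nested parser of the same class."""
    def __init__(self, level=0):
        self.level = level
        self.out = []            # tokens seen at this nesting level
        self.js_html = None      # nested parser for JS-generated markup

    def feed(self, data):
        # a real parser would tokenize; here we only record the data
        self.out.append(data)

    def jsProcessData(self, data):
        # lazily create the nested parser, one level deeper
        if self.js_html is None:
            self.js_html = FilterParser(level=self.level + 1)
        # parse recursively, as jsProcessData above does
        self.js_html.feed(data)

p = FilterParser()
p.jsProcessData("<b>written by script</b>")
assert p.js_html.out == ["<b>written by script</b>"]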
self._debug(NIGHTMARE, "JS: popup")
def jsProcessPopup (self): """process javascript popup""" self._debug(NIGHTMARE, "JS: popup") self.js_popup += 1
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "buf_append_data")
def buf_append_data (self, data): """we have to make sure that we have no two following DATA things in the tag buffer. Why? To be 100% sure that an ENCLOSED match really matches enclosed data. """ self._debug(NIGHTMARE, "buf_append_data") if data[0]==DATA and self.buf and self.buf[-1][0]==DATA: self.buf[-1][1] += data[1] else: self.buf.append(data)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
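buf_append_data above maintains the invariant that the tag buffer never holds two consecutive DATA items, so that an ENCLOSED rule is guaranteed to match one contiguous piece of text. The same merge logic in isolation (the token constants are stand-ins):

DATA, STARTTAG = "data", "starttag"

def buf_append_data(buf, item):
    # merge consecutive DATA entries instead of appending a second one
    if item[0] == DATA and buf and buf[-1][0] == DATA:
        buf[-1][1] += item[1]
    else:
        buf.append(item)

buf = []
buf_append_data(buf, [DATA, "Hello "])
buf_append_data(buf, [DATA, "world"])
buf_append_data(buf, [STARTTAG, "b", {}])
assert buf == [[DATA, "Hello world"], [STARTTAG, "b", {}]]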
self._debug(NIGHTMARE, "flushbuf")
def flushbuf (self): """clear and return the output buffer""" self._debug(NIGHTMARE, "flushbuf") data = self.outbuf.getvalue() self.outbuf.close() self.outbuf = StringIO() return data
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "self.outbuf", `self.outbuf.getvalue()`) self._debug(NIGHTMARE, "self.buf", `self.buf`) self._debug(NIGHTMARE, "self.waitbuf", `self.waitbuf`) self._debug(NIGHTMARE, "self.inbuf", `self.inbuf.getvalue()`)
def _debugbuf (self): """print debugging information about data buffer status""" self._debug(NIGHTMARE, "self.outbuf", `self.outbuf.getvalue()`) self._debug(NIGHTMARE, "self.buf", `self.buf`) self._debug(NIGHTMARE, "self.waitbuf", `self.waitbuf`) self._debug(NIGHTMARE, "self.inbuf", `self.inbuf.getvalue()`)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "feed", `data`)
def feed (self, data): """feed some data to the parser""" if self.state=='parse': # look if we must replay something if self.waited: self.waited = 0 waitbuf, self.waitbuf = self.waitbuf, [] self.replay(waitbuf) if self.state!='parse': return data = self.inbuf.getvalue() + data self.inbuf.close() self.inbuf = StringIO() if data: # only feed non-empty data self._debug(NIGHTMARE, "feed", `data`) self.parser.feed(data) else: self._debug(NIGHTMARE, "feed") pass else: # wait state --> put in input buffer self._debug(NIGHTMARE, "wait") self.inbuf.write(data)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "feed")
(context: identical to the feed method shown above)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "wait")
(context: identical to the feed method shown above)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(HURT_ME_PLENTY, "flush")
def flush (self): self._debug(HURT_ME_PLENTY, "flush") # flushing in wait state raises a filter exception if self.state=='wait': raise FilterWait("HtmlParser[%d]: waiting for data"%self.level) self.parser.flush()
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "replay", waitbuf)
def replay (self, waitbuf): """call the handler functions again with buffer data""" self._debug(NIGHTMARE, "replay", waitbuf) for item in waitbuf: if item[0]==DATA: self._data(item[1]) elif item[0]==STARTTAG: self.startElement(item[1], item[2]) elif item[0]==ENDTAG: self.endElement(item[1]) elif item[0]==COMMENT: self.comment(item[1])
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "cdata", `data`)
def cdata (self, data): """character data""" self._debug(NIGHTMARE, "cdata", `data`) return self._data(data)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "characters", `data`)
def characters (self, data): """characters""" self._debug(NIGHTMARE, "characters", `data`) return self._data(data)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "comment", `data`)
def comment (self, data): """a comment; accept only non-empty comments""" self._debug(NIGHTMARE, "comment", `data`) item = [COMMENT, data] if self.state=='wait': return self.waitbuf.append(item) if self.comments and data: self.buf.append(item)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "doctype", `data`)
def doctype (self, data): self._debug(NIGHTMARE, "doctype", `data`) return self._data("<!DOCTYPE%s>"%data)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "pi", `data`)
def pi (self, data): self._debug(NIGHTMARE, "pi", `data`) return self._data("<?%s?>"%data)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "startElement", `tag`)
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug(NIGHTMARE, "startElement", `tag`) tag = check_spelling(tag, self.url) item = [STARTTAG, tag, attrs] if self.state=='wait': return self.waitbuf.append(item) rulelist = [] filtered = 0 if tag=="meta" and \ attrs.get('http-equiv', '').lower() =='pics-label': labels = resolve_html_entities(attrs.get('content', '')) # note: if there are no pics rules, this loop is empty for rule in self.pics: msg = check_pics(rule, labels) if msg: raise FilterPics(msg) # first labels match counts self.pics = [] elif tag=="body": # headers finished if self.pics: # no pics data found self.pics = [] # look for filter rules which apply for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug(NIGHTMARE, "matched rule %s on tag %s" % (`rule.title`, `tag`)) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = "True" if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug(NIGHTMARE, "put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.buf) self.rulestack.append((pos, rulelist)) if filtered: self.buf_append_data(item) elif self.js_filter: # if its not yet filtered, try filter javascript self.jsStartElement(tag, attrs) else: self.buf.append(item) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.js_filter: self.buf2data()
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "matched rule %s on tag %s" % (`rule.title`, `tag`))
(context: identical to the startElement method shown above)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "put on buffer")
(context: identical to the startElement method shown above)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "endElement", `tag`)
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "JS: del", `name`, "from", `tag`)
def jsStartElement (self, tag, attrs): """Check popups for onmouseout and onmouseover. Inline extern javascript sources""" changed = 0 self.js_src = None self.js_output = 0 self.js_popup = 0 for name in ('onmouseover', 'onmouseout'): if attrs.has_key(name) and self.jsPopup(attrs, name): self._debug(NIGHTMARE, "JS: del", `name`, "from", `tag`) del attrs[name] changed = 1 if tag=='form': name = attrs.get('name', attrs.get('id')) self.jsForm(name, attrs.get('action', ''), attrs.get('target', '')) elif tag=='script': lang = attrs.get('language', '').lower() url = attrs.get('src', '') scrtype = attrs.get('type', '').lower() is_js = scrtype=='text/javascript' or \ lang.startswith('javascript') or \ not (lang or scrtype) if is_js and url: return self.jsScriptSrc(url, lang) self.buf.append([STARTTAG, tag, attrs])
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "JS: jsPopup")
def jsPopup (self, attrs, name): """check if attrs[name] javascript opens a popup window""" self._debug(NIGHTMARE, "JS: jsPopup") val = resolve_html_entities(attrs[name]) if not val: return self.js_env.attachListener(self) try: self.js_env.executeScriptAsFunction(val, 0.0) except jslib.error, msg: pass self.js_env.detachListener(self) res = self.js_popup self.js_popup = 0 return res
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(HURT_ME_PLENTY, "jsForm", `name`, `action`, `target`)
def jsForm (self, name, action, target): """when hitting a (named) form, notify the JS engine about that""" if not name: return self._debug(HURT_ME_PLENTY, "jsForm", `name`, `action`, `target`) self.js_env.addForm(name, action, target)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "switching back to parse with")
def jsScriptData (self, data, url, ver): """Callback for loading <script src=""> data in the background If downloading is finished, data is None""" assert self.state=='wait' if data is None: if not self.js_script: print >> sys.stderr, "HtmlParser[%d]: empty JS src"%self.level, url else: self.buf.append([STARTTAG, "script", {'type': 'text/javascript'}]) script = "<!--\n%s\n//-->"%escape_js(self.js_script) self.buf.append([DATA, script]) # Note: <script src=""> could be missing an end tag, # but now we need one. Look later for a duplicate </script>. self.buf.append([ENDTAG, "script"]) self.js_script = '' self.state = 'parse' self._debug(NIGHTMARE, "switching back to parse with") self._debugbuf() else: self._debug(HURT_ME_PLENTY, "JS read", len(data), "<=", url) self.js_script += data
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(HURT_ME_PLENTY, "JS read", len(data), "<=", url)
(context: identical to the jsScriptData method shown above)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(HURT_ME_PLENTY, "JS jsScriptSrc", `url`, `ver`)
def jsScriptSrc (self, url, language): """Start a background download for <script src=""> tags""" assert self.state=='parse' ver = 0.0 if language: mo = re.search(r'(?i)javascript(?P<num>\d\.\d)', language) if mo: ver = float(mo.group('num')) url = urlparse.urljoin(self.url, url) self._debug(HURT_ME_PLENTY, "JS jsScriptSrc", `url`, `ver`) if _has_ws(url): print >> sys.stderr, "HtmlParser[%d]: broken JS url"%self.level,\ `url`, "at", `self.url` return self.state = 'wait' self.waited = 'True' self.js_src = 'True' client = HttpProxyClient(self.jsScriptData, (url, ver)) ClientServerMatchmaker(client, "GET %s HTTP/1.1" % url, #request {}, #headers '', #content {'nofilter': None}, # nofilter 'identity', # compress mime = "application/x-javascript", )
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
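jsScriptSrc above extracts an optional version number from the script tag's language attribute with a regular expression before starting the background download. The same extraction as a standalone helper:

import re

def js_version(language):
    # 'JavaScript1.5' -> 1.5, anything unversioned -> 0.0
    mo = re.search(r"(?i)javascript(?P<num>\d\.\d)", language or "")
    return float(mo.group("num")) if mo else 0.0

assert js_version("JavaScript1.5") == 1.5
assert js_version("javascript") == 0.0
assert js_version("") == 0.0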
self._debug(NIGHTMARE, "JS: jsScript", ver, `script`)
def jsScript (self, script, ver, item): """execute given script with javascript version ver""" self._debug(NIGHTMARE, "JS: jsScript", ver, `script`) assert self.state == 'parse' assert len(self.buf) >= 2 self.js_output = 0 self.js_env.attachListener(self) # start recursive html filter (used by jsProcessData) self.js_html = FilterHtmlParser(self.rules, self.pics, self.url, comments=self.comments, javascript=self.js_filter, level=self.level+1) # execute self.js_env.executeScript(unescape_js(script), ver) self.js_env.detachListener(self) # wait for recursive filter to finish self.jsEndScript(item)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "JS: endScript")
def jsEndScript (self, item): self._debug(NIGHTMARE, "JS: endScript") assert len(self.buf) >= 2 if self.js_output: try: self.js_html.feed('') self.js_html.flush() except FilterWait: self.state = 'wait' self.waited = 'True' make_timer(0.1, lambda : self.jsEndScript(item)) return self.js_html._debugbuf() assert not self.js_html.inbuf.getvalue() assert not self.js_html.waitbuf assert len(self.buf) >= 2 self.buf[-2:-2] = [[DATA, self.js_html.outbuf.getvalue()]]+self.js_html.buf self.js_html = None if (self.js_popup + self.js_output) > 0: # delete old script del self.buf[-1] del self.buf[-1] elif not self.filterEndElement(item[1]): self.buf.append(item) self._debug(NIGHTMARE, "JS: switching back to parse with") self._debugbuf() self.state = 'parse'
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
self._debug(NIGHTMARE, "JS: switching back to parse with")
(context: identical to the jsEndScript method shown above)
2235a065dd104f9a4252037ab7cb36681acd5b89 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2235a065dd104f9a4252037ab7cb36681acd5b89/HtmlParser.py
if self.addr[0] not in config['allowed_hosts']:
if not config['allowedhosts'].has_key(self.addr[0]):
def __init__ (self, socket, addr): Connection.__init__(self, socket) self.addr = addr self.state = 'request' self.server = None self.request = '' self.headers = None self.bytes_remaining = None # for content only self.content = '' if self.addr[0] not in config['allowed_hosts']: self.close()
867e53561cb3cce104da8b9c721509c7f19ca606 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/867e53561cb3cce104da8b9c721509c7f19ca606/HttpClient.py
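This record trades a list membership test for a dictionary lookup: has_key was the usual Python 2 dict idiom, and the in operator is the equivalent spelling that works for both containers (and is the only one left in Python 3). A small sketch of the two checks, with made-up hosts:

# both checks answer "is this client host allowed?"
allowed_list = ["127.0.0.1", "192.168.0.2"]          # O(n) scan
allowed_dict = {"127.0.0.1": 1, "192.168.0.2": 1}    # O(1) hash lookup

addr = ("127.0.0.1", 34567)   # (host, port) as returned by socket.accept()

assert addr[0] in allowed_list
assert addr[0] in allowed_dict   # same result as allowed_dict.has_key(addr[0]) in Python 2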
if self.enclosed and self.tag in NO_CLOSE_TAGS: raise ValueError, "reading rule %r: tag %r has no end tag, " \ "so specifying an enclose value is invalid." % \ (self.titles['en'], tag)
def __init__ (self, sid=None, titles=None, descriptions=None, disable=0, tag=u"a", attrs=None, enclosed=u"", part=wc.filter.html.COMPLETE, replacement=u""): """ Initialize rule data. """ super(HtmlrewriteRule, self).__init__(sid=sid, titles=titles, descriptions=descriptions, disable=disable) self.tag = tag self.tag_ro = None if attrs is None: self.attrs = {} else: self.attrs = attrs self.attrs_ro = {} self.part = part self.replacement = replacement self.enclosed = enclosed self.enclosed_ro = None if self.enclosed and self.tag in NO_CLOSE_TAGS: raise ValueError, "reading rule %r: tag %r has no end tag, " \ "so specifying an enclose value is invalid." % \ (self.titles['en'], tag) self.attrnames.append('tag')
5df9a57bb2b61cf160c233fd653800d8acb18754 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5df9a57bb2b61cf160c233fd653800d8acb18754/HtmlrewriteRule.py
if self.tag in NO_CLOSE_TAGS: return True
for tag in NO_CLOSE_TAGS: if self.match_tag(tag): return True
def matches_starttag (self): """ See if this rule matches start tags. """ if self.tag in NO_CLOSE_TAGS: return True return self.part not in [ wc.filter.html.ENCLOSED, wc.filter.html.COMPLETE, ]
5df9a57bb2b61cf160c233fd653800d8acb18754 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5df9a57bb2b61cf160c233fd653800d8acb18754/HtmlrewriteRule.py
if self.tag in NO_CLOSE_TAGS: return False
for tag in NO_CLOSE_TAGS: if self.match_tag(tag): return False
def matches_endtag (self): """ See if this rule matches end tags. """ if self.tag in NO_CLOSE_TAGS: return False return self.part not in [ wc.filter.html.ATTR, wc.filter.html.ATTRVAL, wc.filter.html.ATTRNAME, ]
5df9a57bb2b61cf160c233fd653800d8acb18754 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5df9a57bb2b61cf160c233fd653800d8acb18754/HtmlrewriteRule.py
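Both matches_starttag and matches_endtag replace the direct membership test self.tag in NO_CLOSE_TAGS with a loop over match_tag, presumably because a rule's tag can be a pattern rather than a literal name, in which case membership in the tuple never fires. A reduced sketch of that difference; the regex compilation is an assumption about how match_tag works:

import re

NO_CLOSE_TAGS = ("br", "hr", "img", "input", "meta")

class Rule(object):
    def __init__(self, tag):
        self.tag = tag                      # may be a pattern such as "br|hr"
        self.tag_ro = re.compile("^(?:%s)$" % tag)
    def match_tag(self, tag):
        return self.tag_ro.match(tag) is not None

rule = Rule("br|hr")
assert rule.tag not in NO_CLOSE_TAGS                  # the old membership test misses it
assert any(rule.match_tag(t) for t in NO_CLOSE_TAGS)  # the replacement loop catches it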
def XtestScriptSrc1 (self):
def testScriptSrc1 (self):
def XtestScriptSrc1 (self): self.filt(
b49dd87fac36792099044bb63fbf6476f7b83c01 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/b49dd87fac36792099044bb63fbf6476f7b83c01/TestScriptSrc.py
def XtestScriptSrc2 (self):
def testScriptSrc2 (self):
def XtestScriptSrc2 (self): self.filt(
b49dd87fac36792099044bb63fbf6476f7b83c01 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/b49dd87fac36792099044bb63fbf6476f7b83c01/TestScriptSrc.py
def XtestScriptSrc3 (self):
def testScriptSrc3 (self):
def XtestScriptSrc3 (self): """missing </script>""" self.filt(
b49dd87fac36792099044bb63fbf6476f7b83c01 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/b49dd87fac36792099044bb63fbf6476f7b83c01/TestScriptSrc.py
print "line", `l`
def parse_headers (): headers = [] try: s = get_data("/headers/") #debug(BRING_IT_ON, "headers data", s) except (IOError, ValueError): print >> sys.stderr, _("WebCleaner is not running") return headers if s=="-": return headers lines = s.split("\n") for l in lines: print "line", `l` # strip off paranthesis l = l[1:-1] # split into three parts url, io, hlist = l.split(", ", 2) # split headers hlist = (hlist.strip())[2:-2].split("', '") # strip headers hlist = map(lambda x: x.replace("\\r", ""), hlist) hlist = map(lambda x: x.replace("\\n", ""), hlist) hlist = map(lambda x: x.split(":", 1), hlist) # append headers.append([url[1:-1], int(io), hlist]) return headers
47761498dc644faabb60e83c0bebe297ee674e10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/47761498dc644faabb60e83c0bebe297ee674e10/HeaderWindow.py
print >>sys.stderr, "JS:", data
self.jsfilter.feed(data)
def processData (self, data): print >>sys.stderr, "JS:", data # XXX parse recursively
35fc60315c55232a57ddd202478a1748ebc7468a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/35fc60315c55232a57ddd202478a1748ebc7468a/Rewriter.py
self.buffer.append((ENDTAG, tag))
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
35fc60315c55232a57ddd202478a1748ebc7468a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/35fc60315c55232a57ddd202478a1748ebc7468a/Rewriter.py
if not filtered and self.javascript:
if not filtered and self.javascript and tag=='script':
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
35fc60315c55232a57ddd202478a1748ebc7468a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/35fc60315c55232a57ddd202478a1748ebc7468a/Rewriter.py
if tag!='script': return if not self.buffer: print >>sys.stderr, "empty buffer on </script>"
if len(self.buffer)<2: print >>sys.stderr, "short buffer on </script>", self.buffer
def jsEndElement (self, tag): """parse generated html for scripts""" if tag!='script': return if not self.buffer: print >>sys.stderr, "empty buffer on </script>" return last = self.buffer[-1] if last[0]!=DATA: print >>sys.stderr, "missing body for </script>", last return script = last[1].strip() if script.startswith("<!--"): script = script[4:].strip() self.jsEnv.attachListener(self) self.jsEnv.executeScriptAsFunction(val, 0.0) self.jsEnv.detachListener(self)
35fc60315c55232a57ddd202478a1748ebc7468a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/35fc60315c55232a57ddd202478a1748ebc7468a/Rewriter.py
last = self.buffer[-1] if last[0]!=DATA: print >>sys.stderr, "missing body for </script>", last
if self.buffer[-1][0]!=DATA or self.buffer[-2][0]!=STARTTAG: print >>sys.stderr, "missing tags for </script>", self.buffer[-2:]
(context: identical to the jsEndElement method shown above)
35fc60315c55232a57ddd202478a1748ebc7468a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/35fc60315c55232a57ddd202478a1748ebc7468a/Rewriter.py
script = last[1].strip()
script = self.buffer[-1][1].strip() self.buffer[-2:] = []
(context: identical to the jsEndElement method shown above)
35fc60315c55232a57ddd202478a1748ebc7468a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/35fc60315c55232a57ddd202478a1748ebc7468a/Rewriter.py
self.jsEnv.executeScriptAsFunction(val, 0.0)
self.jsfilter = HtmlFilter(self.rules, self.document, comments=self.comments, javascript=self.javascript) self.jsEnv.executeScriptAsFunction(script, 0.0)
(context: identical to the jsEndElement method shown above)
35fc60315c55232a57ddd202478a1748ebc7468a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/35fc60315c55232a57ddd202478a1748ebc7468a/Rewriter.py
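The four jsEndElement records tighten the buffer shape checks (two entries, STARTTAG followed by DATA) and fix an undefined name: val in the executeScriptAsFunction call should have been script. They also show the legacy HTML comment guard being stripped from inline scripts; the original removes only the leading marker, and a symmetric version of the idea looks like this:

def extract_script(text):
    # strip the old-style comment guard that hides scripts from pre-script browsers
    script = text.strip()
    if script.startswith("<!--"):
        script = script[4:].strip()
    if script.endswith("//-->"):
        script = script[:-5].strip()
    return script

assert extract_script("<!--\nalert(1)\n//-->") == "alert(1)"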
if attrs.get('imgreducer_convert'): img = img.convert()
if img.mode not in ('RGB', 'L'): img.draft("RGB", img.size) img = img.convert("RGB")
def finish (self, data, attrs): """ Feed image data to buffer, then convert it and return result. """ if self.init_image_reducer: self.set_ctype_header(attrs) self.init_image_reducer = False if not attrs.has_key('imgreducer_buf'): return data p = attrs['imgreducer_buf'] if data: p.write(data) p.seek(0) try: img = Image.open(p) data = StringIO.StringIO() if attrs.get('imgreducer_convert'): img = img.convert() img.save(data, "JPEG", quality=self.quality, optimize=1) except IOError, msg: # return original image data on error wc.log.warn(wc.LOG_FILTER, "I/O error reading image data: %s", str(msg)) # XXX the content type is pretty sure wrong return p.getvalue() return data.getvalue()
13b8aa23c835eaed0a2ae53aa82e5260107086cf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/13b8aa23c835eaed0a2ae53aa82e5260107086cf/ImageReducer.py
"I/O error reading image data: %s", str(msg))
"I/O error reading image data %r: %s", attrs['url'], str(msg))
(context: identical to the finish method shown above)
13b8aa23c835eaed0a2ae53aa82e5260107086cf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/13b8aa23c835eaed0a2ae53aa82e5260107086cf/ImageReducer.py
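The finish fix replaces the imgreducer_convert flag with a direct mode check: palette ("P") and alpha ("RGBA") images cannot be written as JPEG, so anything that is not already RGB or L is converted first. A self-contained sketch with Pillow; the quality value is arbitrary:

import io
from PIL import Image   # pip install Pillow

def recompress_to_jpeg(raw, quality=75):
    img = Image.open(io.BytesIO(raw))
    if img.mode not in ("RGB", "L"):
        img.draft("RGB", img.size)   # decoder hint, a no-op where unsupported
        img = img.convert("RGB")     # JPEG stores neither palette nor alpha
    out = io.BytesIO()
    img.save(out, "JPEG", quality=quality, optimize=True)
    return out.getvalue()

# a palette GIF round-trips without "cannot write mode P as JPEG"
buf = io.BytesIO()
Image.new("P", (8, 8)).save(buf, "GIF")
assert recompress_to_jpeg(buf.getvalue())[:2] == b"\xff\xd8"   # JPEG SOI marker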
d['imgreducer_convert'] = convert(ctype)
def get_attrs (self, url, localhost, stages, headers): """ Initialize image reducer buffer and flags. """ if not self.applies_to_stages(stages): return {} # don't filter tiny images d = super(ImageReducer, self).get_attrs(url, localhost, stages, headers) # weed out the rules that don't apply to this url rules = [ rule for rule in self.rules if rule.applies_to_url(url) ] if rules: if len(rules) > 1: wc.log.warn(wc.LOG_FILTER, "more than one rule matched %r: %s", url, str(rules)) # first rule wins quality = rules[0].quality minimal_size_bytes = rules[0].minimal_size_bytes else: quality = self.quality minimal_size_bytes = self.minimal_size_bytes try: length = int(headers['server'].get('Content-Length', 0)) except ValueError: wc.log.warn(wc.LOG_FILTER, "invalid content length at %r", url) return d if length < 0: wc.log.warn(wc.LOG_FILTER, "negative content length at %r", url) return d if length == 0: wc.log.warn(wc.LOG_FILTER, "missing content length at %r", url) elif 0 < length < minimal_size_bytes: return d d['imgreducer_buf'] = StringIO.StringIO() # some images have to be convert()ed before saving ctype = headers['server'].get('Content-Type') d['imgreducer_convert'] = convert(ctype) return d
13b8aa23c835eaed0a2ae53aa82e5260107086cf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/13b8aa23c835eaed0a2ae53aa82e5260107086cf/ImageReducer.py
def convert (ctype): """ Return True if an image has to be convert()ed before saving. """ return ctype in ('image/gif',)
(context: identical to the get_attrs method shown above)
13b8aa23c835eaed0a2ae53aa82e5260107086cf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/13b8aa23c835eaed0a2ae53aa82e5260107086cf/ImageReducer.py
data += content
data += request.content
def construct_request_data (self, request): """ Construct valid HTTP request data string. """ lines = [] version = "HTTP/%d.%d" % request.version lines.append("%s %s %s" % (request.method, request.uri, version)) lines.extend(request.headers) # an empty line ends the headers lines.extend(("", "")) data = "\r\n".join(lines) if request.content: data += content return data
51ab489f74ddf9b36d79a2707841ec7b0704a4b6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/51ab489f74ddf9b36d79a2707841ec7b0704a4b6/__init__.py
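The construct_request_data fix is one word: content was an unbound name and raised NameError as soon as a request actually carried a body; request.content is the attribute the method meant. A compact version with the fix applied, using a namedtuple as a stand-in for the proxy's request object:

from collections import namedtuple

Request = namedtuple("Request", "method uri version headers content")

def construct_request_data(request):
    lines = ["%s %s HTTP/%d.%d" % ((request.method, request.uri) + request.version)]
    lines.extend(request.headers)
    lines.extend(("", ""))          # an empty line ends the headers
    data = "\r\n".join(lines)
    if request.content:
        data += request.content     # not the unbound name `content`
    return data

req = Request("POST", "/x", (1, 1), ["Host: example.invalid", "Content-Length: 2"], "hi")
assert construct_request_data(req).endswith("\r\n\r\nhi")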
if subkey.get('EnableDHCP')==1: ip = subkey.get('DhcpIPAddress')
if subkey.get('EnableDHCP'): ip = subkey.get('DhcpIPAddress', '')
def get_localaddrs (): """all active interfaces' ip addresses""" addrs = sets.Set() try: # search interfaces key = wc.winreg.key_handle(wc.winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces") for subkey in key.subkeys(): if subkey.get('EnableDHCP')==1: ip = subkey.get('DhcpIPAddress') else: ip = subkey.get('IPAddress') if ip: addrs.add(ip) except EnvironmentError: pass return addrs
46e6c1670e7d231539bdadf3e56d95da49100694 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/46e6c1670e7d231539bdadf3e56d95da49100694/nt.py
ip = subkey.get('IPAddress') if ip: addrs.add(ip)
ip = subkey.get('IPAddress', '') if not (isinstance(ip, basestring) and ip): continue addrs.add(str(ip))
(context: identical to the get_localaddrs function shown above)
46e6c1670e7d231539bdadf3e56d95da49100694 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/46e6c1670e7d231539bdadf3e56d95da49100694/nt.py
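The two get_localaddrs records harden the registry walk: EnableDHCP is tested for truth instead of compared to 1 (registry values frequently arrive as strings), missing values default to '', and only non-empty string addresses are kept. The same defensive pattern, with plain dicts standing in for registry interface keys:

def addrs_from_interfaces(interfaces):
    addrs = set()
    for subkey in interfaces:
        if subkey.get("EnableDHCP"):              # truthiness, not == 1
            ip = subkey.get("DhcpIPAddress", "")
        else:
            ip = subkey.get("IPAddress", "")
        if isinstance(ip, str) and ip:            # skip lists, ints, empty values
            addrs.add(ip)
    return addrs

ifaces = [
    {"EnableDHCP": "1", "DhcpIPAddress": "10.0.0.5"},   # string flag is still truthy
    {"EnableDHCP": 0, "IPAddress": ["192.168.0.7"]},    # list value gets skipped
    {"EnableDHCP": 0},                                  # missing value gets skipped
]
assert addrs_from_interfaces(ifaces) == {"10.0.0.5"}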
p.debug(1)
def _main (): """USAGE: test/run.sh test/parsefile.py test.html""" import sys if len(sys.argv)!=2: print _main.__doc__ sys.exit(1) if sys.argv[1]=='-': f = sys.stdin else: f = file(sys.argv[1]) from wc.parser.htmllib import HtmlPrinter from wc.parser import htmlsax p = htmlsax.parser(HtmlPrinter()) p.debug(1) size = 1024 #size = 1 data = f.read(size) while data: p.feed(data) data = f.read(size) p.flush()
cd253a6cdf23fbc9d8c7e42879bd4883bca11cb0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cd253a6cdf23fbc9d8c7e42879bd4883bca11cb0/parsefile.py
""" % fname("example")
""" % fname("example", sep="/")
def fname (name): return os.path.join("wc", "dns", "tests", name)
9f0b5ca04367d9257ac9c86c3a391f1d3f26e70b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9f0b5ca04367d9257ac9c86c3a391f1d3f26e70b/test_zone.py
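The test_zone change calls fname with a sep argument, implying the helper grew a parameter that forces a fixed separator so paths pasted into zone-file text stay identical across platforms (os.path.join would produce backslashes on Windows). A plausible version of that helper; the signature is inferred from the call site, not taken from the source:

import os.path

def fname(name, sep=None):
    parts = ("wc", "dns", "tests", name)
    if sep is not None:
        return sep.join(parts)      # platform-independent spelling
    return os.path.join(*parts)     # native spelling

assert fname("example", sep="/") == "wc/dns/tests/example"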
translator = gettext.translation(Name, LocaleDir, [lang])
if lang!='en': translator = gettext.translation(Name, LocaleDir, [lang]) else: translator = None
def __init__ (self, client, url, form, protocol, status=200, msg=i18n._('Ok'), context={}, headers={'Content-Type': 'text/html'}): self.client = client # we pretend to be the server self.connected = True try: lang = i18n.get_headers_lang(headers) # get the template filename path, dirs, lang = get_template_url(url, lang) # do not rely on content-type header value if path.endswith('.html'): headers['Content-Type'] = 'text/html' f = file(path) # get TAL context context = get_context(dirs, form, context, lang) # get translator translator = gettext.translation(Name, LocaleDir, [lang]) # expand template data = expand_template(f, context, translator=translator) else: f = file(path, 'rb') data = f.read() except IOError, e: exception(GUI, "Wrong path `%s'", url) # XXX this can actually lead to a maximum recursion # error when client.error caused the exception return client.error(404, i18n._("Not Found")) except: # catch all other exceptions and report internal error exception(GUI, "Template error") return client.error(500, i18n._("Internal Error")) f.close() # write response self.put_response(data, protocol, status, msg, headers)
ae419adf110b72b6c3e74a036a832d0229454dda /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ae419adf110b72b6c3e74a036a832d0229454dda/__init__.py
"""return True if given file is writable""" if os.path.isdir(fname) or os.path.islink(fname): return False try: if os.path.exists(fname): open(fname, 'a').close() return True else: open(fname, 'w').close() os.remove(fname) return True except IOError: pass return False def get_log_file (name, logname, trydirs=[]): """get full path name to writeable logfile""" dirs = [] if os.name =='nt': dirs.append(os.environ.get("TEMP")) else: dirs.append(os.path.join('/', 'var', 'log', name)) dirs.append(os.path.join('/', 'var', 'tmp', name)) dirs.append(os.path.join('/', 'tmp', name)) dirs.append(os.getcwd()) trydirs = trydirs+dirs for d in trydirs: fullname = os.path.join(d, logname) if iswritable(fullname): return fullname raise IOError("Could not find writable directory for %s in %s" % (logname, str(trydirs)))
"""return True if given file is writable""" if os.path.isdir(fname) or os.path.islink(fname): return False try: if os.path.exists(fname): open(fname, 'a').close() return True else: open(fname, 'w').close() os.remove(fname) return True except IOError: pass return False def get_log_file (name, logname, trydirs=None): """get full path name to writeable logfile""" dirs = [] if os.name == "nt": dirs.append(os.environ.get("TEMP")) else: dirs.append(os.path.join('/', 'var', 'log', name)) dirs.append(os.path.join('/', 'var', 'tmp', name)) dirs.append(os.path.join('/', 'tmp', name)) dirs.append(os.getcwd()) if trydirs is None: trydirs = dirs else: trydirs.extend(dirs) for d in trydirs: fullname = os.path.join(d, logname) if iswritable(fullname): return fullname raise IOError("Could not find writable directory for %s in %s" % (logname, str(trydirs)))
def iswritable (fname): """return True if given file is writable""" if os.path.isdir(fname) or os.path.islink(fname): return False try: if os.path.exists(fname): open(fname, 'a').close() return True else: open(fname, 'w').close() os.remove(fname) return True except IOError: pass return False
208101be0a66da6c9dc5cf88658d5acfe1c68647 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/208101be0a66da6c9dc5cf88658d5acfe1c68647/__init__.py
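The get_log_file rewrite replaces the trydirs=[] default with trydirs=None plus an explicit check. The old default happened to be harmless here because the function only read it, but the None idiom is the standard defense: a mutable default is created once, at definition time, and shared by every call, as this demonstrates:

def buggy(dirs=[]):          # one shared list for every call
    dirs.append("/tmp")
    return dirs

def fixed(dirs=None):        # a fresh list per call
    if dirs is None:
        dirs = []
    dirs.append("/tmp")
    return dirs

assert buggy() == ["/tmp"]
assert buggy() == ["/tmp", "/tmp"]   # the default grew between calls
assert fixed() == ["/tmp"]
assert fixed() == ["/tmp"]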
p.feed("""<a b="c"><""") p.feed("""d>""")
s = """<h1>bla</h1>""" for c in s: p.feed(c) p.flush()
def _broken (): p = HtmlPrinter() p.feed("""<a b="c"><""") p.feed("""d>""")
25e8ed57c6acfdfe71d03c14a622ec92b468c27e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/25e8ed57c6acfdfe71d03c14a622ec92b468c27e/htmllib.py
return (mo.group(1), mo.group(2)) if __name__=='__main__': for url in ['', 'a', 'a/b', 'http://imadoofus.com', 'http://imadoofus.com//', 'http://imadoofus.com/?q=a', 'http://imadoofus.com/?q=a ', 'http://imadoofus.com/a/b//c', 'http://imadoofus.com/forum', 'http://imadoofus.com/forum/', ]: print rating_split_url(url) print rating_cache_get('http://www.heise.de/foren/')
vmin, vmax = mo.group(1), mo.group(2) if vmin=="": vmin = None else: vmin = int(vmin) if vmax=="": vmax = None else: vmax = int(vmax) return (vmin, vmax)
def rating_range (value): """parse value as range; return tuple (rmin, rmax) or None on error""" mo = _range_re.match(value) if not mo: return None return (mo.group(1), mo.group(2))
9accbce77dae1b724f1f1ad6ba3ab59b899732cb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/9accbce77dae1b724f1f1ad6ba3ab59b899732cb/Rating.py
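The rating_range fix converts the regex groups to integers, with None marking an open end, instead of returning the raw strings. A condensed version with the post-fix behavior; the _range_re pattern is an assumption about what the module compiles:

import re

_range_re = re.compile(r"^(\d*)-(\d*)$")

def rating_range(value):
    # parse 'min-max' (either side optional); None on bad syntax
    mo = _range_re.match(value)
    if not mo:
        return None
    vmin = int(mo.group(1)) if mo.group(1) else None
    vmax = int(mo.group(2)) if mo.group(2) else None
    return (vmin, vmax)

assert rating_range("3-7") == (3, 7)
assert rating_range("-7") == (None, 7)
assert rating_range("3-") == (3, None)
assert rating_range("x") is None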
def server_response (self, response, statuscode, headers):
def server_response (self, response, statuscode, status, headers):
def server_response (self, response, statuscode, headers): """the server got a response""" # Okay, transfer control over to the real client if self.client.connected: config['requests']['valid'] += 1 self.server.client = self.client self.client.server_response(self.server, response, statuscode, headers) else: self.server.client_abort()
5aaaf0f8c7cbeb35466459a95e5888072c287921 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5aaaf0f8c7cbeb35466459a95e5888072c287921/ClientServerMatchmaker.py
self.client.server_response(self.server, response, statuscode, headers)
self.client.server_response(self.server, response, status, headers)
(context: identical to the server_response method shown above)
5aaaf0f8c7cbeb35466459a95e5888072c287921 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5aaaf0f8c7cbeb35466459a95e5888072c287921/ClientServerMatchmaker.py
elif not ct.startswith(gm[0]) and \ gm[0] in _fix_content_types: warn(PROXY, i18n._("change Content-Type from %s to %s in %s"), `ct`, `gm[0]`, `self.url`) self.headers['Content-Type'] = "%s\r"%gm[0] if gm[1] and gm[1] in _fix_content_encodings: ce = self.headers.get('Content-Encoding', None) if ce is None: self.headers['Content-Encoding'] = "%s\r"%gm[1] warn(PROXY, i18n._("add Content-Encoding %s to %s"), `gm[1]`, `self.url`) elif ce != gm[1]: warn(PROXY, i18n._("change Content-Encoding from %s to %s in %s"), `ce`, `gm[1]`, `self.url`) self.headers['Content-Encoding'] = "%s\r"%gm[1]
def check_headers (self): """add missing content-type and/or encoding headers""" # 304 Not Modified does not send any type or encoding info, # because this info was cached if self.statuscode == '304': return # check content-type against our own guess i = self.document.find('?') if i>0: document = self.document[:i] else: document = self.document gm = mimetypes.guess_type(document, None) ct = self.headers.get('Content-Type', None) if self.mime: if ct is None: warn(PROXY, i18n._("add Content-Type %s in %s"), `self.mime`, `self.url`) self.headers['Content-Type'] = "%s\r"%self.mime elif not ct.startswith(self.mime): i = ct.find(';') if i== -1: val = self.mime else: val = self.mime + ct[i:] warn(PROXY, i18n._("set Content-Type from %s to %s in %s"), `str(ct)`, `val`, `self.url`) self.headers['Content-Type'] = "%s\r"%val elif gm[0]: # guessed an own content type if ct is None: warn(PROXY, i18n._("add Content-Type %s to %s"), `gm[0]`, `self.url`) self.headers['Content-Type'] = "%s\r"%gm[0] # fix some content types elif not ct.startswith(gm[0]) and \ gm[0] in _fix_content_types: warn(PROXY, i18n._("change Content-Type from %s to %s in %s"), `ct`, `gm[0]`, `self.url`) self.headers['Content-Type'] = "%s\r"%gm[0] if gm[1] and gm[1] in _fix_content_encodings: ce = self.headers.get('Content-Encoding', None) # guessed an own encoding type if ce is None: self.headers['Content-Encoding'] = "%s\r"%gm[1] warn(PROXY, i18n._("add Content-Encoding %s to %s"), `gm[1]`, `self.url`) elif ce != gm[1]: warn(PROXY, i18n._("change Content-Encoding from %s to %s in %s"), `ce`, `gm[1]`, `self.url`) self.headers['Content-Encoding'] = "%s\r"%gm[1] # hmm, fix application/x-httpd-php* if self.headers.get('Content-Type', '').lower().startswith('application/x-httpd-php'): warn(PROXY, i18n._("fix x-httpd-php Content-Type")) self.headers['Content-Type'] = 'text/html\r'
7cad4003033d1bd964b8222c220ae1bfd6bbf463 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/7cad4003033d1bd964b8222c220ae1bfd6bbf463/HttpServer.py
headers.append("Content-Length: %d" % len(content)-5)
headers.append("Content-Length: %d" % (len(content)-5))
def get_request_headers (self, content): port = self.server.socket.getsockname()[1] headers = [ "Host: localhost:%d" % port, "Proxy-Connection: close", ] if content: headers.append("Content-Length: %d" % len(content)) headers.append("Content-Length: %d" % len(content)-5) return headers
4178632fd0e93a2cc5b75e213dd7b89be7a0e06b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4178632fd0e93a2cc5b75e213dd7b89be7a0e06b/test_multiheader.py
"Content-Length: %d" % len(content)-5,
"Content-Length: %d" % (len(content)-5),
def get_response_headers (self, content): return [ "Content-Type: text/plain", "Content-Length: %d" % len(content), "Content-Length: %d" % len(content)-5, ]
4178632fd0e93a2cc5b75e213dd7b89be7a0e06b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4178632fd0e93a2cc5b75e213dd7b89be7a0e06b/test_multiheader.py
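Both multiheader test records trip over the same precedence rule: % binds tighter than binary minus, so "Content-Length: %d" % len(content)-5 formats the string first and then tries to subtract an integer from it. The fix simply parenthesizes the arithmetic; a short demonstration:

content = "hello world"

ok = "Content-Length: %d" % (len(content) - 5)    # arithmetic first, then format
assert ok == "Content-Length: 6"

try:
    "Content-Length: %d" % len(content) - 5       # formats, then str - int
except TypeError:
    pass                                          # unsupported operand type(s) for -
else:
    raise AssertionError("expected a TypeError")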
def generate_sids ():
def generate_sids (prefix="wc"):
def generate_sids (): for rule in _rules_without_sid: rule.sid = generate_unique_sid("wc") del _rules_without_sid[:]
6af989e0d472cc03f4c8722380321c1aa0df7ec1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/6af989e0d472cc03f4c8722380321c1aa0df7ec1/__init__.py
rule.sid = generate_unique_sid("wc")
rule.sid = generate_unique_sid(prefix)
(context: identical to the generate_sids function shown above)
6af989e0d472cc03f4c8722380321c1aa0df7ec1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/6af989e0d472cc03f4c8722380321c1aa0df7ec1/__init__.py
hosts = sort_seq(ip.map2hosts(self['nofilterhosts']))
hosts = self['nofilterhosts']
def write_proxyconf (self): """write proxy configuration""" f = file(self['configfile'], 'w') f.write("""<?xml version="1.0" encoding="%s"?>
047dfe1a215752fc10b3c7fa9191f5799f3347da /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/047dfe1a215752fc10b3c7fa9191f5799f3347da/__init__.py
'HTTP/1.0 %d Use different host\r\n',
'HTTP/1.0 %d %s\r\n', 'Server: WebCleaner Proxy\r\n'
def error(self, code, msg): ServerHandleDirectly( self.client, 'HTTP/1.0 %d Use different host\r\n', 'Content-type: text/html\r\n' 'Location: http://%s\r\n' '\r\n' % (code, new_url), msg)
d689ab4b5238b9e82a92e3c41d9f0c47782b391e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d689ab4b5238b9e82a92e3c41d9f0c47782b391e/ClientServerMatchmaker.py
'Location: http://%s\r\n' '\r\n' % (code, new_url),
'\r\n' '<html><head>' '<title>WebCleaner Proxy Error %d %s</title>' '</head><body bgcolor="#ffffff"><center>' 'WebCleaner Proxy Error %d %s<br>' '%s<br></center></body></html>' % (code, msg, code, msg),
(context: identical to the error method shown above)
d689ab4b5238b9e82a92e3c41d9f0c47782b391e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d689ab4b5238b9e82a92e3c41d9f0c47782b391e/ClientServerMatchmaker.py
assert self.state == 'dns' if answer.isError(): self.error(400, _(answer.data)) return self.state = 'server' self.ipaddr = socket.gethostbyname(self.hostname) self.find_server() def _handle_dns(self, hostname, answer):
def handle_dns(self, hostname, answer): assert self.state == 'dns' if answer.isError(): self.error(400, _(answer.data)) return self.state = 'server' self.ipaddr = socket.gethostbyname(self.hostname)
d689ab4b5238b9e82a92e3c41d9f0c47782b391e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d689ab4b5238b9e82a92e3c41d9f0c47782b391e/ClientServerMatchmaker.py
print "X", repr(file_length) print "X", repr(offset)
def classify (self, f): if not self.entries: raise StandardError("Not initialised properly") # Are we still looking for the ruleset to apply or are we in a rule found_rule = False # When we found the rule, what is the level that we successfull passed in_level = 0 # If we failed part of the rule there is no point looking for higher level subrule allow_next = 0 # String provided by the successfull rule result = ""
687536303ff11effe11421562bee57b974b1a0bb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/687536303ff11effe11421562bee57b974b1a0bb/magic.py
debug(PROXY, '%s <= read %d', self, len(data)) debug(CONNECTION, 'data %r', data)
def handle_read (self): """read data from connection, put it into recv_buffer and call process_read""" assert self.connected debug(PROXY, '%s handle_read', self)
07041c45cdc133cb738a13fb1405dec85759d43c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/07041c45cdc133cb738a13fb1405dec85759d43c/Connection.py
debug(HURT_ME_PLENTY, "request", `self.request`)
def __init__ (self, client, request, headers, content, nofilter,compress): self.client = client self.request = request self.headers = headers self.compress = compress self.content = content self.nofilter = nofilter debug(HURT_ME_PLENTY, "request", `self.request`) self.method, self.url, protocol = self.request.split() scheme, hostname, port, document = spliturl(self.url) # fix missing trailing / if not document: document = '/' # fix missing host headers for HTTP/1.1 if protocol=='HTTP/1.1' and not self.headers.has_key('host'): self.headers['Host'] = hostname if port!=80: self.headers['Host'] += ":%d"%port debug(HURT_ME_PLENTY, "Proxy: splitted url", scheme, hostname, port, document) if scheme=='file': # a blocked url is a local file:// link # this means we should _not_ use this proxy for local # file links :) mtype = mimetypes.guess_type(self.url)[0] config['requests']['valid'] += 1 config['requests']['blocked'] += 1 ServerHandleDirectly(self.client, 'HTTP/1.0 200 OK\r\n', 'Content-Type: %s\r\n\r\n'%(mtype or 'application/octet-stream'), open(document, 'rb').read()) return
de023d7b994a3cb75dfc854df0807d6ae5f19339 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/de023d7b994a3cb75dfc854df0807d6ae5f19339/ClientServerMatchmaker.py
url = baseurl+filename+".gz"
url = baseurl+filename
def update_filter (wconfig, dryrun=False, log=None): """Update the given configuration object with .zap files found at baseurl. If dryrun is True, only print out the changes but do nothing throws IOError on error """ chg = False baseurl = wconfig['baseurl']+"filter/" url = baseurl+"filter-md5sums.txt" try: page = open_url(url) except IOError, msg: print >>log, "error fetching %s:"%url, msg return chg # remember all local config files filemap = {} for filename in wc.filterconf_files(): filemap[os.path.basename(filename)] = filename # read md5sums for line in page.read().splitlines(): if "<" in line: print >>log, "error fetching", url return chg if not line: continue md5sum, filename = line.split() assert filename.endswith('.zap') fullname = os.path.join(wc.ConfigDir, filename) # compare checksums if filemap.has_key(filename): f = file(fullname) data = f.read() digest = list(md5.new(data).digest()) f.close() digest = "".join([ "%0.2x"%ord(c) for c in digest ]) if digest==md5sum: print >>log, i18n._("filter %s not changed, ignoring")%filename continue print >>log, i18n._("updating filter %s")%filename else: print >>log, i18n._("adding new filter %s"), filename # parse new filter url = baseurl+filename+".gz" page = open_url(url) p = wc.ZapperParser(fullname, wconfig, compile_data=False) p.parse(fp=page) page.close() chg = wconfig.merge_folder(p.folder, dryrun=dryrun, log=log) or chg url = baseurl+"extern-md5sums.txt" try: page = open_url(url) except IOError, msg: print >>log, i18n._("error fetching %s:")%url, msg return chg lines = page.read().splitlines() page.close() for line in lines: if "<" in line: print >>log, i18n._("error fetching %s:")%url, i18n._("invalid content") return chg if not line: continue md5sum, filename = line.split() # XXX UNIX-generated md5sum filenames with subdirs are not portable fullname = os.path.join(wc.ConfigDir, filename) # compare checksums if os.path.exists(fullname): f = file(fullname) data = f.read() digest = list(md5.new(data).digest()) f.close() digest = "".join([ "%0.2x"%ord(c) for c in digest ]) if digest==md5sum: print >>log, i18n._("extern filter %s not changed, ignoring")%filename continue print >>log, i18n._("updating extern filter %s")%filename else: print >>log, i18n._("adding new extern filter %s")%filename chg = True if not dryrun: url = baseurl+filename try: page = open_url(url) except IOError, msg: print >>log, i18n._("error fetching %s:")%url, msg continue data = page.read() if not data: print >>log, i18n._("error fetching %s:")%url, i18n._("got no data") continue f = file(fullname, 'wb') f.write(data) f.close() return chg
c11acc76c9ac079636774d74a38c7a44f6e1ecbc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/c11acc76c9ac079636774d74a38c7a44f6e1ecbc/update.py
if encoding in ('gzip', 'x-gzip', 'deflate') and rewrite:
if encoding in ('gzip', 'x-gzip', 'deflate'):
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() self.statuscode = None if response: self.statuscode = response[1] if self.statuscode == '100': # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`) self.check_headers() # will content be rewritten? rewrite = self.is_rewrite() # add client accept-encoding value self.headers['Accept-Encoding'] = self.client.compress if self.headers.get('Content-Length') is not None: self.bytes_remaining = int(self.headers['Content-Length']) #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining) if rewrite: remove_headers(self.headers, ['Content-Length']) else: self.bytes_remaining = None # add decoders self.decoders = [] # Chunked encoded if self.headers.get('Transfer-Encoding') is not None: #debug(BRING_IT_ON, 'S/Transfer-encoding:', `self.headers['transfer-encoding']`) self.decoders.append(UnchunkStream()) # remove encoding header to_remove = ["Transfer-Encoding"] if self.headers.get("Content-Length") is not None: print >>sys.stderr, _('Warning: chunked encoding should not have Content-Length') to_remove.append("Content-Length") self.bytes_remaining = None remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" # Compressed content (uncompress only for rewriting modules) encoding = self.headers.get('Content-Encoding', '').lower() if encoding in ('gzip', 'x-gzip', 'deflate') and rewrite: if encoding=='deflate': self.decoders.append(DeflateStream()) else: self.decoders.append(GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if self.headers.get('Cache-Control', '').lower()=='no-transform': to_remove.append('Cache-Control') remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" elif encoding and encoding!='identity' and rewrite: print >>sys.stderr, _("Warning: unsupported encoding:"),`encoding` # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) # initStateObject can modify headers (see Compress.py)! self.attrs = initStateObjects(self.headers, self.url) if self.headers.get('Content-Length') is None: self.headers['Connection'] = 'close' #debug(HURT_ME_PLENTY, "S/Headers filtered", `self.headers.headers`) wc.proxy.HEADERS.append((self.url, 1, self.headers.headers)) self.client.server_response(self.response, self.headers) self.attrs['nofilter'] = self.nofilter['nofilter'] if ((response and response[1] in ('204', '304')) or \ self.method == 'HEAD'): # These response codes indicate no content self.state = 'recycle' else: self.state = 'content'
02280af01cffb8e7abeb5cea52943ce090adfb45 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/02280af01cffb8e7abeb5cea52943ce090adfb45/HttpServer.py
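The hunk above picks a decoder by Content-Encoding value: DeflateStream for deflate, GunzipStream otherwise. A minimal stand-alone sketch of that dispatch using only the standard zlib module (the stream class names in the row are the proxy's own; this helper is an illustration, not the project's code):

import zlib

def make_decompressor (encoding):
    # 'deflate' bodies are zlib-wrapped per RFC 2616; gzip bodies carry a
    # gzip header, which zlib accepts when built with wbits = 16 + MAX_WBITS
    if encoding == 'deflate':
        return zlib.decompressobj()
    if encoding in ('gzip', 'x-gzip'):
        return zlib.decompressobj(16 + zlib.MAX_WBITS)
    return None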
elif encoding and encoding!='identity' and rewrite:
elif encoding and encoding!='identity':
02280af01cffb8e7abeb5cea52943ce090adfb45 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/02280af01cffb8e7abeb5cea52943ce090adfb45/HttpServer.py
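Both hunks against this context also install an UnchunkStream whenever a Transfer-Encoding header is present. For reference, the chunked wire format that decoder has to undo; the sketch below is deliberately non-incremental and assumes a complete, well-formed body, while the proxy's stream decoder must cope with partial reads:

def dechunk (data):
    # each chunk is '<hex size>[;ext]\r\n<size bytes>\r\n'; size 0 ends the body
    out = []
    pos = 0
    while True:
        eol = data.index(b"\r\n", pos)
        size = int(data[pos:eol].split(b";")[0], 16)
        if size == 0:
            break
        start = eol + 2
        out.append(data[start:start + size])
        pos = start + size + 2  # skip the chunk data and its trailing CRLF
    return b"".join(out)

dechunk(b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n")  # -> b'Wikipedia'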
self.attrs = initStateObjects(self.headers, self.url)
if self.headers.get('Content-Length') is None:
    self.headers['Connection'] = 'close'
wc.proxy.HEADERS.append((self.url, 1, self.headers.headers))
self.client.server_response(self.response, self.headers)
self.attrs['nofilter'] = self.nofilter['nofilter']
if ((response and response[1] in ('204', '304')) or \
    self.method == 'HEAD'):
    self.state = 'recycle'
else:
    self.state = 'content'

def check_headers (self):
    """add missing content-type and/or encoding headers if needed"""
    if self.statuscode == '304':
        return
    gm = mimetypes.guess_type(self.document, None)
    if gm[0]:
        if self.headers.get('Content-Type') is None:
            print >>sys.stderr, _("Warning: add Content-Type %s to %s") % \
                  (`gm[0]`, `self.url`)
            self.headers['Content-Type'] = gm[0]
        elif not self.headers['Content-Type'].startswith(gm[0]) and \
             gm[0] in _fix_content_types:
            print >>sys.stderr, _("Warning: change Content-Type from %s to %s in %s") % \
                  (`self.headers['Content-Type']`, `gm[0]`, `self.url`)
            self.headers['Content-Type'] = gm[0]
    if gm[1]:
        if self.headers.get('Content-Encoding') is None:
            self.headers['Content-Encoding'] = gm[1]
            print >>sys.stderr, _("Warning: add Content-Encoding %s to %s") % \
                  (`gm[1]`, `self.url`)
        elif self.headers.get('Content-Encoding') != gm[1]:
            print >>sys.stderr, _("Warning: change Content-Encoding from %s to %s in %s") % \
                  (`self.headers['Content-Encoding']`, `gm[1]`, `self.url`)
            self.headers['Content-Encoding'] = gm[1]
02280af01cffb8e7abeb5cea52943ce090adfb45 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/02280af01cffb8e7abeb5cea52943ce090adfb45/HttpServer.py
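The removed block above ends with check_headers, which repairs missing or wrong Content-Type/Content-Encoding headers from the document name via mimetypes.guess_type. A reduced sketch of that idea (function name and fallback behaviour are illustrative, not the project's exact logic):

import mimetypes

def fixed_content_type (document, headers):
    # trust the file suffix only when the server sent no Content-Type at all
    guessed, _encoding = mimetypes.guess_type(document)
    if guessed and headers.get('Content-Type') is None:
        return guessed
    return headers.get('Content-Type')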
if self.document.endswith(".bz2") or \
   self.document.endswith(".tgz") or \
   self.document.endswith(".gz"):
    gm = mimetypes.guess_type(self.document, False)
    if gm[1]:
        self.headers['Content-Encoding'] = gm[1]
    if gm[0]:
        self.headers['Content-Type'] = gm[0]
def process_headers (self):
    # Headers are terminated by a blank line .. now in the regexp,
    # we want to say it's either a newline at the beginning of
    # the document, or it's a lot of headers followed by two newlines.
    # The cleaner alternative would be to read one line at a time
    # until we get to a blank line...
    m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer)
    if not m:
        return
    # handle continue requests (XXX should be in process_response?)
    response = self.response.split()
    if response and response[1] == '100':
        # it's a Continue request, so go back to waiting for headers
        self.state = 'response'
        return
    # check for unusual compressed files
    if self.document.endswith(".bz2") or \
       self.document.endswith(".tgz") or \
       self.document.endswith(".gz"):
        gm = mimetypes.guess_type(self.document, False)
        if gm[1]:
            self.headers['Content-Encoding'] = gm[1]
        if gm[0]:
            self.headers['Content-Type'] = gm[0]
    # filter headers
    self.headers = applyfilter(FILTER_RESPONSE_HEADER,
                     rfc822.Message(StringIO(self.read(m.end()))),
                     attrs=self.nofilter)
    # will content be rewritten?
    rewrite = False
    for ro in config['mime_content_rewriting']:
        if ro.match(self.headers.get('Content-Type', "")):
            rewrite = True
            break
    #debug(HURT_ME_PLENTY, "S/Headers ", `self.headers.headers`)
    if self.headers.has_key('Content-Length'):
        if rewrite:
            remove_headers(self.headers, ['Content-Length'])
            self.bytes_remaining = None
        else:
            self.bytes_remaining = int(self.headers['Content-Length'])
    else:
        self.bytes_remaining = None
c7a765614b330c8ca260a7b155f31d0a0b59bd6c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/c7a765614b330c8ca260a7b155f31d0a0b59bd6c/HttpServer.py
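The hunk above special-cases .bz2, .tgz and .gz document names before filtering headers; the second positional argument in the old guess_type call is the strict flag. The suffix handling it relies on, shown with made-up file names:

import mimetypes

mimetypes.guess_type("foo.tgz")   # ('application/x-tar', 'gzip'): .tgz expands to .tar.gz
mimetypes.guess_type("foo.bz2")   # (None, 'bzip2'): encoding recognized, type unknown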
p.feed("""<a><t""")
p.feed("""r>""")
p.feed("""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <HTML> <HEAD> <META NAME="Description" CONTENT="Obsession Development: Products"> <META NAME="Resource-Type" CONTENT="document"> <META NAME="Content-Type" CONTENT="text/html, charset=iso-8859-1"> <META HTTP-EQUIV="Content-Style-Type" CONTENT="text/css"> <LINK REL="Stylesheet" HREF="../obsession.css" TYPE="text/css"> <TITLE>Obsession Development: gentoo</TITLE> <!-- SCRIPT LANGUAGE="JavaScript"> <!-- if (document.images) { var obsessionoff = new Image() obsessionoff.src = "../ObsessionOff.GIF" var obsessionon = new Image() obsessionon.src = "../ObsessionOn.GIF" var contactoff = new Image() contactoff.src = "../ContactOff.GIF" var contacton = new Image() contacton.src = "../ContactOn.GIF" var projectsoff = new Image() projectsoff.src = "../ProjectsOff.GIF" var projectson = new Image() projectson.src = "../ProjectsOn.GIF" } function actMenuItem(img1,img2) { act(img1) } function inactMenuItem(img1, img2) { inact(img1) } function act(imgName) { if (document.images) document[imgName].src = eval(imgName + 'on.src') window.status = "Click me!" } function inact(imgName) { if (document.images) document[imgName].src = eval(imgName + 'off.src') window.status = "Obsession - Just ideas." } // --> </SCRIPT --> </HEAD> <BODY BACKGROUND="../bk14.gif" BGCOLOR=" <TABLE COLS=3 ROWS=2 CELLPADDING=0 CELLSPACING=0 BORDER=0> <TR> <TD WIDTH=164 HEIGHT=36><IMG CLASS="Hemlig" SRC="../spacer.GIF" WIDTH=1 HEIGHT=36 BORDER="0"></TD><TD></TD> </TR> <TR> <TD></TD><TD> <TABLE COLS=2 CELLPADDING=0 CELLSPACING=0 BORDER=0> <TR> <TD WIDTH=127 VALIGN=Top ALIGN="CENTER"> <IMG SRC="../OD_Logo-Small.GIF" WIDTH=127 HEIGHT=136 ALT="Magic" BORDER="0"> <BR> <A HREF="MAILTO:[email protected]?subject=[gentoo]"><IMG SRC="../Junk.GIF" WIDTH=25 HEIGHT=19 ALT="E-mail Emil" BORDER="0"></A> <BR> <FONT FACE="Verdana, Arial, Helvetica, sans-serif" SIZE="1"><B><A HREF="MAILTO:[email protected]?subject=[gentoo]">E-mail Author</A> <BR><BR><BR> <P align="center" class="Margin"> <B>Download</B><BR> <A href="http://prdownloads.sourceforge.net/gentoo/gentoo-0.11.25.tar.gz?download" title="Download gentoo"> <IMG border=0 height=17 src="../download.gif" width="19">&nbsp;&nbsp;<B>Download<BR> gentoo 0.11.25 (http)</A> </B> <BR>[711 KB, tar.gz] <BR>Requires GTK+ 1.2.x </P> <P align=center class=Margin> <B>Patch</B><BR> <A href="http://prdownloads.sourceforge.net/gentoo/diff-0.11.24-to-0.11.25.gz?download" title="Download Patch"> <IMG border=0 height=17 src="../download.gif" width=19>&nbsp;&nbsp; <B>Download<BR>0.11.24 to 0.11.25 (http)</A> </B> <BR>[28 KB diff -ruN patch, gzipped] </P><BR> <P align=center class=Margin> <P align=center class=Margin>Packages<BR> <A href="ftp://ftp.falsehope.com/pub/gentoo">Red Hat RPMs</A><BR> [Maintainer: <A href="mailto:[email protected]">Ryan Weaver</A>] <BR> <BR> <A href="http://www.debian.org/Packages/unstable/x11/gentoo.html">Debian DEBs</A><BR> [Maintainer: <A href="mailto:[email protected]">Josip Rodin</A>]<BR> <BR> Gentoo Linux users, type<BR> <TT>emerge app-misc/gentoo</TT> <BR> <BR> <A HREF="http://www.openbsd.org/cgi-bin/cvsweb/ports/x11/gentoo">OpenBSD Port</A><BR> [Maintainer: Jim Geovedi]<BR> </P> <P align=center class=Margin>AppIndex<BR> New releases of gentoo are announced on <A href="http://freshmeat.net/">FreshMeat</A> You can go directly to gentoo's <A href="http://freshmeat.net/appindex/1998/09/24/906621975.html">AppIndex page</A>. 
</P></FONT> </TD> <TD WIDTH=16></TD> <TD WIDTH=256> <TABLE WIDTH=512 COLS=2 CELLPADDING=0 CELLSPACING=0 BORDER="0"> <TR> <TD WIDTH="8" ROWSPAN="2"></TD <TD WIDTH="512" HEIGHT="88"> <IMG NAME="obsession" SRC="../spacer.GIF" WIDTH=32 HEIGHT=32 BORDER="0" HSPACE=8> <BR CLEAR=All> <IMG SRC="../Just02.GIF" WIDTH=256 HEIGHT=88 ALT="gentoo logo" BORDER="0"></TD> </TR ><TR> <TD> <BR><FONT FACE="Arial, Geneva, Helvetica, sans-serif" SIZE="2"> <P> gentoo is a modern, powerful, flexible, and utterly configurable file manager for UNIX systems, written using the GTK+ toolkit. It aims to be 100% graphically configurable; there's no need to edit config files by hand and then restart the application. gentoo is somewhat inspired in its look &amp; feel by the classic Amiga program DirectoryOpus. It has been successfully tested on a variety of platforms, including Linux/x86, Linux/Alpha, Solaris, FreeBSD and OpenBSD. </P> <P> (If you came here looking for the <A href="http://www.gentoo.org/">Gentoo Linux</A> distribution, you know where to click. Then come back and download gentoo to manage your files with! :) </P> <B>Features</B> <P>Some of the main features of gentoo are: <UL> <FONT face="Arial, Geneva, Helvetica, sans-serif" size="2"> <LI>Written from scratch, using ANSI C and the GTK+ toolkit. <LI>Aims to be 100% graphically configurable, and comes pretty close, too. <LI>Very cool (!) file typing and -styling systems allows you to configure how files of different types are shown (with colors and icons), and what happens when you doubleclick them (spawn image viewers, music players, etc). <LI>Includes more than 120 original pixmaps icons (16x15 pixels). <LI>Internal support for most file operations (copy, move, rename, rename, makedir etc). </FONT> </UL> </P> <B>Requirements</B> <P> The most modern (0.11.x) releases of <B>gentoo</B> require GTK+ 1.2.x. As is normal with GTK+ applications, gentoo also requires the GDK and glib libraries. If you have a working GTK+ installation, you will have these too, so don't worry. If your system does <B>not</B> have GTK+ installed, you need to download it (and glib) from <A href="http://www.gtk.org/">http://www.gtk.org/</A>. </P> <P> It is nice, but not required, to have the <CODE>file(1)</CODE> command installed, since gentoo can use it to identify filetypes. Please be aware that not all <CODE>file</CODE> commands supplied with commercial Un*xes are good enough to be used with gentoo (this is the case with Sun's <CODE>file</CODE> implementation, for example). You might want to look for a replacement. The version found <A href="http://freshmeat.net/projects/file/?highlight=file">here</A> is recommended. 
</P> <P> A few <B>screenshots</B> of gentoo are also available (Shots show gentoo version 0.11.11, running under <A href="http://www.windowmaker.org/">Window Maker</A> and were taken on 2000-01-04): <UL> <FONT face="Arial, Geneva, Helvetica, sans-serif"> <LI><A href="/gentoo/main.gif" title="Screenshot of gentoo">Main Window</A> [41 KB GIF] <LI><A href="/gentoo/cfg_dirpane.gif" title="Screenshot of gentoo">Dir Pane Config</A> [21 KB GIF] <LI><A href="/gentoo/cfg_styles.gif" title="Screenshot of gentoo">File Style Config</A> [15 KB GIF] <LI><A href="/gentoo/cfg_types.gif" title="Screenshot of gentoo">File Type Config</A> [18 KB GIF] <LI><A href="/gentoo/cfg_buttons.gif" title="Screenshot of gentoo">Action Button Config</A> [16 KB GIF] </FONT> </UL> <P></P> <P> <B>User-Contributed Screenshots</B> <UL> <FONT face="Arial, Geneva, Helvetica, sans-serif"> <LI><A href="/gentoo/contrib/Stefan_Eiserman.gif">Main Window</A> [By Stefan Eiserman, 77 KB GIF]</LI> <LI><A href="/gentoo/contrib/Stefan_Eiserman2.jpg">Main Window, big</A> [Also by Stefan Eiserman, 333 KB JPG]</LI> <LI><A href="/gentoo/contrib/theduke_dockicon.xpm">Window Maker dock icon</A> [By Kris, &lt;<A href="mailto:[email protected]">[email protected]</A>&gt;, 21 KB XPM]</LI> <LI><A HREF="/gentoo/contrib/Stefan_Nicolin.jpg">Main Window (themed GTK+)</A> [By Stefan Nicolin, 75 KB JPG]</LI> <LI><A HREF="/gentoo/contrib/Johannes_Tevessen.gif">Main Window (themed GTK+)</A> [By Johannes Tevessen, 58 KB GIF]</LI> <LI><A HREF="/gentoo/contrib/Erik_Sittmann.jpg">Main Window (Cygwin/Win32)</A> [By Erik Sittmann, 388 KB JPG]</LI> </FONT> </UL> </P> </TD> </TR> </TABLE> </TD> </TR> </TABLE> </TD> </TR> </TABLE> </BODY> </HTML> """)
def _broken ():
    p = HtmlPrinter()
    p.feed("""<a><t""")
    p.feed("""r>""")
    p.flush()
5b39040dd85d5381c8f17f797f74aad71815bfd8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5b39040dd85d5381c8f17f797f74aad71815bfd8/htmllib.py
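The test above feeds a start tag split across two feed() calls ('<a><t', then 'r>'), so the parser must buffer the incomplete fragment instead of emitting a broken tag. The same behaviour demonstrated with the standard-library parser (Python 3 names; HtmlPrinter in the row is the project's own class):

from html.parser import HTMLParser

class Printer (HTMLParser):
    def handle_starttag (self, tag, attrs):
        print("start", tag)

p = Printer()
p.feed("<a><t")   # emits 'start a'; the incomplete '<t' stays buffered
p.feed("r>")      # completes '<tr>' and emits 'start tr'
p.close()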
if not category.valid_value(value):
if category.iterable:
    realvalue = value
else:
    realvalue = _intrange_from_string(value)
if not category.valid_value(realvalue):
def _form_ratings (form):
    """Check category value validity"""
    for catname, value in _get_prefix_vals(form, 'category_'):
        category = _get_category(catname)
        if category is None:
            # unknown category
            error['categoryvalue'] = True
            return False
        if not category.valid_value(value):
            error['categoryvalue'] = True
            return False
        if category.iterable:
            values[catname]['none'] = False
            values[catname][value] = True
        else:
            values[catname] = value
    return True
a4c18f8b00dd11d6673142806f476a8ab23a0a0f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a4c18f8b00dd11d6673142806f476a8ab23a0a0f/rating_html.py
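The fix above runs non-iterable category values through _intrange_from_string before validating them. That helper is not shown in this row; purely as a hypothetical sketch, it plausibly turns form strings like '4' or '2-5' into an integer range:

def intrange_from_string (value):
    # hypothetical stand-in for _intrange_from_string; not the project's code
    if "-" in value:
        lo, hi = value.split("-", 1)
        return (int(lo), int(hi))
    return (int(value), int(value))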
p = wc.configuration.ZapperParser(filename)
p = wc.configuration.confparse.ZapperParser(filename)
def read_ids (filename, ids):
    p = wc.configuration.ZapperParser(filename)
    p.parse()
    ids['folder']['sid'] = str(p.folder.sid)
    ids['folder']['oid'] = p.folder.oid
    ids['folder']['configversion'] = str(p.folder.configversion)
    for rule in p.folder.rules:
        for ftype in ('domains', 'urls'):
            if rule.name.endswith(ftype):
                ids[ftype]['sid'] = str(rule.sid)
3465b5ee3ef1c0df3638f47dce4d547cf4c2df9c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/3465b5ee3ef1c0df3638f47dce4d547cf4c2df9c/bl2wc.py
fname = "extracted/"+fname[:-3]
fname = os.path.join(extract_to, fname[:-3])
def blacklist (fname, extract_to="extracted"):
    source = os.path.join("downloads", fname)
    # extract tar
    if fname.endswith(".tar.gz") or fname.endswith(".tgz"):
        print "extracting archive", fname
        f = tarfile.TarFile.gzopen(source)
        for m in f:
            a, b = os.path.split(m.name)
            a = os.path.basename(a)
            if b in myfiles and a in mycats:
                print m.name
                f.extract(m, extract_to)
        f.close()
        read_blacklists(extract_to)
        rm_rf(extract_to)
    elif fname.endswith(".gz"):
        print "gunzip..."
        f = gzip.open(source)
        fname = "extracted/"+fname[:-3]
        os.makedirs(os.path.dirname(fname), 0722)
        w = file(fname, 'wb')
        w.write(f.read())
        w.close()
        f.close()
        read_data(fname, "domains", domains)
3465b5ee3ef1c0df3638f47dce4d547cf4c2df9c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/3465b5ee3ef1c0df3638f47dce4d547cf4c2df9c/bl2wc.py
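The one-line fix above replaces a hard-coded 'extracted/' prefix with os.path.join, so the extract_to parameter is honoured and no path separator is assumed. The difference in isolation (file name is made up):

import os.path

fname = "adult-domains.gz"
bad = "extracted/" + fname[:-3]               # ignores extract_to, hard-codes '/'
good = os.path.join("extracted", fname[:-3])  # any extract_to value works portably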
self.js_script += data
self.js_script += data.decode(self.htmlparser.encoding, "ignore")
def jsScriptData (self, data, url, ver):
    """Callback for loading <script src=""> data in the background
    If downloading is finished, data is None"""
    assert self.htmlparser.state[0] == 'wait', "non-wait state"
    wc.log.debug(wc.LOG_JS, "%s jsScriptData %r", self, data)
    if data is None:
        if not self.js_script:
            wc.log.warn(wc.LOG_JS, "empty JavaScript src %s", url)
            self.js_script = u"// "+\
                _("error fetching script from %r") % url
        self.htmlparser.tagbuf.append(
            [wc.filter.rules.RewriteRule.STARTTAG, u"script",
             {'type': 'text/javascript'}])
        # norm html comments
        script = wc.js.remove_html_comments(self.js_script)
        script = u"\n<!--\n%s\n//-->\n" % wc.js.escape_js(script)
        self.htmlparser.tagbuf.append(
            [wc.filter.rules.RewriteRule.DATA, script])
        # Note: <script src=""> could be missing an end tag,
        # but now we need one. Look later for a duplicate </script>.
        self.htmlparser.tagbuf.append(
            [wc.filter.rules.RewriteRule.ENDTAG, u"script"])
        self.js_script = u''
        self.htmlparser.state = ('parse',)
        wc.log.debug(wc.LOG_JS, "%s switching back to parse with", self)
        self.htmlparser.debugbuf(wc.LOG_JS)
    else:
        wc.log.debug(wc.LOG_JS, "JS read %d <= %s", len(data), url)
        self.js_script += data
8ccd48f9e30f773513b3a2077760f5ffc6bb980b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/8ccd48f9e30f773513b3a2077760f5ffc6bb980b/JSFilter.py
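The fix above decodes freshly read script bytes with the HTML parser's detected page encoding (errors='ignore') instead of concatenating raw bytes onto a unicode buffer. Why the encoding matters, shown on a made-up latin-1 byte string:

raw = b'alert("caf\xe9")'        # latin-1 encoded JavaScript source
raw.decode("latin-1")            # 'alert("café")'  - correct
raw.decode("utf-8", "ignore")    # 'alert("caf")'   - the invalid byte is dropped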
res = [None]
def _form_reset ():
    """reset info/error and global vars"""
    global filterenabled, filterdisabled
    filterenabled = u""
    filterdisabled = u""
    for key in info.keys():
        info[key] = False
    for key in error.keys():
        error[key] = False
    res = [None]
f22f1610959a45b0274edaa01996e69b5de6708c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/f22f1610959a45b0274edaa01996e69b5de6708c/config_html.py
def update (wconfig, baseurl, dryrun=False, log=None):
def update (wconfig, dryrun=False, log=None):
def update (wconfig, baseurl, dryrun=False, log=None):
    """Update the given configuration object with .zap files found at baseurl.
    If dryrun is True, only print out the changes but do nothing
    throws IOError on error
    """
    chg = False
    url = baseurl+"filter-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, "error fetching %s:"%url, msg
        return chg
    # remember all local config files
    filemap = {}
    for filename in wc.filterconf_files():
        filemap[os.path.basename(filename)] = filename
    # read md5sums
    for line in page.read().splitlines():
        if "<" in line:
            print >>log, "error fetching", url
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        assert filename.endswith('.zap')
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if filemap.has_key(filename):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating filter %s")%filename
        else:
            print >>log, i18n._("adding new filter %s")%filename
        # parse new filter
        url = baseurl+filename+".gz"
        page = open_url(url)
        p = wc.ZapperParser(fullname, compile_data=False)
        p.parse(page, wconfig)
        page.close()
        chg = wconfig.merge_folder(p.folder, dryrun=dryrun, log=log) or chg
    url = baseurl+"extern-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, i18n._("error fetching %s:")%url, msg
        return chg
    lines = page.read().splitlines()
    page.close()
    for line in lines:
        if "<" in line:
            print >>log, i18n._("error fetching %s:")%url, i18n._("invalid content")
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        # XXX UNIX-generated md5sum filenames with subdirs are not portable
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if os.path.exists(fullname):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("extern filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating extern filter %s")%filename
        else:
            print >>log, i18n._("adding new extern filter %s")%filename
        chg = True
        if not dryrun:
            url = baseurl+filename
            try:
                page = open_url(url)
            except IOError, msg:
                print >>log, i18n._("error fetching %s:")%url, msg
                continue
            data = page.read()
            if not data:
                print >>log, i18n._("error fetching %s:")%url, i18n._("got no data")
                continue
            f = file(fullname, 'wb')
            f.write(data)
            f.close()
    return chg
8342ac908384bd779d9849f21b36dc47ba58d309 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/8342ac908384bd779d9849f21b36dc47ba58d309/update.py
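update() verifies each local .zap and extern file against the digests in filter-md5sums.txt, building a hex digest by hand from md5.new(...).digest(). The equivalent check as a stand-alone helper on the modern hashlib API (helper name is illustrative):

import hashlib

def file_changed (path, expected_md5):
    # compare a local file against a checksum line from filter-md5sums.txt
    with open(path, "rb") as f:
        return hashlib.md5(f.read()).hexdigest() != expected_md5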
p = wc.ZapperParser(fullname, compile_data=False)
p.parse(page, wconfig)
p = wc.ZapperParser(fullname, wconfig, compile_data=False)
p.parse(fp=page)
8342ac908384bd779d9849f21b36dc47ba58d309 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/8342ac908384bd779d9849f21b36dc47ba58d309/update.py
def _test ():
    initlog("test/logging.conf")
    baseurl = "http://localhost/~calvin/webcleaner.sf.net/htdocs/test/"
    update(wc.Configuration(), baseurl, dryrun=True)

if __name__=='__main__':
    _test()
8342ac908384bd779d9849f21b36dc47ba58d309 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/8342ac908384bd779d9849f21b36dc47ba58d309/update.py
def new_instance (self, opts):
def new_instance (self, **opts):
def new_instance (self, opts):
    return HtmlFilter(self.rules, self.ratings, self.url, **opts)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
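The signature fix above (opts to **opts) switches from passing one options dict positionally to accepting real keyword arguments and re-expanding them into the HtmlFilter constructor. The mechanics in miniature:

def new_instance (**opts):
    return opts

new_instance(comments=False, javascript=True)
# -> {'comments': False, 'javascript': True}; callers write plain keywords,
#    and **opts forwards them unchanged to the next call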
self._debug("cdata %r", data)
debug(FILTER, "%s cdata %r", self, data)
def cdata (self, data):
    """character data"""
    self._debug("cdata %r", data)
    return self._data(data)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
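This row and the four below replace the _debug method wrapper with a module-level debug(FILTER, ...) call whose first %s argument is the filter instance itself; with several filters active on one page, that is what keeps interleaved log lines attributable. The same idea expressed with the standard logging module (class name is illustrative):

import logging

log = logging.getLogger("wc.filter")

class HtmlFilterDemo (object):
    def cdata (self, data):
        # lazy %-formatting; '%s' renders self, so instances stay distinguishable
        log.debug("%s cdata %r", self, data)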
self._debug("characters %r", data)
debug(FILTER, "%s characters %r", self, data)
def characters (self, data):
    """characters"""
    self._debug("characters %r", data)
    return self._data(data)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
self._debug("comment %r", data)
debug(FILTER, "%s comment %r", self, data)
def comment (self, data):
    """a comment; accept only non-empty comments"""
    if not (self.comments and data):
        return
    self._debug("comment %r", data)
    item = [COMMENT, data]
    self.htmlparser.tagbuf.append(item)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
self._debug("doctype %r", data)
debug(FILTER, "%s doctype %r", self, data)
def doctype (self, data):
    self._debug("doctype %r", data)
    return self._data("<!DOCTYPE%s>"%data)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
self._debug("pi %r", data)
debug(FILTER, "%s pi %r", self, data)
def pi (self, data):
    self._debug("pi %r", data)
    return self._data("<?%s?>"%data)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
self._debug("startElement %r", tag)
debug(FILTER, "%s startElement %r", self, tag)
if self._is_waiting([STARTTAG, tag, attrs]):
    return
def startElement (self, tag, attrs):
    """We get a new start tag. New rules could be appended to the
    pending rules. No rules can be removed from the list."""
    # default data
    self._debug("startElement %r", tag)
    tag = check_spelling(tag, self.url)
    if self.stackcount:
        if self.stackcount[-1][0]==tag:
            self.stackcount[-1][1] += 1
    if tag=="meta":
        if attrs.get('http-equiv', '').lower() == 'content-rating':
            rating = resolve_html_entities(attrs.get('content', ''))
            url, rating = rating_import(self.url, rating)
            # note: always put this in the cache, since this overrides
            # any http header setting, and page content changes more
            # often
            rating_add(url, rating)
    elif tag=="body":
        if self.ratings:
            # headers finished, check rating data
            for rule in self.ratings:
                msg = rating_allow(self.url, rule)
                if msg:
                    raise FilterRating(msg)
            self.ratings = []
    elif tag=="base" and attrs.has_key('href'):
        self.base_url = attrs['href']
        # some base urls are just the host name, eg. www.imadoofus.com
        if not urllib.splittype(self.base_url)[0]:
            self.base_url = "%s://%s" % \
                            (urllib.splittype(self.url)[0], self.base_url)
        self._debug("using base url %r", self.base_url)
    # search for and prevent known security flaws in HTML
    self.security.scan_start_tag(tag, attrs, self)
    # look for filter rules which apply
    self._filterStartElement(tag, attrs)
    # if rule stack is empty, write out the buffered data
    if not self.rulestack and not self.javascript:
        self.htmlparser.tagbuf2data()
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
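The added guard calls self._is_waiting([STARTTAG, tag, attrs]) before any filtering, and the matching end-tag hunk below does the same. The helper itself is not shown in these rows; a hypothetical sketch, assuming a 'wait' parser state and a replay buffer as the surrounding calls suggest:

def _is_waiting (self, item):
    # hypothetical: while the parser is stalled (e.g. fetching an external
    # script), record the event for later replay instead of filtering it now
    if self.htmlparser.state[0] == 'wait':
        self.htmlparser.waitbuf.append(item)
        return True
    return False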
self._debug("using base url %r", self.base_url)
debug(FILTER, "%s using base url %r", self, self.base_url)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
self._filterStartElement(tag, attrs)
self.filterStartElement(tag, attrs)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
def _filterStartElement (self, tag, attrs):
def filterStartElement (self, tag, attrs):
def _filterStartElement (self, tag, attrs):
    """filter the start element according to filter rules"""
    rulelist = []
    filtered = False
    item = [STARTTAG, tag, attrs]
    for rule in self.rules:
        if rule.match_tag(tag) and rule.match_attrs(attrs):
            self._debug("matched rule %r on tag %r", rule.title, tag)
            if rule.start_sufficient:
                item = rule.filter_tag(tag, attrs)
                filtered = True
                if item[0]==STARTTAG and item[1]==tag:
                    foo, tag, attrs = item
                    # give'em a chance to replace more than one attribute
                    continue
                else:
                    break
            else:
                self._debug("put on buffer")
                rulelist.append(rule)
    if rulelist:
        # remember buffer position for end tag matching
        pos = len(self.htmlparser.tagbuf)
        self.rulestack.append((pos, rulelist))
        self.stackcount.append([tag, 1])
    if filtered:
        # put filtered item on tag buffer
        self.htmlparser.tagbuf.append(item)
    elif self.javascript:
        # if it's not yet filtered, try filter javascript
        self._jsStartElement(tag, attrs)
    else:
        # put original item on tag buffer
        self.htmlparser.tagbuf.append(item)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
self._debug("matched rule %r on tag %r", rule.title, tag)
debug(FILTER, "%s matched rule %r on tag %r", self, rule.title, tag)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
self._debug("put on buffer")
debug(FILTER, "%s put rule %r on buffer", self, rule.title)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
self._jsStartElement(tag, attrs)
self.jsStartElement(tag, attrs)
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
self._debug("endElement %r", tag)
debug(FILTER, "%s endElement %r", self, tag)
if self._is_waiting([ENDTAG, tag]):
    return
def endElement (self, tag):
    """We know the following: if a rule matches, it must be
    the one on the top of the stack. So we look only at the top rule.
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
def endElement (self, tag):
    """We know the following: if a rule matches, it must be
    the one on the top of the stack. So we look only at the top rule.
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
if not self._filterEndElement(tag):
if not self.filterEndElement(tag):
def endElement (self, tag):
    """We know the following: if a rule matches, it must be
    the one on the top of the stack. So we look only at the top rule.
4a1c178ba0dd448a42795b2c19afac8cb1126130 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/4a1c178ba0dd448a42795b2c19afac8cb1126130/HtmlFilter.py
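The truncated endElement docstring in these rows states the invariant the filter relies on: a matching rule can only be the entry on top of the rule stack. That is ordinary balanced-tag nesting; a generic illustration, unrelated to the project's own data structures:

stack = []
for kind, tag in [("start", "div"), ("start", "b"), ("end", "b"), ("end", "div")]:
    if kind == "start":
        stack.append(tag)
    elif stack and stack[-1] == tag:
        stack.pop()   # an end tag can only close the innermost open element
assert not stack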