rem (stringlengths 1–322k) | add (stringlengths 0–2.05M) | context (stringlengths 4–228k) | meta (stringlengths 156–215) |
---|---|---|---|
if not urlparts[2]: if urlparts[0] and \ urlparts[0] not in urlparse.non_hierarchical and \ (urlparts[3] or urlparts[4]): urlparts[2] = '/' else: urlparts[2] = collapse_segments(urlparts[2]) | is_hierarchical = urlparts[0] not in urlparse.non_hierarchical if is_hierarchical: if not urlparts[2]: if urlparts[0] and (urlparts[3] or urlparts[4]): urlparts[2] = '/' else: urlparts[2] = collapse_segments(urlparts[2]) | def url_norm (url): """ Normalize the given URL which must be quoted. Supports unicode hostnames (IDNA encoding) according to RFC 3490. @return: (normed url, idna flag) @rtype: tuple of length two """ urlparts = list(urlparse.urlsplit(url)) # scheme urlparts[0] = urllib.unquote(urlparts[0]).lower() # mailto: urlsplit is broken if urlparts[0] == 'mailto': url_fix_mailto_urlsplit(urlparts) # host (with path or query side effects) is_idn = url_fix_host(urlparts) # query urlparts[3] = url_parse_query(urlparts[3]) if not urlparts[2]: # empty path is allowed if url is non-hierarchical, or if both # query and fragment are also empty # note that in relative links, urlparts[0] might be empty # in this case, do not make any assumptions if urlparts[0] and \ urlparts[0] not in urlparse.non_hierarchical and \ (urlparts[3] or urlparts[4]): urlparts[2] = '/' else: # fix redundant path parts urlparts[2] = collapse_segments(urlparts[2]) # quote parts again urlparts[0] = urllib.quote(urlparts[0]) # scheme urlparts[1] = urllib.quote(urlparts[1], '@:') # host urlparts[2] = urllib.quote(urlparts[2], _nopathquote_chars) # path res = urlparse.urlunsplit(urlparts) if url.endswith('#') and not urlparts[4]: # re-append trailing empty fragment res += '#' return (res, is_idn) | aa523745b34373f1092ec5c52339619ffc90bf51 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/aa523745b34373f1092ec5c52339619ffc90bf51/url.py |
data.append("has_crypto = %s" % str(has_crypto)) data.append("has_pil = %s" % str(has_pil)) data.append("has_ssl = %s" % str(has_ssl)) | def create_conf_file (self, data, directory=None): """create local config file from given data (list of lines) in the directory (or current directory if not given) """ data.insert(0, "# this file is automatically created by setup.py") data.insert(0, "# -*- coding: iso-8859-1 -*-") if directory is None: directory = os.getcwd() filename = self.get_conf_filename(directory) # add metadata metanames = ("name", "version", "author", "author_email", "maintainer", "maintainer_email", "url", "license", "description", "long_description", "keywords", "platforms", "fullname", "contact", "contact_email", "fullname") for name in metanames: method = "get_" + name cmd = "%s = %r" % (name, getattr(self.metadata, method)()) data.append(cmd) data.append('appname = "WebCleaner"') data.append("has_crypto = %s" % str(has_crypto)) data.append("has_pil = %s" % str(has_pil)) data.append("has_ssl = %s" % str(has_ssl)) util.execute(write_file, (filename, data), "creating %s" % filename, self.verbose>=1, self.dry_run) | 1799c5e967ddd8901b4af589616ffe9f52933134 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1799c5e967ddd8901b4af589616ffe9f52933134/setup.py |
scripts = [ 'webcleaner', 'webcleaner-certificates', ] if os.name=='nt' or win_cross_compiling: scripts.append('install-webcleaner.py') | def create_conf_file (self, data, directory=None): """create local config file from given data (list of lines) in the directory (or current directory if not given) """ data.insert(0, "# this file is automatically created by setup.py") data.insert(0, "# -*- coding: iso-8859-1 -*-") if directory is None: directory = os.getcwd() filename = self.get_conf_filename(directory) # add metadata metanames = ("name", "version", "author", "author_email", "maintainer", "maintainer_email", "url", "license", "description", "long_description", "keywords", "platforms", "fullname", "contact", "contact_email", "fullname") for name in metanames: method = "get_" + name cmd = "%s = %r" % (name, getattr(self.metadata, method)()) data.append(cmd) data.append('appname = "WebCleaner"') data.append("has_crypto = %s" % str(has_crypto)) data.append("has_pil = %s" % str(has_pil)) data.append("has_ssl = %s" % str(has_ssl)) util.execute(write_file, (filename, data), "creating %s" % filename, self.verbose>=1, self.dry_run) | 1799c5e967ddd8901b4af589616ffe9f52933134 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1799c5e967ddd8901b4af589616ffe9f52933134/setup.py |
scripts = ['webcleaner', 'webcleaner-certificates'], | scripts = scripts, | def create_conf_file (self, data, directory=None): """create local config file from given data (list of lines) in the directory (or current directory if not given) """ data.insert(0, "# this file is automatically created by setup.py") data.insert(0, "# -*- coding: iso-8859-1 -*-") if directory is None: directory = os.getcwd() filename = self.get_conf_filename(directory) # add metadata metanames = ("name", "version", "author", "author_email", "maintainer", "maintainer_email", "url", "license", "description", "long_description", "keywords", "platforms", "fullname", "contact", "contact_email", "fullname") for name in metanames: method = "get_" + name cmd = "%s = %r" % (name, getattr(self.metadata, method)()) data.append(cmd) data.append('appname = "WebCleaner"') data.append("has_crypto = %s" % str(has_crypto)) data.append("has_pil = %s" % str(has_pil)) data.append("has_ssl = %s" % str(has_ssl)) util.execute(write_file, (filename, data), "creating %s" % filename, self.verbose>=1, self.dry_run) | 1799c5e967ddd8901b4af589616ffe9f52933134 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1799c5e967ddd8901b4af589616ffe9f52933134/setup.py |
filters = [FILTER_RESPONSE_HEADER, FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE, ] self.attrs = get_filterattrs(self.url, filters, headers=msg) | self.attrs = get_filterattrs(self.url, [FILTER_RESPONSE_HEADER], headers=msg) | def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # get headers fp = StringIO(self.read(m.end())) msg = WcMessage(fp) # put unparsed data (if any) back to the buffer msg.rewindbody() self.recv_buffer = fp.read() + self.recv_buffer debug(PROXY, "%s server headers\n%s", str(self), str(msg)) if self.statuscode==100: # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return http_ver = serverpool.http_versions[self.addr] if http_ver >= (1,1): self.persistent = not has_header_value(msg, 'Connection', 'Close') elif http_ver >= (1,0): self.persistent = has_header_value(msg, 'Connection', 'Keep-Alive') else: self.persistent = False filters = [FILTER_RESPONSE_HEADER, FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE, ] self.attrs = get_filterattrs(self.url, filters, headers=msg) try: self.headers = applyfilter(FILTER_RESPONSE_HEADER, msg, "finish", self.attrs) except FilterPics, msg: self.statuscode = 403 debug(PROXY, "%s FilterPics %s", str(self), `msg`) # XXX get version response = "HTTP/1.1 403 Forbidden" headers = WcMessage(StringIO('Content-type: text/plain\r\n' 'Content-Length: %d\r\n\r\n' % len(msg))) self.client.server_response(response, self.statuscode, headers) self.client.server_content(msg) self.client.server_close() self.state = 'recycle' self.reuse() return server_set_headers(self.headers) self.bytes_remaining = server_set_encoding_headers(self.headers, self.is_rewrite(), self.decoders, self.client.compress, self.bytes_remaining) # 304 Not Modified does not send any type info, because it was cached if self.statuscode!=304: server_set_content_headers(self.headers, self.document, self.mime, self.url) # XXX <doh> #if not self.headers.has_key('Content-Length'): # self.headers['Connection'] = 'close\r' #remove_headers(self.headers, ['Keep-Alive']) # XXX </doh> if self.statuscode!=407: self.client.server_response(self.response, self.statuscode, self.headers) if self.statuscode in (204, 304) or self.method == 'HEAD': # These response codes indicate no content self.state = 'recycle' else: self.state = 'content' | a4b027340c36d7b5af70fcf0a57827d429c563ea /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a4b027340c36d7b5af70fcf0a57827d429c563ea/HttpServer.py |
is_closed = decoder.closed or is_closed | debug(PROXY, "%s have run decoder %s", str(self), str(decoder)) if not is_closed and decoder.closed: is_closed = True | def process_content (self): data = self.read(self.bytes_remaining) if self.bytes_remaining is not None: # If we do know how many bytes we're dealing with, # we'll close the connection when we're done self.bytes_remaining -= len(data) debug(PROXY, "%s %d bytes remaining", str(self), self.bytes_remaining) is_closed = False for decoder in self.decoders: data = decoder.decode(data) is_closed = decoder.closed or is_closed try: for i in [FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE]: data = applyfilter(i, data, "filter", self.attrs) if data: if self.statuscode!=407: self.client.server_content(data) self.data_written = True except FilterWait, msg: debug(PROXY, "%s FilterWait %s", str(self), `msg`) except FilterPics, msg: debug(PROXY, "%s FilterPics %s", str(self), `msg`) assert not self.data_written # XXX interactive options here self.client.server_content(str(msg)) self.client.server_close() self.state = 'recycle' self.reuse() return underflow = self.bytes_remaining is not None and \ self.bytes_remaining < 0 if underflow: warn(PROXY, i18n._("server received %d bytes more than content-length"), (-self.bytes_remaining)) if is_closed or self.bytes_remaining==0: # Either we ran out of bytes, or the decoder says we're done self.state = 'recycle' | a4b027340c36d7b5af70fcf0a57827d429c563ea /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a4b027340c36d7b5af70fcf0a57827d429c563ea/HttpServer.py |
for i in [FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE]: | for i in _response_filters: | def process_content (self): data = self.read(self.bytes_remaining) if self.bytes_remaining is not None: # If we do know how many bytes we're dealing with, # we'll close the connection when we're done self.bytes_remaining -= len(data) debug(PROXY, "%s %d bytes remaining", str(self), self.bytes_remaining) is_closed = False for decoder in self.decoders: data = decoder.decode(data) is_closed = decoder.closed or is_closed try: for i in [FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE]: data = applyfilter(i, data, "filter", self.attrs) if data: if self.statuscode!=407: self.client.server_content(data) self.data_written = True except FilterWait, msg: debug(PROXY, "%s FilterWait %s", str(self), `msg`) except FilterPics, msg: debug(PROXY, "%s FilterPics %s", str(self), `msg`) assert not self.data_written # XXX interactive options here self.client.server_content(str(msg)) self.client.server_close() self.state = 'recycle' self.reuse() return underflow = self.bytes_remaining is not None and \ self.bytes_remaining < 0 if underflow: warn(PROXY, i18n._("server received %d bytes more than content-length"), (-self.bytes_remaining)) if is_closed or self.bytes_remaining==0: # Either we ran out of bytes, or the decoder says we're done self.state = 'recycle' | a4b027340c36d7b5af70fcf0a57827d429c563ea /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a4b027340c36d7b5af70fcf0a57827d429c563ea/HttpServer.py |
for i in [FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE]: | for i in _response_filters: | def flush (self): """flush data of decoders (if any) and filters""" debug(PROXY, "%s flushing", str(self)) self.flushing = True data = "" while self.decoders: data = self.decoders[0].flush() del self.decoders[0] for decoder in self.decoders: data = decoder.decode(data) try: for i in [FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE]: data = applyfilter(i, data, "finish", self.attrs) except FilterWait, msg: debug(PROXY, "%s FilterWait %s", str(self), `msg`) # the filter still needs some data so try flushing again # after a while make_timer(0.2, lambda : self.flush()) return # the client might already have closed if self.client and self.statuscode!=407: if data: self.client.server_content(data) self.client.server_close() self.attrs = {} if self.statuscode!=407: self.reuse() | a4b027340c36d7b5af70fcf0a57827d429c563ea /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a4b027340c36d7b5af70fcf0a57827d429c563ea/HttpServer.py |
version = "3.0", | version = "2.40", | def get_file_list (self): super(MySdist, self).get_file_list() self.filelist.append("MANIFEST") | 0296c29f312b852025ac1972d504b1a62be65bc4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0296c29f312b852025ac1972d504b1a62be65bc4/setup.py |
BUF_SIZE=512 | def getAttrs (self, headers, url): # weed out the rules that dont apply to this url rules = filter(lambda r, u=url: r.appliesTo(u), self.rules) if not rules: return {} return {'buf': Buf(rules)} | d792f423cab25aa473c86812bb3c8611dd32d0cf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d792f423cab25aa473c86812bb3c8611dd32d0cf/Replacer.py |
self.rules = rules | def __init__ (self, rules): self.buf = "" self.rules = rules | d792f423cab25aa473c86812bb3c8611dd32d0cf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d792f423cab25aa473c86812bb3c8611dd32d0cf/Replacer.py |
data = self.buf + data | self.buf += data if len(self.buf) > 512: self._replace() if len(self.buf) > 256: data = self.buf self.buf = self.buf[-256:] return data[:-256] return "" def _replace (self): | def replace (self, data): data = self.buf + data for rule in self.rules: data = rule.search.sub(rule.replace, data) self.buf = data[-BUF_SIZE:] return data[:-BUF_SIZE] | d792f423cab25aa473c86812bb3c8611dd32d0cf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d792f423cab25aa473c86812bb3c8611dd32d0cf/Replacer.py |
data = rule.search.sub(rule.replace, data) self.buf = data[-BUF_SIZE:] return data[:-BUF_SIZE] | self.buf = rule.search.sub(rule.replace, self.buf) | def replace (self, data): data = self.buf + data for rule in self.rules: data = rule.search.sub(rule.replace, data) self.buf = data[-BUF_SIZE:] return data[:-BUF_SIZE] | d792f423cab25aa473c86812bb3c8611dd32d0cf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d792f423cab25aa473c86812bb3c8611dd32d0cf/Replacer.py |
fv = FXVerticalFrame(g, LAYOUT_FILL_X|LAYOUT_LEFT|LAYOUT_TOP, 0,0,0,0, 0,0,0,0, 0,0) | fv = FXVerticalFrame(g, LAYOUT_FILL_X|LAYOUT_FILL_Y|LAYOUT_LEFT|LAYOUT_TOP, 0,0,0,0, 0,0,0,0, 0,0) | def __init__ (self, parent, rule, index): """initialize pics rule display frame""" FXRuleFrame.__init__(self, parent, rule, index) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_URL,FXPicsRuleFrame.onCmdUrl) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_SERVICE,FXPicsRuleFrame.onCmdService) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_CATEGORY,FXPicsRuleFrame.onCmdCategory) | 0350497bce6790417e0d4b5f3bc223a2bdce721c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0350497bce6790417e0d4b5f3bc223a2bdce721c/FXPicsRuleFrame.py |
FXTextField(fh, 25, self, FXPicsRuleFrame.ID_URL).setText(self.rule.url) scroll = FXScrollWindow(self, LAYOUT_FILL_X|LAYOUT_FILL_Y|LAYOUT_LEFT|LAYOUT_TOP|SCROLLERS_TRACK, 0,0,0,0) fv = FXVerticalFrame(scroll, LAYOUT_FILL_X|LAYOUT_LEFT|LAYOUT_TOP, 0,0,0,0, 0,0,0,0, 0,0) | FXTextField(fh, 27, self, FXPicsRuleFrame.ID_URL).setText(self.rule.url) scroll = FXScrollWindow(fv, LAYOUT_FILL_X|LAYOUT_FILL_Y|LAYOUT_LEFT|LAYOUT_TOP|SCROLLERS_TRACK, 0,0,0,0) fv = FXVerticalFrame(scroll, LAYOUT_FILL_X|LAYOUT_FILL_Y|LAYOUT_LEFT|LAYOUT_TOP, 0,0,0,0, 0,0,0,0, 0,0) | def __init__ (self, parent, rule, index): """initialize pics rule display frame""" FXRuleFrame.__init__(self, parent, rule, index) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_URL,FXPicsRuleFrame.onCmdUrl) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_SERVICE,FXPicsRuleFrame.onCmdService) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_CATEGORY,FXPicsRuleFrame.onCmdCategory) | 0350497bce6790417e0d4b5f3bc223a2bdce721c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0350497bce6790417e0d4b5f3bc223a2bdce721c/FXPicsRuleFrame.py |
for service, sdata in services.items(): | _services = services.keys() _services.sort() for service in _services: sdata = services[service] | def __init__ (self, parent, rule, index): """initialize pics rule display frame""" FXRuleFrame.__init__(self, parent, rule, index) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_URL,FXPicsRuleFrame.onCmdUrl) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_SERVICE,FXPicsRuleFrame.onCmdService) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_CATEGORY,FXPicsRuleFrame.onCmdCategory) | 0350497bce6790417e0d4b5f3bc223a2bdce721c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0350497bce6790417e0d4b5f3bc223a2bdce721c/FXPicsRuleFrame.py |
for category in sdata['categories'].keys(): | _categories = sdata['categories'].keys() _categories.sort() for category in _categories: | def __init__ (self, parent, rule, index): """initialize pics rule display frame""" FXRuleFrame.__init__(self, parent, rule, index) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_URL,FXPicsRuleFrame.onCmdUrl) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_SERVICE,FXPicsRuleFrame.onCmdService) FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_CATEGORY,FXPicsRuleFrame.onCmdCategory) | 0350497bce6790417e0d4b5f3bc223a2bdce721c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0350497bce6790417e0d4b5f3bc223a2bdce721c/FXPicsRuleFrame.py |
def create_message1 (flags=0xb203): "" | def create_message1 (flags="\xb2\x03"): | def create_message1 (flags=0xb203): "" # overall lenght = 48 bytes protocol = 'NTLMSSP\000' #name type = '\001\000' #type 1 zeros1 = '\000\000' flags = utils.hex2str(flags) zeros2 = '\000\000\000\000\000\000\000\000\000' zeros3 = '\000\000\000\000\000\000\000\000\000\000\000' smthg1 = '0\000\000\000\000\000\000\000' # something with chr(48) length? smthg2 = '0\000\000\000' # something with chr(48) lenght? msg1 = protocol + type + zeros1 + flags + zeros2 + zeros3 + smthg1 + smthg2 msg1 = base64.encodestring(msg1) msg1 = msg1.replace('\012', '') return msg1 | eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd/ntlm.py |
protocol = 'NTLMSSP\000' type = '\001\000' zeros1 = '\000\000' flags = utils.hex2str(flags) zeros2 = '\000\000\000\000\000\000\000\000\000' zeros3 = '\000\000\000\000\000\000\000\000\000\000\000' smthg1 = '0\000\000\000\000\000\000\000' smthg2 = '0\000\000\000' msg1 = protocol + type + zeros1 + flags + zeros2 + zeros3 + smthg1 + smthg2 msg1 = base64.encodestring(msg1) msg1 = msg1.replace('\012', '') return msg1 def create_message2 (flags=0x8201): "" | protocol = 'NTLMSSP\x00' type = '\x01' zero3 = '\x00'*3 zero2 = '\x00'*2 domain = "WORKGROUP" dom_len = len(domain) host = "UNKNOWN" host_len = len(host) host_off = 32 dom_off = host_off + len(host) msg = "%(protocol)s%(type)s%(zero3)s%(flags)s%(zero2)s%(dom_len)02d%(dom_len)02d%(dom_off)02d00%(host_len)02d%(host_len)02d%(host_off)02d00%(host)s%(domain)s" % locals() return base64.encodestring(msg).strip() def create_message2 (flags="\x82\x01"): | def create_message1 (flags=0xb203): "" # overall lenght = 48 bytes protocol = 'NTLMSSP\000' #name type = '\001\000' #type 1 zeros1 = '\000\000' flags = utils.hex2str(flags) zeros2 = '\000\000\000\000\000\000\000\000\000' zeros3 = '\000\000\000\000\000\000\000\000\000\000\000' smthg1 = '0\000\000\000\000\000\000\000' # something with chr(48) length? smthg2 = '0\000\000\000' # something with chr(48) lenght? msg1 = protocol + type + zeros1 + flags + zeros2 + zeros3 + smthg1 + smthg2 msg1 = base64.encodestring(msg1) msg1 = msg1.replace('\012', '') return msg1 | eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd/ntlm.py |
flags = utils.hex2str(flags) | def create_message2 (flags=0x8201): "" protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' flags = utils.hex2str(flags) nonce = "%08f" % (random.random()*10) assert nonce not in nonces nonces[nonce] = None zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals() | eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd/ntlm.py |
return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals() def create_message3 (nonce, domain, username, host, flags=0x8201, | msg = "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals() return base64.encodestring(msg).strip() def create_message3 (nonce, domain, username, host, flags="\x82\x01", | def create_message2 (flags=0x8201): "" protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' flags = utils.hex2str(flags) nonce = "%08f" % (random.random()*10) assert nonce not in nonces nonces[nonce] = None zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals() | eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd/ntlm.py |
"" flags = utils.hex2str(flags) | def create_message3 (nonce, domain, username, host, flags=0x8201, lm_hashed_pw=None, nt_hashed_pw=None, ntlm_mode=0): "" flags = utils.hex2str(flags) protocol = 'NTLMSSP\000' #name type = '\003\000' #type 3 head = protocol + type + '\000\000' domain_rec = record(domain) user_rec = record(username) host_rec = record(host) additional_rec = record('') if lm_hashed_pw: lm_rec = record(ntlm_procs.calc_resp(lm_hashed_pw, nonce)) else: lm_rec = record('') if nt_hashed_pw: nt_rec = record(ntlm_procs.calc_resp(nt_hashed_pw, nonce)) else: nt_rec = record('') # length of the head and five infos for LM, NT, Domain, User, Host domain_offset = len(head) + 5 * 8 # and unknown record info and flags' lenght if nltm_mode == 0: domain_offset = domain_offset + 8 + len(flags) # create info fields domain_rec.create_record_info(domain_offset) user_rec.create_record_info(domain_rec.next_offset) host_rec.create_record_info(user_rec.next_offset) lm_rec.create_record_info(host_rec.next_offset) nt_rec.create_record_info(lm_rec.next_offset) additional_rec.create_record_info(nt_rec.next_offset) # data part of the message 3 data_part = domain_rec.data + user_rec.data + host_rec.data + lm_rec.data + nt_rec.data # build message 3 m3 = head + lm_rec.record_info + nt_rec.record_info + \ domain_rec.record_info + user_rec.record_info + host_rec.record_info # Experimental feature !!! if ntlm_mode == 0: m3 += additional_rec.record_info + flags m3 += data_part # Experimental feature !!! if ntlm_mode == 0: m3 += additional_rec.data # base64 encode m3 = base64.encodestring(m3) m3 = m3.replace('\012', '') return m3 | eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd/ntlm.py |
m3 = base64.encodestring(m3) m3 = m3.replace('\012', '') return m3 | return base64.encodestring(m3).strip() | def create_message3 (nonce, domain, username, host, flags=0x8201, lm_hashed_pw=None, nt_hashed_pw=None, ntlm_mode=0): "" flags = utils.hex2str(flags) protocol = 'NTLMSSP\000' #name type = '\003\000' #type 3 head = protocol + type + '\000\000' domain_rec = record(domain) user_rec = record(username) host_rec = record(host) additional_rec = record('') if lm_hashed_pw: lm_rec = record(ntlm_procs.calc_resp(lm_hashed_pw, nonce)) else: lm_rec = record('') if nt_hashed_pw: nt_rec = record(ntlm_procs.calc_resp(nt_hashed_pw, nonce)) else: nt_rec = record('') # length of the head and five infos for LM, NT, Domain, User, Host domain_offset = len(head) + 5 * 8 # and unknown record info and flags' lenght if nltm_mode == 0: domain_offset = domain_offset + 8 + len(flags) # create info fields domain_rec.create_record_info(domain_offset) user_rec.create_record_info(domain_rec.next_offset) host_rec.create_record_info(user_rec.next_offset) lm_rec.create_record_info(host_rec.next_offset) nt_rec.create_record_info(lm_rec.next_offset) additional_rec.create_record_info(nt_rec.next_offset) # data part of the message 3 data_part = domain_rec.data + user_rec.data + host_rec.data + lm_rec.data + nt_rec.data # build message 3 m3 = head + lm_rec.record_info + nt_rec.record_info + \ domain_rec.record_info + user_rec.record_info + host_rec.record_info # Experimental feature !!! if ntlm_mode == 0: m3 += additional_rec.record_info + flags m3 += data_part # Experimental feature !!! if ntlm_mode == 0: m3 += additional_rec.data # base64 encode m3 = base64.encodestring(m3) m3 = m3.replace('\012', '') return m3 | eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/eee8c9cb632f740c77f1c8da55a9ebd54bbc2cbd/ntlm.py |
def error(self, code, msg, txt=""): | def error(self, code, msg, txt=''): | def error(self, code, msg, txt=""): content = wc.proxy.HTML_TEMPLATE % \ {'title': "WebCleaner Proxy Error %d %s" % (code, msg), 'header': "Bummer!", 'content': "WebCleaner Proxy Error %d %s<br>%s<br>" % \ (code, msg, txt), } if config['proxyuser']: auth = 'Proxy-Authenticate: Basic realm="WebCleaner"\r\n' else: auth = "" ServerHandleDirectly(self.client, 'HTTP/1.0 %d %s\r\n', 'Server: WebCleaner Proxy\r\n' +\ 'Content-type: text/html\r\n' +\ '%s'%auth +\ '\r\n', content) | d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c/ClientServerMatchmaker.py |
{'title': "WebCleaner Proxy Error %d %s" % (code, msg), 'header': "Bummer!", 'content': "WebCleaner Proxy Error %d %s<br>%s<br>" % \ | {'title': 'WebCleaner Proxy Error %d %s' % (code, msg), 'header': 'Bummer!', 'content': 'WebCleaner Proxy Error %d %s<br>%s<br>' % \ | def error(self, code, msg, txt=""): content = wc.proxy.HTML_TEMPLATE % \ {'title': "WebCleaner Proxy Error %d %s" % (code, msg), 'header': "Bummer!", 'content': "WebCleaner Proxy Error %d %s<br>%s<br>" % \ (code, msg, txt), } if config['proxyuser']: auth = 'Proxy-Authenticate: Basic realm="WebCleaner"\r\n' else: auth = "" ServerHandleDirectly(self.client, 'HTTP/1.0 %d %s\r\n', 'Server: WebCleaner Proxy\r\n' +\ 'Content-type: text/html\r\n' +\ '%s'%auth +\ '\r\n', content) | d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c/ClientServerMatchmaker.py |
else: auth = "" | http_ver = '1.0' else: auth = '' http_ver = '1.1' | def error(self, code, msg, txt=""): content = wc.proxy.HTML_TEMPLATE % \ {'title': "WebCleaner Proxy Error %d %s" % (code, msg), 'header': "Bummer!", 'content': "WebCleaner Proxy Error %d %s<br>%s<br>" % \ (code, msg, txt), } if config['proxyuser']: auth = 'Proxy-Authenticate: Basic realm="WebCleaner"\r\n' else: auth = "" ServerHandleDirectly(self.client, 'HTTP/1.0 %d %s\r\n', 'Server: WebCleaner Proxy\r\n' +\ 'Content-type: text/html\r\n' +\ '%s'%auth +\ '\r\n', content) | d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c/ClientServerMatchmaker.py |
'HTTP/1.0 %d %s\r\n', | 'HTTP/%s %d %s\r\n' % (http_ver, code, msg), | def error(self, code, msg, txt=""): content = wc.proxy.HTML_TEMPLATE % \ {'title': "WebCleaner Proxy Error %d %s" % (code, msg), 'header': "Bummer!", 'content': "WebCleaner Proxy Error %d %s<br>%s<br>" % \ (code, msg, txt), } if config['proxyuser']: auth = 'Proxy-Authenticate: Basic realm="WebCleaner"\r\n' else: auth = "" ServerHandleDirectly(self.client, 'HTTP/1.0 %d %s\r\n', 'Server: WebCleaner Proxy\r\n' +\ 'Content-type: text/html\r\n' +\ '%s'%auth +\ '\r\n', content) | d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c/ClientServerMatchmaker.py |
if not self.headers.has_key("Proxy-Authorization"): | if not self.check_proxy_auth(): | def __init__(self, client, request, headers, content, nofilter): self.client = client self.request = request self.headers = headers if config["proxyuser"]: if not self.headers.has_key("Proxy-Authorization"): self.error(407, _("Proxy Authentication Required")) return auth = self.headers['Proxy-Authorization'] # XXX more self.content = content self.nofilter = nofilter self.url = "" try: self.method, self.url, protocol = request.split() except: config['requests']['error'] += 1 self.error(400, _("Can't parse request")) return if not self.url: config['requests']['error'] += 1 self.error(400, _("Empty URL")) return scheme, hostname, port, document = wc.proxy.spliturl(self.url) #debug(HURT_ME_PLENTY, "splitted url", scheme, hostname, port, document) if scheme=='file': # a blocked url is a local file:// link # this means we should _not_ use this proxy for local # file links :) mtype = mimetypes.guess_type(self.url)[0] config['requests']['valid'] += 1 config['requests']['blocked'] += 1 ServerHandleDirectly(self.client, 'HTTP/1.0 200 OK\r\n', 'Content-Type: %s\r\n\r\n' % (mtype or 'application/octet-stream'), open(document, 'rb').read()) return | d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c/ClientServerMatchmaker.py |
auth = self.headers['Proxy-Authorization'] | def __init__(self, client, request, headers, content, nofilter): self.client = client self.request = request self.headers = headers if config["proxyuser"]: if not self.headers.has_key("Proxy-Authorization"): self.error(407, _("Proxy Authentication Required")) return auth = self.headers['Proxy-Authorization'] # XXX more self.content = content self.nofilter = nofilter self.url = "" try: self.method, self.url, protocol = request.split() except: config['requests']['error'] += 1 self.error(400, _("Can't parse request")) return if not self.url: config['requests']['error'] += 1 self.error(400, _("Empty URL")) return scheme, hostname, port, document = wc.proxy.spliturl(self.url) #debug(HURT_ME_PLENTY, "splitted url", scheme, hostname, port, document) if scheme=='file': # a blocked url is a local file:// link # this means we should _not_ use this proxy for local # file links :) mtype = mimetypes.guess_type(self.url)[0] config['requests']['valid'] += 1 config['requests']['blocked'] += 1 ServerHandleDirectly(self.client, 'HTTP/1.0 200 OK\r\n', 'Content-Type: %s\r\n\r\n' % (mtype or 'application/octet-stream'), open(document, 'rb').read()) return | d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/d6835cc5191913e3d2e6a5c36b16f6b076d9fc6c/ClientServerMatchmaker.py |
version = "3.0", | version = "2.37.1", | def run (self): if self.all: # remove share directory directory = os.path.join("build", "share") if os.path.exists(directory): remove_tree(directory, dry_run=self.dry_run) else: distutils.log.warn("'%s' does not exist -- can't clean it", directory) clean.run(self) | 559963c04f7f520eecc034d26a0d81ca51ab7006 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/559963c04f7f520eecc034d26a0d81ca51ab7006/setup.py |
return email.parseaddr(address)[1] | cleaned = parseaddr(address) if not cleaned[0]: return cleaned[1] return '%s <%s>'%cleaned | def valid_mail (address): """return cleaned up mail, or an empty string on errors""" return email.parseaddr(address)[1] | 83ef4103816d8105fb34b696537246472ac7b80b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/83ef4103816d8105fb34b696537246472ac7b80b/mail.py |
new_url = client.scheme+"://"+answer.data | new_url = self.client.scheme+"://"+answer.data | def handle_dns (self, hostname, answer): assert self.state == 'dns' debug(PROXY, "%s handle dns", self) if not self.client.connected: warn(PROXY, "%s client closed after DNS", self) # The browser has already closed this connection, so abort return if answer.isFound(): self.ipaddr = answer.data[0] self.state = 'server' self.find_server() elif answer.isRedirect(): # Let's use a different hostname new_url = client.scheme+"://"+answer.data if self.port != 80: new_url += ':%d' % self.port # XXX does not work with parent proxy new_url += self.document info(PROXY, "%s redirecting %r", self, new_url) self.state = 'done' # XXX find http version! ServerHandleDirectly( self.client, '%s 301 Moved Permanently' % self.protocol, 301, WcMessage(StringIO('Content-type: text/plain\r\n' 'Location: %s\r\n\r\n' % new_url)), i18n._('Host %s is an abbreviation for %s')%(hostname, answer.data)) else: # Couldn't look up the host, # close this connection self.state = 'done' self.client.error(504, i18n._("Host not found"), i18n._('Host %s not found .. %s')%(hostname, answer.data)) | 07b076dbb7836f51476edcc709d7ea5fed1c2fd2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/07b076dbb7836f51476edcc709d7ea5fed1c2fd2/ClientServerMatchmaker.py |
handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount) | handler = RotatingFileHandler(logfile, mode, maxBytes, backupCount) | def get_root_handler (): """return a handler for basic logging""" if os.name=="nt": from logging.handlers import NTEventLogHandler return set_format(NTEventLogHandler(Name)) logfile = get_log_file("%s.err"%Name) mode = 'a' maxBytes = 1024*1024*2 # 2 MB backupCount = 5 # number of files to generate handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount) return set_format(handler) | cd61b5d0732d9cf7c5fc78fcf0449f7ed23e9a73 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cd61b5d0732d9cf7c5fc78fcf0449f7ed23e9a73/log.py |
handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount) | handler = RotatingFileHandler(logfile, mode, maxBytes, backupCount) | def get_wc_handler (): """return a handler for webcleaner logging""" if os.name=="nt": from logging.handlers import NTEventLogHandler return set_format(NTEventLogHandler(Name)) logfile = get_log_file("%s.log"%Name) mode = 'a' maxBytes = 1024*1024*2 # 2 MB backupCount = 5 # number of files to generate handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount) return set_format(handler) | cd61b5d0732d9cf7c5fc78fcf0449f7ed23e9a73 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cd61b5d0732d9cf7c5fc78fcf0449f7ed23e9a73/log.py |
handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount) | handler = RotatingFileHandler(logfile, mode, maxBytes, backupCount) | def get_access_handler (): """return a handler for access logging""" logfile = get_log_file("%s-access.log"%Name) mode = 'a' maxBytes = 1024*1024*2 # 2 MB backupCount = 5 # number of files to generate handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount) # log only the message handler.setFormatter(logging.Formatter("%(message)s")) return handler | cd61b5d0732d9cf7c5fc78fcf0449f7ed23e9a73 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cd61b5d0732d9cf7c5fc78fcf0449f7ed23e9a73/log.py |
class WcRotatingFileHandler (RotatingFileHandler): def emit (self, record): """ A little more verbose emit function. """ try: msg = self.format(record) self.stream.write("%s\n" % msg) self.flush() except: print >>sys.stderr, "Could not format record", record self.handleError(record) | def get_last_word_boundary (s, width): """Get maximal index i of a whitespace char in s with 0 < i < width. Note: if s contains no whitespace this returns width-1""" match = re.compile(".*\s").match(s[0:width]) if match: return match.end() return width-1 | cd61b5d0732d9cf7c5fc78fcf0449f7ed23e9a73 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cd61b5d0732d9cf7c5fc78fcf0449f7ed23e9a73/log.py |
ret.add("%s/%d" % (net, mask2suffix(mask))) | ret.add("%s/%d" % (num2dq(net), mask2suffix(mask))) | def map2hosts (hostmap): ret = hostmap[0].copy() for net, mask in hostmap[1]: ret.add("%s/%d" % (net, mask2suffix(mask))) return ret | 1a584930b76e3762c682e1ca728c15730bd1d4d9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1a584930b76e3762c682e1ca728c15730bd1d4d9/ip.py |
hosts, nets = hosts2map(["192.168.1.1/16"]) for net, mask in nets: print num2dq(net), mask2suffix(mask) | hosts = ["192.168.1.1/16"] hostmap = hosts2map(hosts) print hostmap print map2hosts(hostmap) | def _test (): hosts, nets = hosts2map(["192.168.1.1/16"]) for net, mask in nets: print num2dq(net), mask2suffix(mask) | 1a584930b76e3762c682e1ca728c15730bd1d4d9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1a584930b76e3762c682e1ca728c15730bd1d4d9/ip.py |
magiccache = magicfile+".mgc" | magiccache = magicfile + ".mgc" | def classify (fp, magicdir=wc.ConfigDir): """ Classify a file. """ global _magic if _magic is None: # initialize mime data magicfile = os.path.join(magicdir, "magic.mime") assert os.path.exists(magicfile) magiccache = magicfile+".mgc" _magic = Magic(magicfile, magiccache) pos = fp.tell() mime = _magic.classify(fp) fp.seek(pos) if mime: # split off any trailing info return mime.split()[0] return None | a65e9da4792a85c74c5472c300e688791388a325 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a65e9da4792a85c74c5472c300e688791388a325/__init__.py |
result = result.lstrip('\x08').strip().replace(' \x08', '') | result = result.lstrip('\x08').replace(' \x08', '').strip() | def classify (self, f): if not self.entries: raise StandardError("Not initialised properly") # Are we still looking for the ruleset to apply or are we in a rule found_rule = False # If we failed part of the rule there is no point looking for # higher level subrule allow_next = 0 # String provided by the successfull rule result = "" | a65e9da4792a85c74c5472c300e688791388a325 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/a65e9da4792a85c74c5472c300e688791388a325/__init__.py |
return '<Listener:%s>' % self.addr | return '<Listener:%s>' % str(self.addr) | def __repr__ (self): """return listener class and address""" return '<Listener:%s>' % self.addr | dd51273ff91c062a037181d48648f9c55fd6ce0a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/dd51273ff91c062a037181d48648f9c55fd6ce0a/Listener.py |
self.ratings[service] = {} | self.ratings[self.service] = {} | def fill_attrs (self, attrs, name): if name=='pics': UrlRule.fill_attrs(self, attrs, name) elif name=='service': self.service = unxmlify(attrs.get('name')).encode('iso8859-1') self.ratings[service] = {} elif name=='category': assert self.service self.category = unxmlify(attrs.get('name')).encode('iso8859-1') else: raise ValueError(i18n._("Invalid pics rule tag name `%s',"+\ " check your configuration")%name) | 6c0bd38421f7a0042ce2bea8107ff49e0b5b9231 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/6c0bd38421f7a0042ce2bea8107ff49e0b5b9231/PicsRule.py |
pid = ing(file(pidfile).read()) | pid = int(file(pidfile).read()) | def status (pidfile): if os.path.exists(pidfile): pid = ing(file(pidfile).read()) return i18n._("WebCleaner is running (PID %d)")%pid, 0 else: return i18n._("WebCleaner is not running (no lock file found)"), 3 | 2ea0d1d113b6cda31008c0e861b40f9bbed20881 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/2ea0d1d113b6cda31008c0e861b40f9bbed20881/__init__.py |
from wc import BaseUrl | doreload = False from wc import update, BaseUrl, Configuration | def onCmdConfUpdate (self, sender, sel, ptr): """download files from http://webcleaner.sourceforge.net/zapper/ and copy them over the existing config""" # base url for all files from wc import BaseUrl dialog = FXMessageBox(self,i18n._("Update Help"),UpdateHelp % BaseUrl,None,MBOX_OK_CANCEL) if self.getApp().doShow(dialog) != MBOX_CLICKED_OK: return 1 try: # XXX log into window wc.update.update(wc.config, BaseUrl) wc.config.write_filterconf() except IOError, msg: self.getApp().error(i18n._("Update Error"), "%s: %s" % (i18n._("Update Error"), msg)) else: if doreload: self.handle(self, MKUINT(ConfWindow.ID_PROXYRELOAD,SEL_COMMAND), None) return 1 | 97a00b6551c9fe1180c1c220e89c5f389b6d2c59 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/97a00b6551c9fe1180c1c220e89c5f389b6d2c59/ConfWindow.py |
wc.update.update(wc.config, BaseUrl) wc.config.write_filterconf() | doreload = update.update(config, BaseUrl, dryrun=True) config.write_filterconf() | def onCmdConfUpdate (self, sender, sel, ptr): """download files from http://webcleaner.sourceforge.net/zapper/ and copy them over the existing config""" # base url for all files from wc import BaseUrl dialog = FXMessageBox(self,i18n._("Update Help"),UpdateHelp % BaseUrl,None,MBOX_OK_CANCEL) if self.getApp().doShow(dialog) != MBOX_CLICKED_OK: return 1 try: # XXX log into window wc.update.update(wc.config, BaseUrl) wc.config.write_filterconf() except IOError, msg: self.getApp().error(i18n._("Update Error"), "%s: %s" % (i18n._("Update Error"), msg)) else: if doreload: self.handle(self, MKUINT(ConfWindow.ID_PROXYRELOAD,SEL_COMMAND), None) return 1 | 97a00b6551c9fe1180c1c220e89c5f389b6d2c59 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/97a00b6551c9fe1180c1c220e89c5f389b6d2c59/ConfWindow.py |
wc.wstartfunc(handle=self.hWaitStop, confdir=self.configdir, | wc.start.wstartfunc(handle=self.hWaitStop, confdir=self.configdir, | def SvcDoRun (self): """start this service""" import servicemanager # Log a "started" message to the event log. servicemanager.LogMsg( servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, '')) wc.wstartfunc(handle=self.hWaitStop, confdir=self.configdir, filelogs=self.filelogs) # Now log a "service stopped" message servicemanager.LogMsg( servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STOPPED, (self._svc_name_,'')) | 80947f14b3d7d6cd80f06e3aa1aa5b171e157f39 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/80947f14b3d7d6cd80f06e3aa1aa5b171e157f39/win32start.py |
if self.rulestack and self.rulestack[-1][1][0].match_tag(tag): | if self.rulestack and self.rulestack[-1][1][0].match_tag(tag) and \ self.stackcount[-1][0]==tag and self.stackcount[-1][1]<=0: del self.stackcount[-1] | def filterEndElement (self, tag): # remember: self.rulestack[-1][1] is the rulelist that # matched for a start tag. and if the first one ([0]) # matches, all other match too if self.rulestack and self.rulestack[-1][1][0].match_tag(tag): pos, rulelist = self.rulestack.pop() for rule in rulelist: if rule.match_complete(pos, self.buf): rule.filter_complete(pos, self.buf) return True return False | b0c347ed3a53d2ce86a278c37108dc604fde343d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/b0c347ed3a53d2ce86a278c37108dc604fde343d/HtmlParser.py |
if image_re.match(urlTuple[3][-4:]): return '%s %s %s' % (method, blocked or self.block_image, 'image/gif') | if blocked: doc = blocked elif image_re.match(urlTuple[3][-4:]): doc = self.block_image | def doit (self, data, **args): debug(FILTER, "block filter working on %s", `data`) splitted = data.split() if len(splitted)!=3: error(FILTER, "invalid request: %s", `data`) return data method,url,protocol = splitted urlTuple = list(urlparse.urlparse(url)) netloc = urlTuple[1] s = netloc.split(":") if len(s)==2: urlTuple[1:2] = s else: urlTuple[1:2] = [netloc,80] if self.allowed(urlTuple): return data blocked = self.strict_whitelist or self.blocked(urlTuple) if blocked is not None: debug(FILTER, "blocked url %s", url) # index 3, not 2! if image_re.match(urlTuple[3][-4:]): return '%s %s %s' % (method, blocked or self.block_image, 'image/gif') else: # XXX hmmm, what about CGI images? # make HTTP HEAD request? return '%s %s %s' % (method, blocked or self.block_url, 'text/html') return data | ae583cb21e7ac3b4e5a9b35d754719b5c63beaa1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ae583cb21e7ac3b4e5a9b35d754719b5c63beaa1/Blocker.py |
return '%s %s %s' % (method, blocked or self.block_url, 'text/html') | doc = self.block_url return '%s %s HTTP/1.1' % (method, doc) | def doit (self, data, **args): debug(FILTER, "block filter working on %s", `data`) splitted = data.split() if len(splitted)!=3: error(FILTER, "invalid request: %s", `data`) return data method,url,protocol = splitted urlTuple = list(urlparse.urlparse(url)) netloc = urlTuple[1] s = netloc.split(":") if len(s)==2: urlTuple[1:2] = s else: urlTuple[1:2] = [netloc,80] if self.allowed(urlTuple): return data blocked = self.strict_whitelist or self.blocked(urlTuple) if blocked is not None: debug(FILTER, "blocked url %s", url) # index 3, not 2! if image_re.match(urlTuple[3][-4:]): return '%s %s %s' % (method, blocked or self.block_image, 'image/gif') else: # XXX hmmm, what about CGI images? # make HTTP HEAD request? return '%s %s %s' % (method, blocked or self.block_url, 'text/html') return data | ae583cb21e7ac3b4e5a9b35d754719b5c63beaa1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/ae583cb21e7ac3b4e5a9b35d754719b5c63beaa1/Blocker.py |
if not self.rulestack: | if not self.rulestack and \ (not self.javascript or tag!='script'): | def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" rulelist = [] filtered = 0 # default data tobuffer = (STARTTAG, tag, attrs) # look for filter rules which apply for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): #debug(NIGHTMARE, "matched rule %s on tag %s" % (`rule.title`, `tag`)) if rule.start_sufficient: tobuffer = rule.filter_tag(tag, attrs) filtered = "True" # give'em a chance to replace more than one attribute if tobuffer[0]==STARTTAG and tobuffer[1]==tag: foo,tag,attrs = tobuffer continue else: break else: #debug(NIGHTMARE, "put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.buffer) self.rulestack.append((pos, rulelist)) # if its not yet filtered, try filter javascript if filtered: self.buffer_append_data(tobuffer) elif self.javascript: self.jsStartElement(tag, attrs) else: self.buffer.append(tobuffer) # if rule stack is empty, write out the buffered data if not self.rulestack: self.buffer2data() | 6bbaf16e19b35209ab5b0b88d71113c50eee4301 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/6bbaf16e19b35209ab5b0b88d71113c50eee4301/Rewriter.py |
self.jsEnv.executeScriptAsFunction(val) | self.jsEnv.executeScriptAsFunction(val, 0.0) | def jsPopup (self, attrs, name): """check if attrs[name] javascript opens a popup window""" val = attrs[name] if not val: return self.jsEnv.attachListener(self) self.jsEnv.executeScriptAsFunction(val) self.jsEnv.detachListener(self) return self.popup_counter | 6bbaf16e19b35209ab5b0b88d71113c50eee4301 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/6bbaf16e19b35209ab5b0b88d71113c50eee4301/Rewriter.py |
pass | print >>sys.stderr, "JS:", data | def processData (self, data): # XXX pass | 6bbaf16e19b35209ab5b0b88d71113c50eee4301 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/6bbaf16e19b35209ab5b0b88d71113c50eee4301/Rewriter.py |
if not self.buffer: print >>sys.stderr, "empty buffer on </script>" return last = self.buffer[-1] if last[0]!=DATA: print >>sys.stderr, "missing body for </script>", last return script = last[1].strip() if script.startswith("<!--"): script = script[4:].strip() self.jsEnv.attachListener(self) self.jsEnv.executeScriptAsFunction(val, 0.0) self.jsEnv.detachListener(self) | def jsEndElement (self, tag): """parse generated html for scripts""" if tag!='script': return # XXX | 6bbaf16e19b35209ab5b0b88d71113c50eee4301 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/6bbaf16e19b35209ab5b0b88d71113c50eee4301/Rewriter.py |
return "\n".join(HEADERS.getall()) or "-" | return "\n".join(wc.proxy.HEADERS.getall()) or "-" | def text_headers (): return "\n".join(HEADERS.getall()) or "-" | 1e914e9dde1dbe205a651f182b7e4c27396bd4fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1e914e9dde1dbe205a651f182b7e4c27396bd4fc/WebConfig.py |
debug(PROXY, '%s <= read %d', str(self), len(data)) | def handle_read (self): assert self.connected | 80a0fe834009640624bbc33417de676794186569 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/80a0fe834009640624bbc33417de676794186569/Connection.py |
klass = wc.filter.rating.storage.pickle.PickleStorage rating_store = wc.filter.rating.storage.get_rating_store(klass) | rating_store = wc.filter.rating.get_ratings() | def rating_allow (self, url): """ Asks cache if the rule allows the rating data for given url Looks up cache to find rating data, if not returns a MISSING message. """ klass = wc.filter.rating.storage.pickle.PickleStorage rating_store = wc.filter.rating.storage.get_rating_store(klass) # sanitize url url = wc.filter.rating.make_safe_url(url) if url in rating_store: return self.check_against(rating_store[url]) return MISSING | 7db38c2b23ac48e695a60a6b012f8f332a1185cb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/7db38c2b23ac48e695a60a6b012f8f332a1185cb/RatingRule.py |
if os.name=='nt': macros = [('YY_NO_UNISTD_H', None)] else: macros = [] | def create_batch_file(self, directory, data, filename): filename = os.path.join(directory, filename) # write the batch file util.execute(write_file, (filename, data), "creating %s" % filename, self.verbose>=1, self.dry_run) | 474bdd9f95e1585d851798c4c7bce933aabc4e0e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/474bdd9f95e1585d851798c4c7bce933aabc4e0e/setup.py |
import wc | import wc.configuration | def get_wc_config (): """ Get WebCleaner configuration object. """ global _wc_config if _wc_config is None: import wc _wc_config = wc.configuration.init() return _wc_config | 489789e4178040fa44de75dd9b0915fb9b21bcd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/489789e4178040fa44de75dd9b0915fb9b21bcd3/install-webcleaner.py |
debug(PROXY, 'Proxy: reserve_server %s %s', str(addr), str(server)) | debug(PROXY, 'pool reserve %s %s', str(addr), str(server)) | def reserve_server (self, addr): for server,status in self.map.get(addr, {}).items(): if status[0] == 'available': # Let's reuse this one self.map[addr][server] = ('busy', ) debug(PROXY, 'Proxy: reserve_server %s %s', str(addr), str(server)) return server return None | 8fa004b39b4a5c3551e8d2294d084af9e0055974 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/8fa004b39b4a5c3551e8d2294d084af9e0055974/ServerPool.py |
callback(self.hostname, DnsResponse('found', ip_addrs)) | if self.hostname[-4:] in ('.com','.net') and \ '64.94.110.11' in ip_addrs: callback(self.hostname, DnsResponse('error', 'not found')) else: callback(self.hostname, DnsResponse('found', ip_addrs)) | def process_read (self): # Assume that the entire answer comes in one packet if self.conntype == 'tcp': if len(self.recv_buffer) < 2: return header = self.recv_buffer[:2] count = dnslib.Lib.unpack16bit(header) if len(self.recv_buffer) < 2+count: return self.read(2) # header data = self.read(count) self.socket.shutdown(1) else: data = self.read(1024) | 5d368034136eb4e32506e0f377c136826681c818 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5d368034136eb4e32506e0f377c136826681c818/dns_lookups.py |
import wc | import wc.configuration | def _main (): """USAGE: test/run.sh test/getssl.py <https url>""" if len(sys.argv)!=2: print _main.__doc__ sys.exit(1) import wc wc.configuration.config = wc.configuration.init() port = config['port'] sslport = config['sslport'] request(sys.argv[1], sslport) #rawrequest(sys.argv[1], sslport) #rawrequest2(sys.argv[1], sslport) rawrequest3(sys.argv[1], port) | 34e7b10884f374e9460ba7a506910dec8edf816c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/34e7b10884f374e9460ba7a506910dec8edf816c/getssl.py |
port = config['port'] sslport = config['sslport'] | port = wc.configuration.config['port'] sslport = wc.configuration.config['sslport'] | def _main (): """USAGE: test/run.sh test/getssl.py <https url>""" if len(sys.argv)!=2: print _main.__doc__ sys.exit(1) import wc wc.configuration.config = wc.configuration.init() port = config['port'] sslport = config['sslport'] request(sys.argv[1], sslport) #rawrequest(sys.argv[1], sslport) #rawrequest2(sys.argv[1], sslport) rawrequest3(sys.argv[1], port) | 34e7b10884f374e9460ba7a506910dec8edf816c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/34e7b10884f374e9460ba7a506910dec8edf816c/getssl.py |
if ctype not in (0, 2): raise IOError("Invalid NTLM challenge type") | def get_ntlm_challenge (**attrs): """return initial challenge token for ntlm authentication""" ctype = attrs.get('type', 0) if ctype not in (0, 2): raise IOError("Invalid NTLM challenge type") if ctype==0: # initial challenge return "NTLM" if ctype==2: # after getting first credentials return "NTLM %s" % base64.encodestring(create_message2()).strip() | cc86366997daabd45b2dc5de8c6ad1f3d23bea10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cc86366997daabd45b2dc5de8c6ad1f3d23bea10/ntlm.py |
if ctype==2: | elif ctype==2: | def get_ntlm_challenge (**attrs): """return initial challenge token for ntlm authentication""" ctype = attrs.get('type', 0) if ctype not in (0, 2): raise IOError("Invalid NTLM challenge type") if ctype==0: # initial challenge return "NTLM" if ctype==2: # after getting first credentials return "NTLM %s" % base64.encodestring(create_message2()).strip() | cc86366997daabd45b2dc5de8c6ad1f3d23bea10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cc86366997daabd45b2dc5de8c6ad1f3d23bea10/ntlm.py |
if ctype not in (1, 3): raise IOError("Invalid NTLM credentials type") | def get_ntlm_credentials (challenge, **attrs): ctype = attrs.get('type', 1) if ctype not in (1, 3): raise IOError("Invalid NTLM credentials type") if ctype==1: msg = create_message1() elif ctype==3: nonce = attrs['nonce'] domain = attrs['domain'] username = attrs['username'] host = attrs['host'] msg = create_message3(nonce, domain, username, host) return "NTLM %s" % base64.encodestring(msg).strip() | cc86366997daabd45b2dc5de8c6ad1f3d23bea10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cc86366997daabd45b2dc5de8c6ad1f3d23bea10/ntlm.py |
def get_ntlm_type3_message (**attrs): | def get_ntlm_type3_message (**attrs): # extract the required attributes | cc86366997daabd45b2dc5de8c6ad1f3d23bea10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/cc86366997daabd45b2dc5de8c6ad1f3d23bea10/ntlm.py |
lineno = frame.f_lineno filename = frame.f_globals["__file__"] if filename.endswith(".pyc") or filename.endswith(".pyo"): filename = filename[:-1] | def _traceit (frame, event, arg): """ Print current executed line. """ if event == "line": lineno = frame.f_lineno filename = frame.f_globals["__file__"] if filename.endswith(".pyc") or filename.endswith(".pyo"): filename = filename[:-1] name = frame.f_globals["__name__"] line = linecache.getline(filename, lineno) info(tracelog, "%s:%s: %s", name, lineno, line.rstrip()) return _traceit | 20bd07a826aadaf358497702efb933ef4b5ec736 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/20bd07a826aadaf358497702efb933ef4b5ec736/log.py |
line = linecache.getline(filename, lineno) info(tracelog, "%s:%s: %s", name, lineno, line.rstrip()) | if name not in _trace_ignore: lineno = frame.f_lineno filename = frame.f_globals["__file__"] if filename.endswith(".pyc") or filename.endswith(".pyo"): filename = filename[:-1] line = linecache.getline(filename, lineno) print "THREAD(%d) %s:%d: %s" % \ (_thread.get_ident(), name, lineno, line.rstrip()) | def _traceit (frame, event, arg): """ Print current executed line. """ if event == "line": lineno = frame.f_lineno filename = frame.f_globals["__file__"] if filename.endswith(".pyc") or filename.endswith(".pyo"): filename = filename[:-1] name = frame.f_globals["__name__"] line = linecache.getline(filename, lineno) info(tracelog, "%s:%s: %s", name, lineno, line.rstrip()) return _traceit | 20bd07a826aadaf358497702efb933ef4b5ec736 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/20bd07a826aadaf358497702efb933ef4b5ec736/log.py |
parts = wc.filter.rating.split_url(url) | parts = split_url(url) | def make_safe_url (url): """Remove unsafe parts of url for rating cache check.""" parts = wc.filter.rating.split_url(url) pathparts = [make_safe_part(x) for x in parts[2:]] pathparts[0:2] = parts[0:2] return "".join(pathparts) | 749786eada2a86317dd7f3d8900d5b69a36810e3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/749786eada2a86317dd7f3d8900d5b69a36810e3/ratingstorage.py |
if not wc.url.is_safe_url(url): raise ValueError("Invalid rating url %r." % url) | if wc.url.is_safe_url(url): return url return make_safe_url(url) | def check_url (self, url): """If url is not safe raise a ValueError.""" if not wc.url.is_safe_url(url): raise ValueError("Invalid rating url %r." % url) | 749786eada2a86317dd7f3d8900d5b69a36810e3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/749786eada2a86317dd7f3d8900d5b69a36810e3/ratingstorage.py |
self.check_url(url) | url = self.check_url(url) | def __setitem__ (self, url, rating): """Add rating for given url.""" self.check_url(url) self.cache[url] = rating | 749786eada2a86317dd7f3d8900d5b69a36810e3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/749786eada2a86317dd7f3d8900d5b69a36810e3/ratingstorage.py |
self.check_url(url) | url = self.check_url(url) | def __getitem__ (self, url): """Get rating for given url.""" self.check_url(url) # use a specialized form of longest prefix matching: # split the url in parts and the longest matching part wins parts = split_url(url) # the range selects from all parts (full url) down to the first two parts for i in range(len(parts), 1, -1): url = "".join(parts[:i]) if url in self.cache: return self.cache[url] raise KeyError(url) | 749786eada2a86317dd7f3d8900d5b69a36810e3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/749786eada2a86317dd7f3d8900d5b69a36810e3/ratingstorage.py |
def url_norm (url): """Normalize the given URL which must be quoted. Supports unicode hostnames (IDNA encoding) according to RFC 3490. @return (normed url, idna flag) """ urlparts = list(urlparse.urlsplit(url)) urlparts[0] = urllib.unquote(urlparts[0]).lower() is_idn = url_fix_host(urlparts) | def url_fix_mailto_urlsplit (urlparts): """Split query part of mailto url if found.""" if "?" in urlparts[2]: urlparts[2], urlparts[3] = urlparts[2].split('?', 1) def url_parse_query (query): """Parse and re-join the given CGI query.""" | def url_norm (url): """Normalize the given URL which must be quoted. Supports unicode hostnames (IDNA encoding) according to RFC 3490. @return (normed url, idna flag) """ urlparts = list(urlparse.urlsplit(url)) # scheme urlparts[0] = urllib.unquote(urlparts[0]).lower() # host (with path or query side effects) is_idn = url_fix_host(urlparts) # query l = [] for k, v in parse_qsl(urlparts[3], True): k = urllib.quote(k, '/-:,') if v: v = urllib.quote(v, '/-:,') l.append("%s=%s" % (k, v)) elif v is None: l.append(k) else: # some sites do not work when the equal sign is missing l.append("%s=" % k) urlparts[3] = '&'.join(l) if not urlparts[2]: # empty path should be a slash, but not in certain schemes # note that in relative links, urlparts[0] might be empty # in this case, do not make any assumptions if urlparts[0] and urlparts[0] not in urlparse.non_hierarchical: urlparts[2] = '/' else: # fix redundant path parts urlparts[2] = collapse_segments(urlparts[2]) # quote parts again urlparts[0] = urllib.quote(urlparts[0]) # scheme urlparts[1] = urllib.quote(urlparts[1], '@:') # host urlparts[2] = urllib.quote(urlparts[2], _nopathquote_chars) # path res = urlparse.urlunsplit(urlparts) if url.endswith('#') and not urlparts[4]: # re-append trailing empty fragment res += '#' return (res, is_idn) | 188b09c12c54fe3a247a7f70ca9b077077fea744 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/188b09c12c54fe3a247a7f70ca9b077077fea744/url.py |
for k, v in parse_qsl(urlparts[3], True): | for k, v in parse_qsl(query, True): | def url_norm (url): """Normalize the given URL which must be quoted. Supports unicode hostnames (IDNA encoding) according to RFC 3490. @return (normed url, idna flag) """ urlparts = list(urlparse.urlsplit(url)) # scheme urlparts[0] = urllib.unquote(urlparts[0]).lower() # host (with path or query side effects) is_idn = url_fix_host(urlparts) # query l = [] for k, v in parse_qsl(urlparts[3], True): k = urllib.quote(k, '/-:,') if v: v = urllib.quote(v, '/-:,') l.append("%s=%s" % (k, v)) elif v is None: l.append(k) else: # some sites do not work when the equal sign is missing l.append("%s=" % k) urlparts[3] = '&'.join(l) if not urlparts[2]: # empty path should be a slash, but not in certain schemes # note that in relative links, urlparts[0] might be empty # in this case, do not make any assumptions if urlparts[0] and urlparts[0] not in urlparse.non_hierarchical: urlparts[2] = '/' else: # fix redundant path parts urlparts[2] = collapse_segments(urlparts[2]) # quote parts again urlparts[0] = urllib.quote(urlparts[0]) # scheme urlparts[1] = urllib.quote(urlparts[1], '@:') # host urlparts[2] = urllib.quote(urlparts[2], _nopathquote_chars) # path res = urlparse.urlunsplit(urlparts) if url.endswith('#') and not urlparts[4]: # re-append trailing empty fragment res += '#' return (res, is_idn) | 188b09c12c54fe3a247a7f70ca9b077077fea744 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/188b09c12c54fe3a247a7f70ca9b077077fea744/url.py |
urlparts[3] = '&'.join(l) | return '&'.join(l) def url_norm (url): """Normalize the given URL which must be quoted. Supports unicode hostnames (IDNA encoding) according to RFC 3490. @return (normed url, idna flag) """ urlparts = list(urlparse.urlsplit(url)) urlparts[0] = urllib.unquote(urlparts[0]).lower() if urlparts[0] == 'mailto': url_fix_mailto_urlsplit(urlparts) is_idn = url_fix_host(urlparts) urlparts[3] = url_parse_query(urlparts[3]) | def url_norm (url): """Normalize the given URL which must be quoted. Supports unicode hostnames (IDNA encoding) according to RFC 3490. @return (normed url, idna flag) """ urlparts = list(urlparse.urlsplit(url)) # scheme urlparts[0] = urllib.unquote(urlparts[0]).lower() # host (with path or query side effects) is_idn = url_fix_host(urlparts) # query l = [] for k, v in parse_qsl(urlparts[3], True): k = urllib.quote(k, '/-:,') if v: v = urllib.quote(v, '/-:,') l.append("%s=%s" % (k, v)) elif v is None: l.append(k) else: # some sites do not work when the equal sign is missing l.append("%s=" % k) urlparts[3] = '&'.join(l) if not urlparts[2]: # empty path should be a slash, but not in certain schemes # note that in relative links, urlparts[0] might be empty # in this case, do not make any assumptions if urlparts[0] and urlparts[0] not in urlparse.non_hierarchical: urlparts[2] = '/' else: # fix redundant path parts urlparts[2] = collapse_segments(urlparts[2]) # quote parts again urlparts[0] = urllib.quote(urlparts[0]) # scheme urlparts[1] = urllib.quote(urlparts[1], '@:') # host urlparts[2] = urllib.quote(urlparts[2], _nopathquote_chars) # path res = urlparse.urlunsplit(urlparts) if url.endswith('#') and not urlparts[4]: # re-append trailing empty fragment res += '#' return (res, is_idn) | 188b09c12c54fe3a247a7f70ca9b077077fea744 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/188b09c12c54fe3a247a7f70ca9b077077fea744/url.py |
return match_host(spliturl(url)[1], domainlist) | return match_host(url_split(url)[1], domainlist) | def match_url (url, domainlist): """return True if host part of url matches an entry in given domain list""" if not url: return False return match_host(spliturl(url)[1], domainlist) | 188b09c12c54fe3a247a7f70ca9b077077fea744 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/188b09c12c54fe3a247a7f70ca9b077077fea744/url.py |
s2 = "11 22%(sep)s33 44%(sep)s55" % {'sep': os.linesep} | l = len(os.linesep) gap = " "*l s2 = "11%(gap)s22%(sep)s33%(gap)s44%(sep)s55" % \ {'sep': os.linesep, 'gap': gap} | def test_wrap (self): """test line wrapping""" s = "11%(sep)s22%(sep)s33%(sep)s44%(sep)s55" % {'sep': os.linesep} # testing width <= 0 self.assertEquals(wc.strformat.wrap(s, -1), s) self.assertEquals(wc.strformat.wrap(s, 0), s) s2 = "11 22%(sep)s33 44%(sep)s55" % {'sep': os.linesep} # splitting lines self.assertEquals(wc.strformat.wrap(s2, 2), s) # combining lines self.assertEquals(wc.strformat.wrap(s, 5), s2) | 0ea4fc9072b835da1357487274bb91e31e1b29f6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0ea4fc9072b835da1357487274bb91e31e1b29f6/test_strformat.py |
self.assertEquals(wc.strformat.wrap(s, 5), s2) | self.assertEquals(wc.strformat.wrap(s, 4+l), s2) | def test_wrap (self): """test line wrapping""" s = "11%(sep)s22%(sep)s33%(sep)s44%(sep)s55" % {'sep': os.linesep} # testing width <= 0 self.assertEquals(wc.strformat.wrap(s, -1), s) self.assertEquals(wc.strformat.wrap(s, 0), s) s2 = "11 22%(sep)s33 44%(sep)s55" % {'sep': os.linesep} # splitting lines self.assertEquals(wc.strformat.wrap(s2, 2), s) # combining lines self.assertEquals(wc.strformat.wrap(s, 5), s2) | 0ea4fc9072b835da1357487274bb91e31e1b29f6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/0ea4fc9072b835da1357487274bb91e31e1b29f6/test_strformat.py |
def p (path): | def normpath (path): | def p (path): """norm a path name to platform specific notation""" return os.path.normpath(path) | bc3e6db3386fff5a9a56773213e6c935859ec279 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/bc3e6db3386fff5a9a56773213e6c935859ec279/setup.py |
os.path.normcase(os.path.join(base, 'config'))) | cnormpath(os.path.join(base, 'config'))) | def run (self): super(MyInstall, self).run() # we have to write a configuration file because we need the # <install_data> directory (and other stuff like author, url, ...) data = [] for d in ['purelib', 'platlib', 'lib', 'headers', 'scripts', 'data']: attr = 'install_%s'%d if self.root: # cut off root path prefix cutoff = len(self.root) # don't strip the path separator if self.root.endswith(os.sep): cutoff -= 1 val = getattr(self, attr)[cutoff:] else: val = getattr(self, attr) if attr=="install_data": base = os.path.join(val, 'share', 'webcleaner') data.append('config_dir = %r' % \ os.path.normcase(os.path.join(base, 'config'))) data.append('template_dir = %r' % \ os.path.normcase(os.path.join(base, 'templates'))) data.append("%s = %r" % (attr, val)) self.distribution.create_conf_file(data, directory=self.install_lib) | bc3e6db3386fff5a9a56773213e6c935859ec279 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/bc3e6db3386fff5a9a56773213e6c935859ec279/setup.py |
os.path.normcase(os.path.join(base, 'templates'))) | cnormpath(os.path.join(base, 'templates'))) val = cnormpath(val) | def run (self): super(MyInstall, self).run() # we have to write a configuration file because we need the # <install_data> directory (and other stuff like author, url, ...) data = [] for d in ['purelib', 'platlib', 'lib', 'headers', 'scripts', 'data']: attr = 'install_%s'%d if self.root: # cut off root path prefix cutoff = len(self.root) # don't strip the path separator if self.root.endswith(os.sep): cutoff -= 1 val = getattr(self, attr)[cutoff:] else: val = getattr(self, attr) if attr=="install_data": base = os.path.join(val, 'share', 'webcleaner') data.append('config_dir = %r' % \ os.path.normcase(os.path.join(base, 'config'))) data.append('template_dir = %r' % \ os.path.normcase(os.path.join(base, 'templates'))) data.append("%s = %r" % (attr, val)) self.distribution.create_conf_file(data, directory=self.install_lib) | bc3e6db3386fff5a9a56773213e6c935859ec279 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/bc3e6db3386fff5a9a56773213e6c935859ec279/setup.py |
bv = "7.1" | def get_exe_bytes (self): if win_cross_compiling: bv = "7.1" # wininst-x.y.exe is in the same directory as bdist_wininst directory = os.path.dirname(distutils.command.__file__) # we must use a wininst-x.y.exe built with the same C compiler # used for python. filename = os.path.join(directory, "wininst-%s.exe" % bv) return open(filename, "rb").read() return super(MyBdistWininst, self).get_exe_bytes() | bc3e6db3386fff5a9a56773213e6c935859ec279 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/bc3e6db3386fff5a9a56773213e6c935859ec279/setup.py |
|
filename = os.path.join(directory, "wininst-%s.exe" % bv) | filename = os.path.join(directory, "wininst.exe") | def get_exe_bytes (self): if win_cross_compiling: bv = "7.1" # wininst-x.y.exe is in the same directory as bdist_wininst directory = os.path.dirname(distutils.command.__file__) # we must use a wininst-x.y.exe built with the same C compiler # used for python. filename = os.path.join(directory, "wininst-%s.exe" % bv) return open(filename, "rb").read() return super(MyBdistWininst, self).get_exe_bytes() | bc3e6db3386fff5a9a56773213e6c935859ec279 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/bc3e6db3386fff5a9a56773213e6c935859ec279/setup.py |
sources = [p('wc/HtmlParser/htmllex.c'), p('wc/HtmlParser/htmlparse.c'), p('wc/HtmlParser/s_util.c'), | sources = [normpath('wc/HtmlParser/htmllex.c'), normpath('wc/HtmlParser/htmlparse.c'), normpath('wc/HtmlParser/s_util.c'), | def get_exe_bytes (self): if win_cross_compiling: bv = "7.1" # wininst-x.y.exe is in the same directory as bdist_wininst directory = os.path.dirname(distutils.command.__file__) # we must use a wininst-x.y.exe built with the same C compiler # used for python. filename = os.path.join(directory, "wininst-%s.exe" % bv) return open(filename, "rb").read() return super(MyBdistWininst, self).get_exe_bytes() | bc3e6db3386fff5a9a56773213e6c935859ec279 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/bc3e6db3386fff5a9a56773213e6c935859ec279/setup.py |
depends = [p("wc/HtmlParser/htmlsax.h"), p('wc/HtmlParser/s_util.h')], include_dirs = include_dirs + [p("wc/HtmlParser")], | depends = [normpath("wc/HtmlParser/htmlsax.h"), normpath('wc/HtmlParser/s_util.h')], include_dirs = include_dirs + [normpath("wc/HtmlParser")], | def get_exe_bytes (self): if win_cross_compiling: bv = "7.1" # wininst-x.y.exe is in the same directory as bdist_wininst directory = os.path.dirname(distutils.command.__file__) # we must use a wininst-x.y.exe built with the same C compiler # used for python. filename = os.path.join(directory, "wininst-%s.exe" % bv) return open(filename, "rb").read() return super(MyBdistWininst, self).get_exe_bytes() | bc3e6db3386fff5a9a56773213e6c935859ec279 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/bc3e6db3386fff5a9a56773213e6c935859ec279/setup.py |
sources = [p('wc/levenshtein.c'),], | sources = [normpath('wc/levenshtein.c'),], | def get_exe_bytes (self): if win_cross_compiling: bv = "7.1" # wininst-x.y.exe is in the same directory as bdist_wininst directory = os.path.dirname(distutils.command.__file__) # we must use a wininst-x.y.exe built with the same C compiler # used for python. filename = os.path.join(directory, "wininst-%s.exe" % bv) return open(filename, "rb").read() return super(MyBdistWininst, self).get_exe_bytes() | bc3e6db3386fff5a9a56773213e6c935859ec279 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/bc3e6db3386fff5a9a56773213e6c935859ec279/setup.py |
def __init__ (self, headers): | def __init__ (self): | def __init__ (self, headers): """ Initialize internal buffers and flags. """ self.headers = headers self.buf = '' self.bytes_remaining = None self.closed = False | 13d3c310a915e8ab6172f446172dcee2ea602da9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/13d3c310a915e8ab6172f446172dcee2ea602da9/UnchunkStream.py |
self.headers = headers | def __init__ (self, headers): """ Initialize internal buffers and flags. """ self.headers = headers self.buf = '' self.bytes_remaining = None self.closed = False | 13d3c310a915e8ab6172f446172dcee2ea602da9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/13d3c310a915e8ab6172f446172dcee2ea602da9/UnchunkStream.py |
|
if i > 0: | if i >= 0: | def read_footers (self): i = self.buf.find('\r\n\r\n') if i > 0: fp = StringIO.StringIO(self.buf[:i]) self.buf = self.buf[i+4:] msg = wc.http.header.WcMessage(fp) fp.close() for name in msg: for value in msg.getheaders(name): self.headers.addheader(name, value) | 13d3c310a915e8ab6172f446172dcee2ea602da9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/13d3c310a915e8ab6172f446172dcee2ea602da9/UnchunkStream.py |
self.headers.addheader(name, value) | pass | def read_footers (self): i = self.buf.find('\r\n\r\n') if i > 0: fp = StringIO.StringIO(self.buf[:i]) self.buf = self.buf[i+4:] msg = wc.http.header.WcMessage(fp) fp.close() for name in msg: for value in msg.getheaders(name): self.headers.addheader(name, value) | 13d3c310a915e8ab6172f446172dcee2ea602da9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/13d3c310a915e8ab6172f446172dcee2ea602da9/UnchunkStream.py |
remove_old_warning_headers(headers) | remove_warning_headers(headers) | def set_proxy_headers (headers): remove_hop_by_hop_headers(headers) set_via_header(headers) set_date_header(headers) remove_old_warning_headers(headers) | 1b5a86c3b3ea4607d3e2d19cf1ac6551a780b2e7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/1b5a86c3b3ea4607d3e2d19cf1ac6551a780b2e7/HttpServer.py |
if self.rule: | if self.rule is None: self.folder.end_data(name) else: | def end_element (self, name): self.cmode = None if self.rule: self.rule.end_data(name) if name in rulenames: if self.compile_data: self.rule.compile_data() elif name=='folder': if self.compile_data: self.folder.compile_data() | 3b1dd0c1c8226016f7131228333bb2ac1bd852ed /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/3b1dd0c1c8226016f7131228333bb2ac1bd852ed/__init__.py |
"""delegate unknown attrs to self.parser""" | """delegate attrs to self.parser""" | def __getattr__ (self, name): """delegate unknown attrs to self.parser""" return getattr(self.parser, name) | 5f911f689fc469bb83a15a0205e5bb39b7ea7514 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/5f911f689fc469bb83a15a0205e5bb39b7ea7514/htmllib.py |
self.buffer_append_data([DATA; "<?%s?>"%data]) | self.buffer_append_data([DATA, "<?%s?>"%data]) | def pi (self, data): self.buffer_append_data([DATA; "<?%s?>"%data]) | db7e539dd84ff8d24e5028f666e3e0fea16c8690 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/db7e539dd84ff8d24e5028f666e3e0fea16c8690/Rewriter.py |
tracelog = log | if tracelog is None: tracelog = log | def trace (log): """ Start tracing of the current thread (and the current thread only). """ global tracelog tracelog = log sys.settrace(_traceit) | faf1ea81ce85e52a6f731442dc751ffe1189caab /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/faf1ea81ce85e52a6f731442dc751ffe1189caab/log.py |
debug(tracelog, "%s:%s: %s", name, lineno, line.rstrip()) | info(tracelog, "%s:%s: %s", name, lineno, line.rstrip()) | def _traceit (frame, event, arg): """ Print current executed line. """ if event == "line": lineno = frame.f_lineno filename = frame.f_globals["__file__"] if filename.endswith(".pyc") or filename.endswith(".pyo"): filename = filename[:-1] name = frame.f_globals["__name__"] line = linecache.getline(filename, lineno) debug(tracelog, "%s:%s: %s", name, lineno, line.rstrip()) return _traceit | faf1ea81ce85e52a6f731442dc751ffe1189caab /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/faf1ea81ce85e52a6f731442dc751ffe1189caab/log.py |
traceback.print_stack(stack, file=s) if PRINT_LOCALVARS: s.write("Locals by frame, innermost last%s" % os.linesep) for frame in stack: s.write(os.linesep) s.write("Frame %s in %s at line %s%s" % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno, os.linesep)) | s.write('Traceback:') s.write(os.linesep) for frame, fname, lineno, method, lines, i in reversed(stack): s.write(' File %r, line %d, in %s' % (fname, lineno, method)) s.write(os.linesep) s.write(' %s' % lines[0].lstrip()) if PRINT_LOCALVARS: | def _stack_format (stack): """ Format a stack trace to a message. @return: formatted stack message @rtype: string """ s = StringIO.StringIO() traceback.print_stack(stack, file=s) if PRINT_LOCALVARS: s.write("Locals by frame, innermost last%s" % os.linesep) for frame in stack: s.write(os.linesep) s.write("Frame %s in %s at line %s%s" % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno, os.linesep)) for key, value in frame.f_locals.items(): s.write("\t%20s = " % key) # be careful not to cause a new error in the error output try: s.write(str(value)) s.write(os.linesep) except: s.write("error in str() call%s" % os.linesep) return s.getvalue() | faf1ea81ce85e52a6f731442dc751ffe1189caab /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/faf1ea81ce85e52a6f731442dc751ffe1189caab/log.py |
s.write("\t%20s = " % key) | s.write(" %s = " % key) | def _stack_format (stack): """ Format a stack trace to a message. @return: formatted stack message @rtype: string """ s = StringIO.StringIO() traceback.print_stack(stack, file=s) if PRINT_LOCALVARS: s.write("Locals by frame, innermost last%s" % os.linesep) for frame in stack: s.write(os.linesep) s.write("Frame %s in %s at line %s%s" % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno, os.linesep)) for key, value in frame.f_locals.items(): s.write("\t%20s = " % key) # be careful not to cause a new error in the error output try: s.write(str(value)) s.write(os.linesep) except: s.write("error in str() call%s" % os.linesep) return s.getvalue() | faf1ea81ce85e52a6f731442dc751ffe1189caab /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/faf1ea81ce85e52a6f731442dc751ffe1189caab/log.py |
s.write(str(value)) | s.write(repr(value)) | def _stack_format (stack): """ Format a stack trace to a message. @return: formatted stack message @rtype: string """ s = StringIO.StringIO() traceback.print_stack(stack, file=s) if PRINT_LOCALVARS: s.write("Locals by frame, innermost last%s" % os.linesep) for frame in stack: s.write(os.linesep) s.write("Frame %s in %s at line %s%s" % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno, os.linesep)) for key, value in frame.f_locals.items(): s.write("\t%20s = " % key) # be careful not to cause a new error in the error output try: s.write(str(value)) s.write(os.linesep) except: s.write("error in str() call%s" % os.linesep) return s.getvalue() | faf1ea81ce85e52a6f731442dc751ffe1189caab /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/faf1ea81ce85e52a6f731442dc751ffe1189caab/log.py |
s.write("error in str() call%s" % os.linesep) | s.write("error in repr() call%s" % os.linesep) | def _stack_format (stack): """ Format a stack trace to a message. @return: formatted stack message @rtype: string """ s = StringIO.StringIO() traceback.print_stack(stack, file=s) if PRINT_LOCALVARS: s.write("Locals by frame, innermost last%s" % os.linesep) for frame in stack: s.write(os.linesep) s.write("Frame %s in %s at line %s%s" % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno, os.linesep)) for key, value in frame.f_locals.items(): s.write("\t%20s = " % key) # be careful not to cause a new error in the error output try: s.write(str(value)) s.write(os.linesep) except: s.write("error in str() call%s" % os.linesep) return s.getvalue() | faf1ea81ce85e52a6f731442dc751ffe1189caab /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/3948/faf1ea81ce85e52a6f731442dc751ffe1189caab/log.py |