rem (stringlengths 1-322k) | add (stringlengths 0-2.05M) | context (stringlengths 4-228k) | meta (stringlengths 156-215) |
---|---|---|---|
files[index] = index.encode('utf-7') | files[index] = 'idx_' + index.encode('utf-7') | def dump_dict_index(index): file_path = os.path.join(dest_dir, dict_prefix, '%s.json' % files[index]) f = open(file_path, 'wb') json.dump(indexed_phrases, f, indent=(2 if options.pretty else None)) if options.verbose: print >> sys.stderr, '%d phrases of index %s written to %s.' % (indexed_phrase_count, index, file_path) | 91cbbc26287b84c63b18ab00bc6dd576652d27ac /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5124/91cbbc26287b84c63b18ab00bc6dd576652d27ac/make-json.py |
for ikey in io_map: s = set() for okey in io_map[ikey]: s |= set(keywords[okey]) for x in s: print u'\t'.join([ikey, x]).encode('utf-8') | if options.ikey: for ikey in io_map: s = set() for okey in io_map[ikey]: s |= set(keywords[okey]) for x in s: print u'\t'.join([ikey, x]).encode('utf-8') else: for spelling in spelling_map: ikey = spelling_map[spelling] s = set() for okey in io_map[ikey]: s |= set(keywords[okey]) for x in s: print u'\t'.join([spelling, ikey, x]).encode('utf-8') | def to_js_regex(r): p = r.split(None, 1) if len(p) < 2: return r p[1] = back_ref.sub(ur'$\1', back_ref_g.sub(ur'$\1', p[1])) return u' '.join(p) | b91e80454cf343fb56f29d11181e14b5d2278d07 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5124/b91e80454cf343fb56f29d11181e14b5d2278d07/translate-keywords.py |
spelling_map[t] = s | spelling_map[t] = ikey | def apply_alternative_rule(d, r): for x in d.keys(): if not r[0].search(x): continue y = transform(x, r) if y == x: continue if y not in d: d[y] = d[x] elif self.__report_errors: raise SpellingCollisionError('AlternativeRule', (x, d[x], y, d[y])) return d | 4197eaa4ae02f166eb395d65b8a7b105c4b7f90d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5124/4197eaa4ae02f166eb395d65b8a7b105c4b7f90d/make-json.py |
x = line.strip () | x = line.strip ().decode ('utf-8') | def debug (*what): print >> sys.stderr, '[DEBUG]: ', ' '.join (map (unicode, what)) | 0212345857a32671444a9cfb7463e7856d227827 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5124/0212345857a32671444a9cfb7463e7856d227827/make-json.py |
max_key_length = 3 | max_key_length = 2 | def debug(*what): print >> sys.stderr, '[DEBUG]: ', ' '.join(map(unicode, what)) | 9d399919310ee2905273a1f5452413fdf92e49c2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5124/9d399919310ee2905273a1f5452413fdf92e49c2/make-json.py |
max_key_length = int(value) | max_key_length = max(2, int(value)) | def to_js_regex(r): p = r.split(None, 1) if len(p) < 2: return r p[1] = back_ref.sub(ur'$\1', back_ref_g.sub(ur'$\1', p[1])) return u' '.join(p) | 9d399919310ee2905273a1f5452413fdf92e49c2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5124/9d399919310ee2905273a1f5452413fdf92e49c2/make-json.py |
k, w = x.split(None, 1) | k, w = x.split(u'\t', 1) | def debug(*what): print >> sys.stderr, u'[DEBUG]: ', u' '.join(map(unicode, what)) | 8386e1ce5111bb1721836b509311d1be728e9762 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5124/8386e1ce5111bb1721836b509311d1be728e9762/make-phrases.py |
spelling_map = reduce(apply_alternative_rule, alternative_rules, spelling_map) | | def apply_alternative_rule(d, r): for x in d.keys(): if not r[0].search(x): continue y = transform(x, r) if y == x: continue if y not in d: d[y] = d[x] elif self.__report_errors: raise SpellingCollisionError('AlternativeRule', (x, d[x], y, d[y])) return d | a76b438d8c67668f0f182784e882812dcb6608fd /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5124/a76b438d8c67668f0f182784e882812dcb6608fd/translate-keywords.py |
spelling_map[t] = s | spelling_map[t] = ikey | def apply_alternative_rule(d, r): for x in d.keys(): if not r[0].search(x): continue y = transform(x, r) if y == x: continue if y not in d: d[y] = d[x] elif self.__report_errors: raise SpellingCollisionError('AlternativeRule', (x, d[x], y, d[y])) return d | f04c001748d7df46310c743f58173c6448891d8e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5124/f04c001748d7df46310c743f58173c6448891d8e/translate-keywords.py |
the_input_file="/local/scratch/hauth/data/ZPJ2010/mu_data.root" | the_input_file="/local/scratch/hauth/data/ZPJ2010/mu_data_2010a+b.root" | def multiline_text(line1,line2,line3="",line4=""): string = "#scale[.8]{#splitline{#splitline{%s}{%s}}{#splitline{%s}{%s}}}" %(line1,line2,line3,line4) return string | 9bfeda8db3e2e53af76c6c1b163619765c1d2ae0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5426/9bfeda8db3e2e53af76c6c1b163619765c1d2ae0/create_Response_L2_data.py |
if len(readset) < 1 or not self.sock in readset[0]: if len(readset) < 1: print "select() returns " + str(len(readset)) | if len(readset[0]) < 1 or not self.sock in readset[0]: if len(readset[0]) < 1: print "select() timeout" | def wait_msg_from(self, timeout): endtime = time.time() + timeout msg = "" src_addr = None while time.time() < endtime: readset = select([self.sock], [], [], timeout) if len(readset) < 1 or not self.sock in readset[0]: if len(readset) < 1: print "select() returns " + str(len(readset)) elif not self.sock in readset[0]: print "select() alien socket" else: print "select other error" continue try: msg, src_addr = self.sock.recvfrom(4096) except: print "recv() exception: ", sys.exc_info()[0] continue | 471a860da2b04a110cd9875171efc5f9d257186b /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8531/471a860da2b04a110cd9875171efc5f9d257186b/inc_sip.py |
while cseq <= last_cseq and method == last_method: request, src_addr = dlg.wait_msg_from(10) | call_id = last_call_id while cseq <= last_cseq and method == last_method and call_id == last_call_id: request, src_addr = dlg.wait_msg_from(30) | def test_func(test): pjsua = test.process[0] dlg = sip.Dialog("127.0.0.1", pjsua.inst_param.sip_port, local_port=srv_port, tcp=cfg_file.recvfrom_cfg.tcp) last_cseq = 0 last_method = "" for t in cfg_file.recvfrom_cfg.transaction: # Print transaction title if t.title != "": dlg.trace(t.title) # Run command and expect patterns for c in t.cmds: if c[0] and c[0] != "": pjsua.send(c[0]) if len(c)>1 and c[1] and c[1] != "": pjsua.expect(c[1]) # Wait for request if t.check_cseq: # Absorbs retransmissions cseq = 0 method = last_method while cseq <= last_cseq and method == last_method: request, src_addr = dlg.wait_msg_from(10) if request==None or request=="": raise TestError("Timeout waiting for request") method = request.split(" ", 1)[0] cseq_hval = sip.get_header(request, "CSeq") cseq_hval = cseq_hval.split(" ")[0] cseq = int(cseq_hval) last_cseq = cseq last_method = method else: request, src_addr = dlg.wait_msg_from(10) if request==None or request=="": raise TestError("Timeout waiting for request") # Check for include patterns for pat in t.include: if re.search(pat, request, re.M | re.I)==None: if t.title: tname = " in " + t.title + " transaction" else: tname = "" raise TestError("Pattern " + pat + " not found" + tname) # Check for exclude patterns for pat in t.exclude: if re.search(pat, request, re.M | re.I)!=None: if t.title: tname = " in " + t.title + " transaction" else: tname = "" raise TestError("Excluded pattern " + pat + " found" + tname) # Create response if t.resp_code!=0: response = dlg.create_response(request, t.resp_code, "Status reason") # Add headers to response for h in t.resp_hdr: response = response + h + "\r\n" # Add message body if required if t.body: response = response + t.body # Send response dlg.send_msg(response, src_addr) # Expect something to happen in pjsua if t.expect != "": pjsua.expect(t.expect) # Sync pjsua.sync_stdout() | 100a380d2ad937011127ff124c3d220ce81e19b5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8531/100a380d2ad937011127ff124c3d220ce81e19b5/mod_recvfrom.py |
request, src_addr = dlg.wait_msg_from(10) | request, src_addr = dlg.wait_msg_from(30) | def test_func(test): pjsua = test.process[0] dlg = sip.Dialog("127.0.0.1", pjsua.inst_param.sip_port, local_port=srv_port, tcp=cfg_file.recvfrom_cfg.tcp) last_cseq = 0 last_method = "" for t in cfg_file.recvfrom_cfg.transaction: # Print transaction title if t.title != "": dlg.trace(t.title) # Run command and expect patterns for c in t.cmds: if c[0] and c[0] != "": pjsua.send(c[0]) if len(c)>1 and c[1] and c[1] != "": pjsua.expect(c[1]) # Wait for request if t.check_cseq: # Absorbs retransmissions cseq = 0 method = last_method while cseq <= last_cseq and method == last_method: request, src_addr = dlg.wait_msg_from(10) if request==None or request=="": raise TestError("Timeout waiting for request") method = request.split(" ", 1)[0] cseq_hval = sip.get_header(request, "CSeq") cseq_hval = cseq_hval.split(" ")[0] cseq = int(cseq_hval) last_cseq = cseq last_method = method else: request, src_addr = dlg.wait_msg_from(10) if request==None or request=="": raise TestError("Timeout waiting for request") # Check for include patterns for pat in t.include: if re.search(pat, request, re.M | re.I)==None: if t.title: tname = " in " + t.title + " transaction" else: tname = "" raise TestError("Pattern " + pat + " not found" + tname) # Check for exclude patterns for pat in t.exclude: if re.search(pat, request, re.M | re.I)!=None: if t.title: tname = " in " + t.title + " transaction" else: tname = "" raise TestError("Excluded pattern " + pat + " found" + tname) # Create response if t.resp_code!=0: response = dlg.create_response(request, t.resp_code, "Status reason") # Add headers to response for h in t.resp_hdr: response = response + h + "\r\n" # Add message body if required if t.body: response = response + t.body # Send response dlg.send_msg(response, src_addr) # Expect something to happen in pjsua if t.expect != "": pjsua.expect(t.expect) # Sync pjsua.sync_stdout() | 100a380d2ad937011127ff124c3d220ce81e19b5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8531/100a380d2ad937011127ff124c3d220ce81e19b5/mod_recvfrom.py |
readset = select([self.sock], [], [], timeout) | readset = select([self.sock], [], [], 1) | def wait_msg_from(self, timeout): endtime = time.time() + timeout msg = "" src_addr = None while time.time() < endtime: readset = select([self.sock], [], [], timeout) if len(readset[0]) < 1 or not self.sock in readset[0]: if len(readset[0]) < 1: print "select() timeout" elif not self.sock in readset[0]: print "select() alien socket" else: print "select other error" continue try: msg, src_addr = self.sock.recvfrom(4096) except: print "recv() exception: ", sys.exc_info()[0] continue | 24a513f202f08fb30c0c8f7af83e71ba38a04db4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8531/24a513f202f08fb30c0c8f7af83e71ba38a04db4/inc_sip.py |
print "select() timeout" | print "select() timeout (will wait for " + str(int(endtime - time.time())) + "more secs)" | def wait_msg_from(self, timeout): endtime = time.time() + timeout msg = "" src_addr = None while time.time() < endtime: readset = select([self.sock], [], [], timeout) if len(readset[0]) < 1 or not self.sock in readset[0]: if len(readset[0]) < 1: print "select() timeout" elif not self.sock in readset[0]: print "select() alien socket" else: print "select other error" continue try: msg, src_addr = self.sock.recvfrom(4096) except: print "recv() exception: ", sys.exc_info()[0] continue | 24a513f202f08fb30c0c8f7af83e71ba38a04db4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8531/24a513f202f08fb30c0c8f7af83e71ba38a04db4/inc_sip.py |
self.resp_body = resp_body | self.body = resp_body | def __init__(self, title, resp_code, check_cseq=True, include=[], exclude=[], cmds=[], resp_hdr=[], resp_body=None, expect=""): self.title = title self.cmds = cmds self.include = include self.exclude = exclude self.resp_code = resp_code self.resp_hdr = resp_hdr self.resp_body = resp_body self.expect = expect | 05beb772142d55a390d3224343808c13cac47ff5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8531/05beb772142d55a390d3224343808c13cac47ff5/inc_sip.py |
scenario.append(scenario) | scenarios.append(scenario) | def usage(): print """Periodically monitor working directory for Continuous and Nightly builds | 7c4b94345b2caec32a64a7ee992e6bc3128ef15f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8531/7c4b94345b2caec32a64a7ee992e6bc3128ef15f/run_continuous.py |
print "<<< " + w | if self.DEBUG: print "<<< " + w | def writeWord(self, w): print "<<< " + w self.writeLen(len(w)) self.writeStr(w) | 14ee611de3f84ae451855773247909ab4a59523c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13877/14ee611de3f84ae451855773247909ab4a59523c/RosAPI.py |
print ">>> " + ret | if self.DEBUG: print ">>> " + ret | def readWord(self): ret = self.readStr(self.readLen()) print ">>> " + ret return ret | 14ee611de3f84ae451855773247909ab4a59523c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13877/14ee611de3f84ae451855773247909ab4a59523c/RosAPI.py |
| | def run_interpreter(self): import select, sys inputsentence = [] | 14ee611de3f84ae451855773247909ab4a59523c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13877/14ee611de3f84ae451855773247909ab4a59523c/RosAPI.py |
self.assertTrue('Please log in' in self.browser.contents) | self.assertTrue('Login Name' in self.browser.contents) | def test_unauthenticated(self): ''' unauthenticated users do not have the necessary permissions to view the review list ''' self.browser.open('http://nohost/plone/full_review_list') self.assertTrue('Please log in' in self.browser.contents) | 368c3db384a4e7283e0ed43d722155a3a003bcad /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12237/368c3db384a4e7283e0ed43d722155a3a003bcad/test_reviewlist.py |
icon = t.getProperty('icon_expr') | icon = t.getIconExprObject() | def addable_types(self, include=None): """Return menu item entries in a TAL-friendly form. | dbd16b4ff19d9991d2a49a9d6828e52f88e4a391 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12237/dbd16b4ff19d9991d2a49a9d6828e52f88e4a391/folderfactories.py |
icon = t.getIconExprObject() | | def addable_types(self, include=None): """Return menu item entries in a TAL-friendly form. | dbd16b4ff19d9991d2a49a9d6828e52f88e4a391 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12237/dbd16b4ff19d9991d2a49a9d6828e52f88e4a391/folderfactories.py |
return context_state.folder() | if context_state.is_structural_folder(): return self.context else: return context_state.folder() | def add_context(self): context_state = getMultiAdapter((self.context, self.request), name='plone_context_state') return context_state.folder() | d79b3062bff4383c8f70a0596e09206db9e16d84 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12237/d79b3062bff4383c8f70a0596e09206db9e16d84/folderfactories.py |
def __init__(self, context, request): self.context = context self.request = request | | def __init__(self, context, request): self.context = context self.request = request | 5f69b2d5614b689d44ef325210550786b98d7eef /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6684/5f69b2d5614b689d44ef325210550786b98d7eef/export.py |
templateOutput = self.template().encode('utf-8') | templateOutput = self.template(self).encode('utf-8') | def __call__(self): response = self.request.response response.setHeader('Content-Type', 'application/xml+epub') response.setHeader('Content-Disposition', 'attachment; filename=%s.epub' % self.context.id) | 5f69b2d5614b689d44ef325210550786b98d7eef /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6684/5f69b2d5614b689d44ef325210550786b98d7eef/export.py |
zipFile.writestr('META-INF/container.xml', replace('META-INF/container', {})) | zipFile.writestr('META-INF/container.xml', replace('META-INF/container.xml', {})) | def __call__(self): response = self.request.response response.setHeader('Content-Type', 'application/xml+epub') response.setHeader('Content-Disposition', 'attachment; filename=%s.epub' % self.context.id) | ce4bb1412625f351e681ec29c5620e783a484010 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6684/ce4bb1412625f351e681ec29c5620e783a484010/export.py |
folder = self.context[self.context.invokeFactory('Folder', id=epub.ploneID)] | id = epub.ploneID count = 0 while hasattr(self.context, id): count += 1 id = '%s-%i' % (epub.ploneID, count) folder = self.context[self.context.invokeFactory('Folder', id=id)] | def importFile(self, epubFile): zipFile = ZipFile(epubFile, 'r') epub = EpubFile(zipFile) | a46893ef85d0534a71f18f39022badfdac505717 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6684/a46893ef85d0534a71f18f39022badfdac505717/import.py |
templateOutput = self.template(self).encode('utf-8') | templateOutput = self.template(self) templateOutput = templateOutput.decode('utf-8') templateOutput = templateOutput.encode('utf-8') | def __call__(self): response = self.request.response response.setHeader('Content-Type', 'application/xml+epub') response.setHeader('Content-Disposition', 'attachment; filename=%s.epub' % self.context.id) | 9121d69d9223e133cd3157580f706cc274eb4b6c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6684/9121d69d9223e133cd3157580f706cc274eb4b6c/export.py |
description = description.text.strip() | | def chapters(self): guide = self.rootFile.find('guide') if guide == None: return [] | b6049a662e93ce9bb49109d2fc619af969b2721f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6684/b6049a662e93ce9bb49109d2fc619af969b2721f/import.py |
self.headers['Content-Length'] = len(self.encodeddata) | self.headers['Content-Length'] = str(len(self.encodeddata)) | def setMultipart(self, multipart=True): """Enable multipart data transfer, required for file uploads.""" if not canupload and multipart: raise APIError("The poster package is required for multipart support") self.multipart = multipart if multipart: (datagen, headers) = multipart_encode(self.data) self.headers.pop('Content-Length') self.headers.pop('Content-Type') self.headers.update(headers) self.encodeddata = '' for singledata in datagen: self.encodeddata = self.encodeddata + singledata else: self.encodeddata = urlencode(self.data, 1) self.headers['Content-Length'] = len(self.encodeddata) self.headers['Content-Type'] = "application/x-www-form-urlencoded" | bfbf9aed4f729ebef978c3858ba20be5f4135a3d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/bfbf9aed4f729ebef978c3858ba20be5f4135a3d/api.py |
self.headers['Content-Length'] = len(self.encodeddata) | self.headers['Content-Length'] = str(len(self.encodeddata)) | def changeParam(self, param, value): """Change or add a parameter after making the request object Simply changing self.data won't work as it needs to update other things. | bfbf9aed4f729ebef978c3858ba20be5f4135a3d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/bfbf9aed4f729ebef978c3858ba20be5f4135a3d/api.py |
tlist = unicode(tlist) | tlist = unicode(tlist, 'utf8') | def listFromTitles(site, titles, check=True, followRedir=False): """Create a list of page objects from a list of titles check and followRedir have the same meaning as in page.Page """ ret = [] if not check: for title in titles: title = page.Page(site, title=title, check=False) ret.append(title) else: querylist = [] limit = int(site.limit) if len(titles) > limit/10: iters = int(math.ceil(float(len(titles)) / (limit/10))) for x in range(0,iters): lower = x*limit/10 upper = (x+1)*limit/10 querylist.append(titles[lower:upper]) else: querylist.append(titles) response = False for item in querylist: tlist = '|'.join(item) if not isinstance(tlist, unicode): tlist = unicode(tlist) params = {'action':'query', 'titles':tlist, } if followRedir: params['redirects'] = '' req = api.APIRequest(site, params) res = req.query(False) if not response: response = res else: # This breaks on non-existent titles, the api gives them negative numbers # resultCombine doesn't account for this and ignores or overwrites the # duplicate pageids response = api.resultCombine('', response, res) for key in response['query']['pages'].keys(): res = response['query']['pages'][key] item = makePage(key, res, site) ret.append(item) return ret | 0fccd5dee28ace39b1660dfd6b4ed69e8ef7f66f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/0fccd5dee28ace39b1660dfd6b4ed69e8ef7f66f/pagelist.py |
self.useragent = "python-wikitools/1.0" | self.useragent = "python-wikitools/%s" % VERSION | def logout(self): params = { 'action': 'logout' } if self.maxlag < 120: params['maxlag'] = 120 cookiefile = self.cookiepath + str(hash(self.username+' - '+self.apibase))+'.cookies' try: os.remove(cookiefile) except: pass req = api.APIRequest(self, params, write=True) # action=logout returns absolutely nothing, which json.loads() treats as False # causing APIRequest.query() to get stuck in a loop req.opener.open(req.request) self.cookies = WikiCookieJar() self.username = '' self.maxlag = 5 self.useragent = "python-wikitools/1.0" self.limit = 500 return True | 34d5ff1580e3c19835c15c5e841f6e84edbee7e3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/34d5ff1580e3c19835c15c5e841f6e84edbee7e3/wiki.py |
if self.name == other.name and self.site == other.wiki: | if self.name == other.name and self.site == other.site: | def __eq__(self, other): if not isinstance(other, User): return False if self.name == other.name and self.site == other.wiki: return True return False | ba0963ae74cc5bece5f1be6be7057488bcaf6635 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/ba0963ae74cc5bece5f1be6be7057488bcaf6635/user.py |
if self.name == other.name and self.site == other.wiki: | if self.name == other.name and self.site == other.site: | def __ne__(self, other): if not isinstance(other, User): return True if self.name == other.name and self.site == other.wiki: return False return True | ba0963ae74cc5bece5f1be6be7057488bcaf6635 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/ba0963ae74cc5bece5f1be6be7057488bcaf6635/user.py |
raise DimensionError("Can't specify both width and height") | raise FileDimensionError("Can't specify both width and height") | def download(self, width=False, height=False, location=False): """Download the image to a local file width/height - set width OR height of the downloaded image location - set the filename to save to. If not set, the page title minus the namespace prefix will be used and saved to the current directory """ if self.pageid == 0: self.setPageInfo() params = {'action':'query', 'prop':'imageinfo', 'iiprop':'url' } if width and height: raise DimensionError("Can't specify both width and height") if width: params['iiurlwidth'] = width if height: params['iiurlheight'] = height if self.pageid != 0: params['pageids'] = self.pageid elif self.title: params['titles'] = self.title else: self.setPageInfo() if not self.exists: # Non-existant files may be on a shared repo (e.g. commons) params['titles'] = self.title else: params['pageids'] = self.pageid req = api.APIRequest(self.site, params) res = req.query(False) key = res['query']['pages'].keys()[0] url = res['query']['pages'][key]['imageinfo'][0]['url'] if not location: location = self.title.split(':', 1)[1] opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.site.cookies)) headers = { "User-agent": self.site.useragent } request = urllib2.Request(url, None, headers) data = opener.open(request) f = open(location, 'wb', 0) f.write(data.read()) f.close() return location | ac6f101ec0a74149c27bbfaac0ba2ed9953cbac2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/ac6f101ec0a74149c27bbfaac0ba2ed9953cbac2/wikifile.py |
try: print info['login']['result'] except: print info['error']['code'] print info['error']['info'] return False | return loginerror(info) | def login(self, username, password=False, remember=False, force=False, verify=True, domain=None): """Login to the site remember - saves cookies to a file - the filename will be: hash(username - apibase).cookies the cookies will be saved in the current directory, change cookiepath to use a different location force - forces login over the API even if a cookie file exists and overwrites an existing cookie file if remember is True verify - Checks cookie validity with isLoggedIn() domain - domain name, required for some auth systems like LDAP """ if not force: try: cookiefile = self.cookiepath + str(hash(username+' - '+self.apibase))+'.cookies' self.cookies.load(self, cookiefile, True, True) self.username = username if not verify or self.isLoggedIn(self.username): return True except: pass if not password: from getpass import getpass password = getpass() data = { "action" : "login", "lgname" : username, "lgpassword" : password, } if domain is not None: data["lgdomain"] = domain if self.maxlag < 120: data['maxlag'] = 120 req = api.APIRequest(self, data) info = req.query() if info['login']['result'] == "Success": self.username = username else: try: print info['login']['result'] except: print info['error']['code'] print info['error']['info'] return False if not self.siteinfo: self.setSiteinfo() params = { 'action': 'query', 'meta': 'userinfo', 'uiprop': 'rights', } if self.maxlag < 120: params['maxlag'] = 120 req = api.APIRequest(self, params) info = req.query() user_rights = info['query']['userinfo']['rights'] if 'apihighlimits' in user_rights: self.limit = 5000 if remember: cookiefile = self.cookiepath + str(hash(self.username+' - '+self.apibase))+'.cookies' self.cookies.save(self, cookiefile, True, True) if self.useragent == "python-wikitools/%s" % VERSION: self.useragent = "python-wikitools/%s (User:%s)" % (VERSION, self.username) return True | a86fed85ccca03200cc67ffa5791efd0affb814f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/a86fed85ccca03200cc67ffa5791efd0affb814f/wiki.py |
attr = "NS_%s" % (nsdata[ns]['canonical'].replace(' ', '_').upper()) | try: attr = "NS_%s" % (nsdata[ns]['canonical'].replace(' ', '_').upper()) except KeyError: attr = "NS_%s" % (nsdata[ns]['*'].replace(' ', '_').upper()) | def setSiteinfo(self): """Retrieves basic siteinfo Called when constructing, or after login if the first call failed """ params = {'action':'query', 'meta':'siteinfo', 'siprop':'general|namespaces|namespacealiases', } if self.maxlag < 120: params['maxlag'] = 120 req = api.APIRequest(self, params) info = req.query() sidata = info['query']['general'] for item in sidata: self.siteinfo[item] = sidata[item] nsdata = info['query']['namespaces'] for ns in nsdata: nsinfo = nsdata[ns] self.namespaces[nsinfo['id']] = nsinfo if ns != "0": attr = "NS_%s" % (nsdata[ns]['canonical'].replace(' ', '_').upper()) else: attr = "NS_MAIN" setattr(self, attr.encode('utf8'), Namespace(ns.encode('utf8'))) nsaliasdata = info['query']['namespacealiases'] if nsaliasdata: for ns in nsaliasdata: self.NSaliases[ns['*']] = ns['id'] if not 'writeapi' in sidata: print "WARNING: Write-API not enabled, you will not be able to edit" version = re.search("\d\.(\d\d)", self.siteinfo['generator']) if not int(version.group(1)) >= 13: # Will this even work on 13? print "WARNING: Some features may not work on older versions of MediaWiki" return self | a272001a596e895fe13d86be11333bd72d7e5487 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/a272001a596e895fe13d86be11333bd72d7e5487/wiki.py |
setattr(self, attr, Namespace(ns)) | setattr(self, attr.encode('utf8'), Namespace(ns.encode('utf8'))) | def setSiteinfo(self): """Retrieves basic siteinfo Called when constructing, or after login if the first call failed """ params = {'action':'query', 'meta':'siteinfo', 'siprop':'general|namespaces|namespacealiases', } if self.maxlag < 120: params['maxlag'] = 120 req = api.APIRequest(self, params) info = req.query() sidata = info['query']['general'] for item in sidata: self.siteinfo[item] = sidata[item] nsdata = info['query']['namespaces'] for ns in nsdata: nsinfo = nsdata[ns] self.namespaces[nsinfo['id']] = nsinfo if ns != "0": attr = "NS_%s" % (nsdata[ns]['canonical'].replace(' ', '_').upper()) else: attr = "NS_MAIN" setattr(self, attr, Namespace(ns)) nsaliasdata = info['query']['namespacealiases'] if nsaliasdata: for ns in nsaliasdata: self.NSaliases[ns['*']] = ns['id'] if not 'writeapi' in sidata: print "WARNING: Write-API not enabled, you will not be able to edit" version = re.search("\d\.(\d\d)", self.siteinfo['generator']) if not int(version.group(1)) >= 13: # Will this even work on 13? print "WARNING: Some features may not work on older versions of MediaWiki" return self | 57f877694fce49c733a515722e3a869697f2b09d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/57f877694fce49c733a515722e3a869697f2b09d/wiki.py |
print content | | def __parseJSON(self, data): maxlag = True while maxlag: try: maxlag = False parsed = json.loads(data.read()) content = None if isinstance(parsed, dict): content = APIResult(parsed) content.response = self.response.items() elif isinstance(parsed, list): content = APIListResult(parsed) content.response = self.response.items() else: content = parsed print content if 'error' in content: error = content['error']['code'] if error == "maxlag": lagtime = int(re.search("(\d+) seconds", content['error']['info']).group(1)) if lagtime > self.wiki.maxwaittime: lagtime = self.wiki.maxwaittime print("Server lag, sleeping for "+str(lagtime)+" seconds") maxlag = True time.sleep(int(lagtime)+0.5) return False except: # Something's wrong with the data... data.seek(0) if "MediaWiki API is not enabled for this site. Add the following line to your LocalSettings.php<pre><b>$wgEnableAPI=true;</b></pre>" in data.read(): raise APIDisabled("The API is not enabled on this site") print "Invalid JSON, trying request again" # FIXME: Would be nice if this didn't just go forever if its never going to work return False return content | 31e9c8f9b96011acf7ec7c5a5dd327849a4ceb56 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/31e9c8f9b96011acf7ec7c5a5dd327849a4ceb56/api.py |
content = APIResult(json.loads(data.read())) content.response = self.response.items() | parsed = json.loads(data.read()) content = None if isinstance(parsed, dict): content = APIResult(parsed) content.response = self.response.items() elif isinstance(parsed, list): content = APIListResult(parsed) content.response = self.response.items() else: content = parsed print content | def __parseJSON(self, data): maxlag = True while maxlag: try: maxlag = False content = APIResult(json.loads(data.read())) content.response = self.response.items() if 'error' in content: error = content['error']['code'] if error == "maxlag": lagtime = int(re.search("(\d+) seconds", content['error']['info']).group(1)) if lagtime > self.wiki.maxwaittime: lagtime = self.wiki.maxwaittime print("Server lag, sleeping for "+str(lagtime)+" seconds") maxlag = True time.sleep(int(lagtime)+0.5) return False except: # Something's wrong with the data... data.seek(0) if "MediaWiki API is not enabled for this site. Add the following line to your LocalSettings.php<pre><b>$wgEnableAPI=true;</b></pre>" in data.read(): raise APIDisabled("The API is not enabled on this site") print "Invalid JSON, trying request again" # FIXME: Would be nice if this didn't just go forever if its never going to work return False return content | 5bc6b6350a20cb3e9d7d70fb88a24208feccf583 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8398/5bc6b6350a20cb3e9d7d70fb88a24208feccf583/api.py |
if( os.path.exists( self.ptMount + "/ttgo.bif" ) ): | fileTTGObif = self.ptMount + "/ttgo.bif" if( os.path.exists( fileTTGObif ) ): | def GetConfig( self ): | deca8f39299a234f8f3e67660be4445d50de52a8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/deca8f39299a234f8f3e67660be4445d50de52a8/pytomtom.py |
with open( self.ptMount + "/ttgo.bif", "rb" ) as ttgobif: | with open( fileTTGObif, "rb" ) as ttgobif: | def GetConfig( self ): | deca8f39299a234f8f3e67660be4445d50de52a8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/deca8f39299a234f8f3e67660be4445d50de52a8/pytomtom.py |
cmd = ("mkdir '" + self.dirPoi + filename + "'" ) | cmd = ("mkdir -p '" + self.dirPoi + filename + "'" ) | def addPoiToDatabase( self,entry ): | deca8f39299a234f8f3e67660be4445d50de52a8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/deca8f39299a234f8f3e67660be4445d50de52a8/pytomtom.py |
| | def addPoiToDatabase( self,entry ): | deca8f39299a234f8f3e67660be4445d50de52a8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/deca8f39299a234f8f3e67660be4445d50de52a8/pytomtom.py |
self.poiCombo.append_text( filename ) | | def addPoiToDatabase( self,entry ): | deca8f39299a234f8f3e67660be4445d50de52a8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/deca8f39299a234f8f3e67660be4445d50de52a8/pytomtom.py |
| | def IsPtMount(self, mountPoint): | 38a8d86bb10af730f7c25471a9b3088cab57a071 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/38a8d86bb10af730f7c25471a9b3088cab57a071/pytomtom.py |
cmd += " 2> /dev/null | tail -n +2 | tr -s ' ' | cut -d ' ' -f 4,7 --output-delimiter=," | cmd += " 2> /dev/null | tail -n +2 | tr -s ' ' | cut -d ' ' -f 4,7-" | def GetPtWithSize( self, type = None, ptMount = None ): | 38a8d86bb10af730f7c25471a9b3088cab57a071 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/38a8d86bb10af730f7c25471a9b3088cab57a071/pytomtom.py |
line = line.split( ',', 2 ) self.Debug( 5, "Command result: " + str( int( line[0 ] ) ) + " -> " + line[ 1 ] ) res.append( [ int( line[ 0 ] ), line[ 1 ] ] ) | line = line.split( ' ', 1 ) self.Debug( 5, "Command result: " + str( int( line[ 0 ] ) ) + " -> " + line[ -1 ] ) res.append( [ int( line[ 0 ] ), line[ -1 ] ] ) | def GetPtWithSize( self, type = None, ptMount = None ): | 38a8d86bb10af730f7c25471a9b3088cab57a071 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/38a8d86bb10af730f7c25471a9b3088cab57a071/pytomtom.py |
files = os.listdir( self.dirPoi ) files.sort() for file in files: self.poiCombo.append_text( file ) | if( os.path.exists( self.dirPoi ) ): files = os.listdir( self.dirPoi ) files.sort() for file in files: self.poiCombo.append_text( file ) | def FramePoi( self, notebook ): | 38a8d86bb10af730f7c25471a9b3088cab57a071 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/38a8d86bb10af730f7c25471a9b3088cab57a071/pytomtom.py |
cmd = ( "umount " + self.ptMount ) | cmd = ( "umount '" + self.ptMount +"'" ) | def UMount(self, mountPoint): | 8243e11b0eb6e861e9a780faca385bc774f517b8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/8243e11b0eb6e861e9a780faca385bc774f517b8/pytomtom.py |
cmd = ("cp '" + self.dirPoi + selectedPoi + "/'* " + self.ptMount + "/" + self.CurrentMap ) | cmd = ("cp '" + self.dirPoi + selectedPoi + "/'* '" + self.ptMount + "'/" + self.CurrentMap ) | def addPoiToTomtom( self,entry ): | 8243e11b0eb6e861e9a780faca385bc774f517b8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/8243e11b0eb6e861e9a780faca385bc774f517b8/pytomtom.py |
cmd = ("rm " + self.ptMount + "/" + self.CurrentMap + "/'" + file + "'") | cmd = ("rm -f '" + self.ptMount + "'/" + self.CurrentMap + "/'" + file + "'") | def delPoiOnTomtom( self,entry ): | 8243e11b0eb6e861e9a780faca385bc774f517b8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/8243e11b0eb6e861e9a780faca385bc774f517b8/pytomtom.py |
cmd = "cd " + self.ptMount + "; tar -" + option + "f \"" + self.fileName + "\" ." | cmd = "cd '" + self.ptMount + "'; tar -" + option + "f \"" + self.fileName + "\" ." | def BackupRestoreGPS( self, widget, type ): | 80409b0c1bd73a06ef6008e89a7706fb6832ba11 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/80409b0c1bd73a06ef6008e89a7706fb6832ba11/pytomtom.py |
cmd = "cd " + self.ptMount + "; echo tar -" + option + "f \"" + self.fileName + "\" ." | cmd = "cd '" + self.ptMount + "'; echo tar -" + option + "f \"" + self.fileName + "\" ." | def BackupRestoreGPS( self, widget, type ): | 80409b0c1bd73a06ef6008e89a7706fb6832ba11 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/80409b0c1bd73a06ef6008e89a7706fb6832ba11/pytomtom.py |
self.popup = gtk.FileChooserDialog( _( "Open..." ), gtk.Window( gtk.WINDOW_TOPLEVEL ), | self.popup = gtk.FileChooserDialog( _( "Open folder..." ), gtk.Window( gtk.WINDOW_TOPLEVEL ), | def addPoiToDatabase( self,entry ): | 80409b0c1bd73a06ef6008e89a7706fb6832ba11 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/80409b0c1bd73a06ef6008e89a7706fb6832ba11/pytomtom.py |
| | def addPoiToDatabase( self,entry ): | 80409b0c1bd73a06ef6008e89a7706fb6832ba11 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/603/80409b0c1bd73a06ef6008e89a7706fb6832ba11/pytomtom.py |
ip_tags = TagContainer(ip) control_tags = TagContainer(control) | ip_tags = TagContainer() control_tags = TagContainer() ip_tags(ip) control_tags(control) | def main(): # option parser usage = 'peakzilla [OPTIONS] IP_ALIGNMENT CONTROL_ALIGNMENT > RESULTS' parser = OptionParser(usage=usage) parser.add_option("-s", "--fragment_size",\ type = "int", dest="fragment_size", default="200",\ help = "fragment size in bp used to define the upper limit of peak size for modeling: default=200") parser.add_option("-m", "--model_threshold",\ type = "float", dest="model_threshold", default="120",\ help = "fold enrichment threshold over average read density for building the peak model: default=120") parser.add_option("-t", "--peak_threshold",\ type = "float", dest="peak_threshold", default="40",\ help = "fold enrichment threshold over average read density for finding candidate peaks: default=40") parser.add_option("-f", "--fdr",\ type = "float", dest="fdr", default='1',\ help = "cutoff for the estimated FDR value: default = 1") parser.add_option("-q", "--quiet",\ action = "store_false", dest="verbose", default=True,\ help = "don't print status messages") # read arguments and options (options, args) = parser.parse_args() if len(args) != 2: # if incorrect number of arguments are provided return help message parser.print_help() sys.exit(0) ip = args[0] control = args[1] # load tags print_status('Loading tags ...', options.verbose) ip_tags = TagContainer(ip) control_tags = TagContainer(control) # first attempt of modeling peak size print_status('Modeling peak size and shift ...', options.verbose) peak_model = PeakModel(ip_tags, options.fragment_size, options.model_threshold) # change model threshold until it yields a reasonable number of peaks while peak_model.peaks_incorporated < 800 or peak_model.peaks_incorporated > 1200: if peak_model.peaks_incorporated < 800: options.model_threshold = options.model_threshold / 2 print_status('Model threshold was set too high, trying: %.1f' % options.model_threshold, options.verbose) peak_model = PeakModel(ip_tags, options.fragment_size, options.model_threshold) else: options.model_threshold = options.model_threshold * 1.5 print_status('Model threshold was set too low, trying: %.1f' % options.model_threshold, options.verbose) peak_model = PeakModel(ip_tags, options.fragment_size, options.model_threshold) print_status('Used best %d peaks for modeling ...' % peak_model.peaks_incorporated, options.verbose) print_status('Peak size is %d bp' % peak_model.peak_size, options.verbose) # first attempt to find candidate peaks in control sample print_status('Finding potential false positives ...', options.verbose) control_peaks = PeakContainer(control_tags, ip_tags, peak_model.peak_size, options.peak_threshold) # change peak threshold until it yields a reasonable number of peaks while control_peaks.peak_count < 1000 or control_peaks.peak_count > 10000: if control_peaks.peak_count < 1000: options.peak_threshold = options.peak_threshold / 2 print_status('Peak threshold was set too high, trying: %.2f' % options.peak_threshold, options.verbose) control_peaks = PeakContainer(control_tags, ip_tags, peak_model.peak_size, options.peak_threshold) else: options.peak_threshold = options.peak_threshold * 1.5 print_status('Peak threshold was set too low, trying: %.2f' % options.peak_threshold, options.verbose) control_peaks = PeakContainer(control_tags, ip_tags, peak_model.peak_size, options.peak_threshold) print_status('%d potential false positives found' % control_peaks.peak_count, options.verbose) # find candidate peaks in IP sample print_status('Finding peak candidates ...', options.verbose) ip_peaks = PeakContainer(ip_tags, control_tags, peak_model.peak_size, options.peak_threshold) print_status('%d candidate peaks found' % ip_peaks.peak_count, options.verbose) # build distribution model print_status('Modeling tag distribution ...', options.verbose) distribution_model = ip_peaks.model_tag_distribution() # calculate tag distribution scores print_status('Calculating tag distribution scores ...', options.verbose) ip_peaks.determine_distribution_scores(distribution_model) control_peaks.determine_distribution_scores(distribution_model) # calculate FDR print_status('Calculating FDR ...', options.verbose) ip_peaks.calculate_fdr(control_peaks.peaks) # write output as bed files print_status('Writing results to file ...', options.verbose) ip_peaks.write_to_stdout(options.fdr) print_status('Done!', options.verbose) | c2c73624eaedf4bddea0eb24ed23b898dcdc41f4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14395/c2c73624eaedf4bddea0eb24ed23b898dcdc41f4/peakzilla.py |
usage = 'peakzilla [OPTIONS] IP_ALIGNMENT CONTROL_ALIGNMENT > RESULTS' | usage = 'peakzilla [OPTIONS] chip.bed control.bed > results.tsv' | def main(): # option parser usage = 'peakzilla [OPTIONS] IP_ALIGNMENT CONTROL_ALIGNMENT > RESULTS' parser = OptionParser(usage=usage) parser.add_option("-s", "--fragment_size",\ type = "int", dest="fragment_size", default="200",\ help = "fragment size in bp used to define the upper limit of peak size for modeling: default=200") parser.add_option("-m", "--model_threshold",\ type = "float", dest="model_threshold", default="120",\ help = "fold enrichment threshold over average read density for building the peak model: default=120") parser.add_option("-t", "--peak_threshold",\ type = "float", dest="peak_threshold", default="40",\ help = "fold enrichment threshold over average read density for finding candidate peaks: default=40") parser.add_option("-f", "--fdr",\ type = "float", dest="fdr", default='1',\ help = "cutoff for the estimated FDR value: default = 1") parser.add_option("-q", "--quiet",\ action = "store_false", dest="verbose", default=True,\ help = "don't print status messages") # read arguments and options (options, args) = parser.parse_args() if len(args) != 2: # if incorrect number of arguments are provided return help message parser.print_help() sys.exit(0) ip = args[0] control = args[1] # load tags print_status('Loading tags ...', options.verbose) ip_tags = TagContainer() control_tags = TagContainer() ip_tags(ip) control_tags(control) # first attempt of modeling peak size print_status('Modeling peak size and shift ...', options.verbose) peak_model = PeakModel(ip_tags, options.fragment_size, options.model_threshold) # change model threshold until it yields a reasonable number of peaks while peak_model.peaks_incorporated < 800 or peak_model.peaks_incorporated > 1200: if peak_model.peaks_incorporated < 800: options.model_threshold = options.model_threshold / 2 print_status('Model threshold was set too high, trying: %.1f' % options.model_threshold, options.verbose) peak_model = PeakModel(ip_tags, options.fragment_size, options.model_threshold) else: options.model_threshold = options.model_threshold * 1.5 print_status('Model threshold was set too low, trying: %.1f' % options.model_threshold, options.verbose) peak_model = PeakModel(ip_tags, options.fragment_size, options.model_threshold) print_status('Used best %d peaks for modeling ...' % peak_model.peaks_incorporated, options.verbose) print_status('Peak size is %d bp' % peak_model.peak_size, options.verbose) # first attempt to find candidate peaks in control sample print_status('Finding potential false positives ...', options.verbose) control_peaks = PeakContainer(control_tags, ip_tags, peak_model.peak_size, options.peak_threshold) # change peak threshold until it yields a reasonable number of peaks while control_peaks.peak_count < 2000 or control_peaks.peak_count > 10000: if control_peaks.peak_count < 2000: options.peak_threshold = options.peak_threshold / 2 print_status('Peak threshold was set too high, trying: %.2f' % options.peak_threshold, options.verbose) control_peaks = PeakContainer(control_tags, ip_tags, peak_model.peak_size, options.peak_threshold) else: options.peak_threshold = options.peak_threshold * 1.5 print_status('Peak threshold was set too low, trying: %.2f' % options.peak_threshold, options.verbose) control_peaks = PeakContainer(control_tags, ip_tags, peak_model.peak_size, options.peak_threshold) print_status('%d potential false positives found' % control_peaks.peak_count, options.verbose) # find candidate peaks in IP sample print_status('Finding peak candidates ...', options.verbose) ip_peaks = PeakContainer(ip_tags, control_tags, peak_model.peak_size, options.peak_threshold) print_status('%d candidate peaks found' % ip_peaks.peak_count, options.verbose) # build distribution model print_status('Modeling tag distribution ...', options.verbose) distribution_model = ip_peaks.model_tag_distribution() # calculate tag distribution scores print_status('Calculating tag distribution scores ...', options.verbose) ip_peaks.determine_distribution_scores(distribution_model) control_peaks.determine_distribution_scores(distribution_model) # calculate FDR print_status('Calculating FDR ...', options.verbose) ip_peaks.calculate_fdr(control_peaks.peaks) # write output as bed files print_status('Writing results to file ...', options.verbose) ip_peaks.write_to_stdout(options.fdr) print_status('Done!', options.verbose) | 6b776388de141ba8dec6dd5ef788688a91d0c759 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14395/6b776388de141ba8dec6dd5ef788688a91d0c759/peakzilla.py |
self.peak_shift = int(median(self.peak_shifts)) self.peak_size = self.peak_shift * 2 | if self.peak_shifts: self.peak_shift = int(median(self.peak_shifts)) self.peak_size = self.peak_shift * 2 | def build(self): # for all chromosomes look for shifted peaks for chrom in self.tags.get_chrom_names(): plus_peaks = self.find_simple_peaks(chrom, '+') minus_peaks = self.find_simple_peaks(chrom, '-') self.determine_shifts(plus_peaks, minus_peaks) # calculate the meidan peak_shift self.peak_shift = int(median(self.peak_shifts)) # peak size is 2 * shift size self.peak_size = self.peak_shift * 2 | 6b776388de141ba8dec6dd5ef788688a91d0c759 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14395/6b776388de141ba8dec6dd5ef788688a91d0c759/peakzilla.py |
candidate_survivals = 0 | peak_candidate.survivals = 0 | def find_peaks(self, chrom): # identify peak candidates on chromosome self.peaks[chrom] = [] # convert tag arrays to deque for fast appending and popping plus_tags = deque(self.ip_tags.get_tags(chrom, '+')) minus_tags = deque(self.ip_tags.get_tags(chrom, '-')) # initalize windows and stuff plus_window = deque([]) minus_window = deque([]) score_buffer = deque([]) peak_candidate = Peak() position = 0 while plus_tags and minus_tags: # fill windows while plus_tags and plus_tags[0] < position: plus_window.append(plus_tags.popleft()) while minus_tags and minus_tags[0] < (position + self.peak_shift): minus_window.append(minus_tags.popleft()) # get rid of old tags not fitting in the window any more while plus_window and plus_window[0] < (position - self.peak_shift): plus_window.popleft() while minus_window and minus_window[0] < position: minus_window.popleft() # add position to region if over threshold score = len(plus_window) + len(minus_window) if score > self.tag_threshold: # save all scores in buffer score_buffer.append(score) # get rid of old scores that are outside of the filter if len(score_buffer) > self.peak_size: score_buffer.popleft() # if current score is as big or bigger, consider it instead if score >= peak_candidate.tag_count: peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 # candidate survives if current score is smaller else: candidate_survivals += 1 # if candidate survives long enough do the expensive lookup if candidate_survivals == self.peak_shift: # check score buffer to see whether candidate is a maximum # candidate is in the middle of the buffer now if peak_candidate.tag_count == max(score_buffer): self.add_peak(peak_candidate, chrom) # consider current score next, reset survivals peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 # while in enriched region move windows in 1 bp steps position += 1 else: # if we still have a candidate check whether its a max and add if peak_candidate: if peak_candidate.tag_count == max(score_buffer): self.add_peak(peak_candidate, chrom) peak_candidate = Peak() candidate_survivals = 0 score_buffer = deque([]) # determine the next informative position in the genome and move there if plus_tags and minus_tags: distance_to_next = plus_tags[0] - position + 1 position += distance_to_next | cf4313aa64a9e9b865d9f2f493cc2a5c955fc5bf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14395/cf4313aa64a9e9b865d9f2f493cc2a5c955fc5bf/peakzilla.py |
candidate_survivals += 1 | peak_candidate.survivals += 1 | def find_peaks(self, chrom): # identify peak candidates on chromosome self.peaks[chrom] = [] # convert tag arrays to deque for fast appending and popping plus_tags = deque(self.ip_tags.get_tags(chrom, '+')) minus_tags = deque(self.ip_tags.get_tags(chrom, '-')) # initalize windows and stuff plus_window = deque([]) minus_window = deque([]) score_buffer = deque([]) peak_candidate = Peak() position = 0 while plus_tags and minus_tags: # fill windows while plus_tags and plus_tags[0] < position: plus_window.append(plus_tags.popleft()) while minus_tags and minus_tags[0] < (position + self.peak_shift): minus_window.append(minus_tags.popleft()) # get rid of old tags not fitting in the window any more while plus_window and plus_window[0] < (position - self.peak_shift): plus_window.popleft() while minus_window and minus_window[0] < position: minus_window.popleft() # add position to region if over threshold score = len(plus_window) + len(minus_window) if score > self.tag_threshold: # save all scores in buffer score_buffer.append(score) # get rid of old scores that are outside of the filter if len(score_buffer) > self.peak_size: score_buffer.popleft() # if current score is as big or bigger, consider it instead if score >= peak_candidate.tag_count: peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 # candidate survives if current score is smaller else: candidate_survivals += 1 # if candidate survives long enough do the expensive lookup if candidate_survivals == self.peak_shift: # check score buffer to see whether candidate is a maximum # candidate is in the middle of the buffer now if peak_candidate.tag_count == max(score_buffer): self.add_peak(peak_candidate, chrom) # consider current score next, reset survivals peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 # while in enriched region move windows in 1 bp steps position += 1 else: # if we still have a candidate check whether its a max and add if peak_candidate: if peak_candidate.tag_count == max(score_buffer): self.add_peak(peak_candidate, chrom) peak_candidate = Peak() candidate_survivals = 0 score_buffer = deque([]) # determine the next informative position in the genome and move there if plus_tags and minus_tags: distance_to_next = plus_tags[0] - position + 1 position += distance_to_next | cf4313aa64a9e9b865d9f2f493cc2a5c955fc5bf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14395/cf4313aa64a9e9b865d9f2f493cc2a5c955fc5bf/peakzilla.py |
if candidate_survivals == self.peak_shift: | if peak_candidate.survivals == self.peak_shift: | def find_peaks(self, chrom): # identify peak candidates on chromosome self.peaks[chrom] = [] # convert tag arrays to deque for fast appending and popping plus_tags = deque(self.ip_tags.get_tags(chrom, '+')) minus_tags = deque(self.ip_tags.get_tags(chrom, '-')) # initalize windows and stuff plus_window = deque([]) minus_window = deque([]) score_buffer = deque([]) peak_candidate = Peak() position = 0 while plus_tags and minus_tags: # fill windows while plus_tags and plus_tags[0] < position: plus_window.append(plus_tags.popleft()) while minus_tags and minus_tags[0] < (position + self.peak_shift): minus_window.append(minus_tags.popleft()) # get rid of old tags not fitting in the window any more while plus_window and plus_window[0] < (position - self.peak_shift): plus_window.popleft() while minus_window and minus_window[0] < position: minus_window.popleft() # add position to region if over threshold score = len(plus_window) + len(minus_window) if score > self.tag_threshold: # save all scores in buffer score_buffer.append(score) # get rid of old scores that are outside of the filter if len(score_buffer) > self.peak_size: score_buffer.popleft() # if current score is as big or bigger, consider it instead if score >= peak_candidate.tag_count: peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 # candidate survives if current score is smaller else: candidate_survivals += 1 # if candidate survives long enough do the expensive lookup if candidate_survivals == self.peak_shift: # check score buffer to see whether candidate is a maximum # candidate is in the middle of the buffer now if peak_candidate.tag_count == max(score_buffer): self.add_peak(peak_candidate, chrom) # consider current score next, reset survivals peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 # while in enriched region move windows in 1 bp steps position += 1 else: # if we still have a candidate check whether its a max and add if peak_candidate: if peak_candidate.tag_count == max(score_buffer): self.add_peak(peak_candidate, chrom) peak_candidate = Peak() candidate_survivals = 0 score_buffer = deque([]) # determine the next informative position in the genome and move there if plus_tags and minus_tags: distance_to_next = plus_tags[0] - position + 1 position += distance_to_next | cf4313aa64a9e9b865d9f2f493cc2a5c955fc5bf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14395/cf4313aa64a9e9b865d9f2f493cc2a5c955fc5bf/peakzilla.py |
peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 | peak_candidate = Peak() | def find_peaks(self, chrom): # identify peak candidates on chromosome self.peaks[chrom] = [] # convert tag arrays to deque for fast appending and popping plus_tags = deque(self.ip_tags.get_tags(chrom, '+')) minus_tags = deque(self.ip_tags.get_tags(chrom, '-')) # initalize windows and stuff plus_window = deque([]) minus_window = deque([]) score_buffer = deque([]) peak_candidate = Peak() position = 0 while plus_tags and minus_tags: # fill windows while plus_tags and plus_tags[0] < position: plus_window.append(plus_tags.popleft()) while minus_tags and minus_tags[0] < (position + self.peak_shift): minus_window.append(minus_tags.popleft()) # get rid of old tags not fitting in the window any more while plus_window and plus_window[0] < (position - self.peak_shift): plus_window.popleft() while minus_window and minus_window[0] < position: minus_window.popleft() # add position to region if over threshold score = len(plus_window) + len(minus_window) if score > self.tag_threshold: # save all scores in buffer score_buffer.append(score) # get rid of old scores that are outside of the filter if len(score_buffer) > self.peak_size: score_buffer.popleft() # if current score is as big or bigger, consider it instead if score >= peak_candidate.tag_count: peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 # candidate survives if current score is smaller else: candidate_survivals += 1 # if candidate survives long enough do the expensive lookup if candidate_survivals == self.peak_shift: # check score buffer to see whether candidate is a maximum # candidate is in the middle of the buffer now if peak_candidate.tag_count == max(score_buffer): self.add_peak(peak_candidate, chrom) # consider current score next, reset survivals peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 # while in enriched region move windows in 1 bp steps position += 1 else: # if we still have a candidate check whether its a max and add if peak_candidate: if peak_candidate.tag_count == max(score_buffer): self.add_peak(peak_candidate, chrom) peak_candidate = Peak() candidate_survivals = 0 score_buffer = deque([]) # determine the next informative position in the genome and move there if plus_tags and minus_tags: distance_to_next = plus_tags[0] - position + 1 position += distance_to_next | cf4313aa64a9e9b865d9f2f493cc2a5c955fc5bf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14395/cf4313aa64a9e9b865d9f2f493cc2a5c955fc5bf/peakzilla.py |
candidate_survivals = 0 | def find_peaks(self, chrom): # identify peak candidates on chromosome self.peaks[chrom] = [] # convert tag arrays to deque for fast appending and popping plus_tags = deque(self.ip_tags.get_tags(chrom, '+')) minus_tags = deque(self.ip_tags.get_tags(chrom, '-')) # initalize windows and stuff plus_window = deque([]) minus_window = deque([]) score_buffer = deque([]) peak_candidate = Peak() position = 0 while plus_tags and minus_tags: # fill windows while plus_tags and plus_tags[0] < position: plus_window.append(plus_tags.popleft()) while minus_tags and minus_tags[0] < (position + self.peak_shift): minus_window.append(minus_tags.popleft()) # get rid of old tags not fitting in the window any more while plus_window and plus_window[0] < (position - self.peak_shift): plus_window.popleft() while minus_window and minus_window[0] < position: minus_window.popleft() # add position to region if over threshold score = len(plus_window) + len(minus_window) if score > self.tag_threshold: # save all scores in buffer score_buffer.append(score) # get rid of old scores that are outside of the filter if len(score_buffer) > self.peak_size: score_buffer.popleft() # if current score is as big or bigger, consider it instead if score >= peak_candidate.tag_count: peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 # candidate survives if current score is smaller else: candidate_survivals += 1 # if candidate survives long enough do the expensive lookup if candidate_survivals == self.peak_shift: # check score buffer to see whether candidate is a maximum # candidate is in the middle of the buffer now if peak_candidate.tag_count == max(score_buffer): self.add_peak(peak_candidate, chrom) # consider current score next, reset survivals peak_candidate.tag_count = score peak_candidate.position = position peak_candidate.tags = list(plus_window) + list(minus_window) candidate_survivals = 0 # while in enriched region move windows in 1 bp steps position += 1 else: # if we still have a candidate check whether its a max and add if peak_candidate: if peak_candidate.tag_count == max(score_buffer): self.add_peak(peak_candidate, chrom) peak_candidate = Peak() candidate_survivals = 0 score_buffer = deque([]) # determine the next informative position in the genome and move there if plus_tags and minus_tags: distance_to_next = plus_tags[0] - position + 1 position += distance_to_next | cf4313aa64a9e9b865d9f2f493cc2a5c955fc5bf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14395/cf4313aa64a9e9b865d9f2f493cc2a5c955fc5bf/peakzilla.py |
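The three rows above record one fix in peakzilla's find_peaks: once a candidate peak has survived long enough to be checked (the survival counter moves onto the Peak object itself as peak_candidate.survivals), the patch resets the candidate to a fresh Peak() instead of re-seeding it with the current score. A minimal sketch of that survival-counter pattern, assuming a stripped-down Peak rather than peakzilla's real class:

    from collections import deque

    class Peak(object):
        # stripped-down stand-in for peakzilla's Peak (an assumption)
        def __init__(self):
            self.tag_count = 0
            self.position = 0
            self.survivals = 0

    def local_maxima(scores, shift):
        # a candidate survives one step per lower score; once it has survived
        # `shift` steps, confirm it against the buffered scores and emit it
        buffer = deque(maxlen=2 * shift + 1)
        candidate = Peak()
        peaks = []
        for position, score in enumerate(scores):
            buffer.append(score)
            if score >= candidate.tag_count:
                candidate.tag_count = score
                candidate.position = position
                candidate.survivals = 0
            else:
                candidate.survivals += 1
            if candidate.survivals == shift:
                if candidate.tag_count == max(buffer):
                    peaks.append((candidate.position, candidate.tag_count))
                candidate = Peak()  # the recorded fix: reset, don't re-seed
        return peaks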
|
global IGNORE_LIST | def load_lists(force=False): """Load ignore and filtered word lists""" debug_print('Loading ignore list') if not IGNORE_LIST or force is True: global IGNORE_LIST IGNORE_LIST = [ line.lower().strip() for line in open(settings.ignore_list) ] debug_print('Loading filtered word list') if not FILTER_WORDS or force is True: global FILTER_WORDS FILTER_WORDS = [ line.lower().strip() for line in open(settings.filtered_word_list) ] | da0f0396df83dfa4567a234ed0815264c9a64493 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8568/da0f0396df83dfa4567a234ed0815264c9a64493/repeater.py |
|
global FILTER_WORDS | def load_lists(force=False): """Load ignore and filtered word lists""" debug_print('Loading ignore list') if not IGNORE_LIST or force is True: global IGNORE_LIST IGNORE_LIST = [ line.lower().strip() for line in open(settings.ignore_list) ] debug_print('Loading filtered word list') if not FILTER_WORDS or force is True: global FILTER_WORDS FILTER_WORDS = [ line.lower().strip() for line in open(settings.filtered_word_list) ] | da0f0396df83dfa4567a234ed0815264c9a64493 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8568/da0f0396df83dfa4567a234ed0815264c9a64493/repeater.py |
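Both rows above delete a global statement that only appeared inside the if block, after IGNORE_LIST or FILTER_WORDS had already been read in the test expression. CPython rejects a global that follows a use of the same name in that scope (a SyntaxWarning in Python 2, a SyntaxError in Python 3), so the declaration has to come first. A sketch of the working placement, assuming the declaration is hoisted to the top of the function:

    IGNORE_LIST = []

    def load_lists(force=False):
        # declare before the name is read or assigned anywhere in this scope
        global IGNORE_LIST
        if not IGNORE_LIST or force:
            with open('ignore.txt') as f:  # 'ignore.txt' is a placeholder path
                IGNORE_LIST = [line.lower().strip() for line in f]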
|
else: save_id(settings.lastid,reply.id) | save_id(settings.lastid,reply.id) | def main(): auth = tweepy.BasicAuthHandler(username=settings.username, password=settings.password) api = tweepy.API(auth_handler=auth, secure=True, retry_count=3) last_id = get_last_id(settings.lastid) debug_print('Loading friends list') friends = api.friends_ids() debug_print('Friend list loaded, size: %d' % len(friends)) try: debug_print('Retrieving mentions') replies = api.mentions() except Exception, e: # quit on error here print e exit(1) # want these in ascending order, api orders them descending replies.reverse() for reply in replies: # ignore tweet if it's id is lower than our last tweeted id if reply.id > last_id and reply.user.id in friends: try: careful_retweet(api,reply) except HTTPError, e: print e.code() print e.read() except Exception, e: print 'e: %s' % e print repr(e) else: save_id(settings.lastid,reply.id) debug_print('Exiting cleanly') | fe0d37bfd57aba9337880ab3961550fe5c4adc8c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8568/fe0d37bfd57aba9337880ab3961550fe5c4adc8c/repeater.py |
else: debug_print('Received smaller ID, not saving. Old: %d, New: %s' % ( last_id, id)) | def save_id(statefile,id): """Save last status ID to a file""" last_id = get_last_id(statefile) if last_id < id: f = open(statefile,'w') f.write(str(id)) # no trailing newline f.close() # Don't need to do anything if for some reason the ID got smaller | 118eedbd15d5c29fd847e328fc29033fced9d896 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8568/118eedbd15d5c29fd847e328fc29033fced9d896/repeater.py |
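The first row above makes the save unconditional rather than an else clause of the try/except, so the last-seen status ID is recorded even after a handled retweet error; the second drops save_id's debug branch, leaving a silent no-op when the incoming ID has not advanced. A compact sketch of that monotonic persistence, with get_last_id written out here as an assumed reader rather than the source's exact implementation:

    import os

    def get_last_id(statefile):
        # assumed reader: 0 when the state file is missing or empty
        if not os.path.exists(statefile):
            return 0
        with open(statefile) as f:
            return int(f.read().strip() or 0)

    def save_id(statefile, new_id):
        # persist only monotonically increasing IDs; smaller IDs are a no-op
        if get_last_id(statefile) < new_id:
            with open(statefile, 'w') as f:
                f.write(str(new_id))  # no trailing newline, as in the source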
|
pass | for fd, obj in asyncore.socket_map.items(): obj.close() | def main_func(): options = _parse_options() if options.message_filter: from utils import MessageMap MessageMap.set_filter(options.message_filter) os.chdir(options.root) try: run_proxy(options) except KeyboardInterrupt: # todo: shut down the open connections cleanly pass """ import cProfile, sys p=open("profile", "w") sys.stdout = p cProfile.run("run_proxy(count = 5000, context = options)") p.close() """ | 662306156b2ad6900811b493e962a0d1e29ac277 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/662306156b2ad6900811b493e962a0d1e29ac277/dragonkeeper.py |
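The row above replaces a bare pass on KeyboardInterrupt with an explicit shutdown: every dispatcher registered in asyncore.socket_map gets closed so its socket is released. The same cleanup around the event loop, as a sketch (asyncore ships with the standard library up to Python 3.11 and was removed in 3.12):

    import asyncore

    try:
        asyncore.loop(timeout=0.2)
    except KeyboardInterrupt:
        # snapshot with list(): close() deletes entries from socket_map,
        # so iterating the live dict would break mid-loop
        for fd, dispatcher in list(asyncore.socket_map.items()):
            dispatcher.close()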
msg = '["%s",%s,%s,%s,%s]' % ( | message = '["%s",%s,%s,%s,%s]' % ( | def handle_scope_message(self, msg): msg = '["%s",%s,%s,%s,%s]' % ( msg[1], # service msg[2], # command msg[4], # status msg[5], # tag msg[8], # payload ) if self.debug: pretty_print("send to client:", msg, self.debug_format, self.debug_format_payload) self.send_message(msg) | c13b2ba635ee2da491abf2bb3ad577d14bd228e9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/c13b2ba635ee2da491abf2bb3ad577d14bd228e9/stpwebsocket.py |
self.send_message(msg) | self.send_message(message) | def handle_scope_message(self, msg): msg = '["%s",%s,%s,%s,%s]' % ( msg[1], # service msg[2], # command msg[4], # status msg[5], # tag msg[8], # payload ) if self.debug: pretty_print("send to client:", msg, self.debug_format, self.debug_format_payload) self.send_message(msg) | c13b2ba635ee2da491abf2bb3ad577d14bd228e9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/c13b2ba635ee2da491abf2bb3ad577d14bd228e9/stpwebsocket.py |
return ''.join([chr((n >> 8 * (3 - i)) & 0xff) for i in range(4)]) | return pack("!I", n) | def _get_number(self, in_str): n = int(''.join([i for i in in_str if i.isdigit()])) / in_str.count(' ') return ''.join([chr((n >> 8 * (3 - i)) & 0xff) for i in range(4)]) # throws DeprecationWarning: struct integer overflow masking is deprecated # for e.g. 2621808669 with Python 2.6.5 # return pack("!i", n & 0xffffffff) | 868a4c7226a2bf89036c70f81743420d8d29b063 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/868a4c7226a2bf89036c70f81743420d8d29b063/websocket.py |
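The row above is part of the draft-76 (hixie) WebSocket handshake: the digits of a Sec-WebSocket-Key, divided by its space count, must be serialized as an unsigned 32-bit big-endian integer. pack("!I", n) does exactly that, whereas the signed "!i" triggered the DeprecationWarning mentioned in the comment for values at or above 2**31, such as 2621808669. A sketch:

    from struct import pack

    def key_to_big_endian(key):
        # digits of the key divided by its space count (the draft-76 rule),
        # packed as an unsigned 32-bit big-endian integer
        digits = int(''.join(ch for ch in key if ch.isdigit()))
        n = digits // key.count(' ')
        return pack("!I", n)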
print "%s%s: %s" % ( indent * INDENT, name, value) | try: print "%s%s: %s" % ( indent * INDENT, name, value) except: print "%s%s: %s%s" % ( indent * INDENT, name, value[0:100], '...') | def pretty_print_payload_item(indent, name, definition, item): if item and "message" in definition: print "%s%s:" % (indent * INDENT, name) pretty_print_payload(item, definition["message"], indent=indent+1) else: value = item if "enum" in definition: value = "%s (%s)" % (definition['enum']['numbers'][item], item) elif item == None: value = "null" elif isinstance(item, str): value = "\"%s\"" % item print "%s%s: %s" % ( indent * INDENT, name, value) | 71d427774cd292cc126e15cdd04e63a60a4e7264 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/71d427774cd292cc126e15cdd04e63a60a4e7264/utils.py |
self.PATH_INFO = self.REQUEST_URI[pos:] | path_info = self.REQUEST_URI[pos:] if "?" in path_info: path_info = path_info[0:path_info.find("?")] self.PATH_INFO = path_info | def check_is_cgi(self, system_path, handler=".cgi"): # system path of the cgi script self.cgi_script = "" self.SCRIPT_NAME = "" self.PATH_INFO = "" if handler in system_path: script_path = system_path[0:system_path.find(handler) + len(handler)] if isfile(script_path): self.cgi_script = script_path pos = self.REQUEST_URI.find(handler) + len(handler) self.SCRIPT_NAME = self.REQUEST_URI[0:pos] self.PATH_INFO = self.REQUEST_URI[pos:] return bool(self.cgi_script) | 2da8bd96d1ebfa12a05a60f9d9a0566d3f805b18 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/2da8bd96d1ebfa12a05a60f9d9a0566d3f805b18/HTTPConnection.py |
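Per the CGI specification (RFC 3875), PATH_INFO is the path segment after the script name and never includes the query string, which is what the added '?' check above enforces. A sketch of the split in isolation:

    def split_path_info(request_uri, script_name):
        # PATH_INFO: whatever follows the script name, minus the query string
        path_info = request_uri[len(script_name):]
        if "?" in path_info:
            path_info = path_info.split("?", 1)[0]
        return path_info

    # split_path_info("/cgi-bin/app.cgi/extra/bits?x=1", "/cgi-bin/app.cgi")
    # -> "/extra/bits"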
headers_raw, first_line, self.headers, self.in_buffer = raw_parsed_headers | (headers_raw, first_line, self.headers, self.in_buffer) = raw_parsed_headers | def read_headers(self): raw_parsed_headers = parse_headers(self.in_buffer) if raw_parsed_headers: # to dispatch any hanging timeout response self.flush() headers_raw, first_line, self.headers, self.in_buffer = raw_parsed_headers method, path, protocol = first_line.split(BLANK, 2) self.REQUEST_URI = path path = path.lstrip("/") if "?" in path: path, self.query = path.split('?', 1) arguments = path.split("/") command = arguments and arguments.pop(0) or "" command = command.replace('-', '_').replace('.', '_') system_path = URI_to_system_path(path.rstrip("/")) or "." self.method = method self.path = path self.command = command self.arguments = arguments self.system_path = system_path self.timeout = time() + TIMEOUT if self.cgi_enabled: self.check_is_cgi(system_path) # POST if method == "POST": if "Content-Length" in self.headers: self.content_length = int(self.headers["Content-Length"]) self.check_input = self.read_content self.check_input() # GET elif method == "GET": if hasattr(self, command) and hasattr(getattr(self, command), '__call__'): getattr(self, command)() else: if self.cgi_script: self.handle_cgi() elif os.path.exists(system_path) or not path: self.serve(path, system_path) elif path == "favicon.ico": self.serve(path, path_join(SOURCE_ROOT, "favicon.ico")) else: content = "The server cannot handle: %s" % path self.out_buffer += NOT_FOUND % ( get_timestamp(), len(content), content) self.timeout = 0 if self.in_buffer: self.check_input() # Not implemented method else: content = "The server cannot handle: %s" % method self.out_buffer += NOT_FOUND % ( get_timestamp(), len(content), content) self.timeout = 0 | 052795b110d5b01b0d20d969fe4fafff4a364084 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/052795b110d5b01b0d20d969fe4fafff4a364084/HTTPConnection.py |
if hasattr(self, command) and hasattr(getattr(self, command), '__call__'): | if hasattr(self, command) and \ hasattr(getattr(self, command), '__call__'): | def read_headers(self): raw_parsed_headers = parse_headers(self.in_buffer) if raw_parsed_headers: # to dispatch any hanging timeout response self.flush() headers_raw, first_line, self.headers, self.in_buffer = raw_parsed_headers method, path, protocol = first_line.split(BLANK, 2) self.REQUEST_URI = path path = path.lstrip("/") if "?" in path: path, self.query = path.split('?', 1) arguments = path.split("/") command = arguments and arguments.pop(0) or "" command = command.replace('-', '_').replace('.', '_') system_path = URI_to_system_path(path.rstrip("/")) or "." self.method = method self.path = path self.command = command self.arguments = arguments self.system_path = system_path self.timeout = time() + TIMEOUT if self.cgi_enabled: self.check_is_cgi(system_path) # POST if method == "POST": if "Content-Length" in self.headers: self.content_length = int(self.headers["Content-Length"]) self.check_input = self.read_content self.check_input() # GET elif method == "GET": if hasattr(self, command) and hasattr(getattr(self, command), '__call__'): getattr(self, command)() else: if self.cgi_script: self.handle_cgi() elif os.path.exists(system_path) or not path: self.serve(path, system_path) elif path == "favicon.ico": self.serve(path, path_join(SOURCE_ROOT, "favicon.ico")) else: content = "The server cannot handle: %s" % path self.out_buffer += NOT_FOUND % ( get_timestamp(), len(content), content) self.timeout = 0 if self.in_buffer: self.check_input() # Not implemented method else: content = "The server cannot handle: %s" % method self.out_buffer += NOT_FOUND % ( get_timestamp(), len(content), content) self.timeout = 0 | 052795b110d5b01b0d20d969fe4fafff4a364084 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/052795b110d5b01b0d20d969fe4fafff4a364084/HTTPConnection.py |
environ["PATH_TRANSLATED"] = cwd + self.PATH_INFO.replace("/", os.path.sep) | environ["PATH_TRANSLATED"] = \ cwd + self.PATH_INFO.replace("/", os.path.sep) | def handle_cgi(self): import subprocess is_failed = False remote_addr, remote_port = self.socket.getpeername() cwd = os.getcwd() environ = { # os "COMSPEC": os.environ["COMSPEC"], "PATH": os.environ["PATH"], "PATHEXT": os.environ["PATHEXT"], "SYSTEMROOT": os.environ["SYSTEMROOT"], "WINDIR": os.environ["WINDIR"], # server "DOCUMENT_ROOT": os.getcwd().replace(os.path.sep, "/"), "GATEWAY_INTERFACE": "CGI/1.1", "QUERY_STRING": self.query, "REMOTE_ADDR": remote_addr, "REMOTE_PORT": str(remote_port), "REQUEST_METHOD": self.method, "REQUEST_URI": self.REQUEST_URI, "SCRIPT_FILENAME": cwd.replace(os.path.sep, "/") + self.SCRIPT_NAME, "SCRIPT_NAME": self.SCRIPT_NAME, "SERVER_ADDR": self.context.SERVER_ADDR, "SERVER_ADMIN": "", "SERVER_NAME": self.context.SERVER_NAME, "SERVER_PORT": str(self.context.SERVER_PORT), "SERVER_PROTOCOL": " HTTP/1.1", "SERVER_SIGNATURE": "", "SERVER_SOFTWARE": "dragonkeeper/%s" % VERSION, } if self.PATH_INFO: environ["PATH_INFO"] = self.PATH_INFO environ["PATH_TRANSLATED"] = cwd + self.PATH_INFO.replace("/", os.path.sep) for header in self.headers: key = "HTTP_%s" % header.upper().replace('-', '_') environ[key] = self.headers[header] script_abs_path = os.path.abspath(self.cgi_script) response_code = 200 response_token = 'OK' stdoutdata = "" stderrdata = "" headers = {} content = "" try: file = open(script_abs_path, 'rb') first_line = file.readline() file.close() except: is_failed = True if not is_failed: if first_line.startswith("#!"): first_line = first_line[2:].strip() else: is_failed = True if not is_failed: p = subprocess.Popen( [first_line, script_abs_path], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, env=environ, cwd=os.path.split(script_abs_path)[0] ) input = None if self.method == "POST": input = self.raw_post_data stdoutdata, stderrdata = p.communicate(input) if stderrdata: content = "\n". join([ "Error occured in the subprocess", "-------------------------------", "", stderrdata ]) headers['Content-Type'] = 'text/plain' elif stdoutdata: raw_parsed_headers = parse_headers(CRLF + stdoutdata) if raw_parsed_headers: headers_raw, first_line, headers, content = raw_parsed_headers if 'Status' in headers: response_code, response_token = headers.pop('Status').split(' ', 1) else: # assume its html content = stdoutdata headers['Content-Type'] = 'text/html' headers['Content-Length'] = len(content) self.out_buffer += RESPONSE_BASIC % ( response_code, response_token, get_timestamp(), "".join( ["%s: %s\r\n" % (key, headers[key]) for key in headers] + [CRLF, content] ) ) self.timeout = 0 | 052795b110d5b01b0d20d969fe4fafff4a364084 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/052795b110d5b01b0d20d969fe4fafff4a364084/HTTPConnection.py |
headers_raw, first_line, headers, content = raw_parsed_headers | (headers_raw, first_line, headers, content) = raw_parsed_headers | def handle_cgi(self): import subprocess is_failed = False remote_addr, remote_port = self.socket.getpeername() cwd = os.getcwd() environ = { # os "COMSPEC": os.environ["COMSPEC"], "PATH": os.environ["PATH"], "PATHEXT": os.environ["PATHEXT"], "SYSTEMROOT": os.environ["SYSTEMROOT"], "WINDIR": os.environ["WINDIR"], # server "DOCUMENT_ROOT": os.getcwd().replace(os.path.sep, "/"), "GATEWAY_INTERFACE": "CGI/1.1", "QUERY_STRING": self.query, "REMOTE_ADDR": remote_addr, "REMOTE_PORT": str(remote_port), "REQUEST_METHOD": self.method, "REQUEST_URI": self.REQUEST_URI, "SCRIPT_FILENAME": cwd.replace(os.path.sep, "/") + self.SCRIPT_NAME, "SCRIPT_NAME": self.SCRIPT_NAME, "SERVER_ADDR": self.context.SERVER_ADDR, "SERVER_ADMIN": "", "SERVER_NAME": self.context.SERVER_NAME, "SERVER_PORT": str(self.context.SERVER_PORT), "SERVER_PROTOCOL": " HTTP/1.1", "SERVER_SIGNATURE": "", "SERVER_SOFTWARE": "dragonkeeper/%s" % VERSION, } if self.PATH_INFO: environ["PATH_INFO"] = self.PATH_INFO environ["PATH_TRANSLATED"] = cwd + self.PATH_INFO.replace("/", os.path.sep) for header in self.headers: key = "HTTP_%s" % header.upper().replace('-', '_') environ[key] = self.headers[header] script_abs_path = os.path.abspath(self.cgi_script) response_code = 200 response_token = 'OK' stdoutdata = "" stderrdata = "" headers = {} content = "" try: file = open(script_abs_path, 'rb') first_line = file.readline() file.close() except: is_failed = True if not is_failed: if first_line.startswith("#!"): first_line = first_line[2:].strip() else: is_failed = True if not is_failed: p = subprocess.Popen( [first_line, script_abs_path], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, env=environ, cwd=os.path.split(script_abs_path)[0] ) input = None if self.method == "POST": input = self.raw_post_data stdoutdata, stderrdata = p.communicate(input) if stderrdata: content = "\n". join([ "Error occured in the subprocess", "-------------------------------", "", stderrdata ]) headers['Content-Type'] = 'text/plain' elif stdoutdata: raw_parsed_headers = parse_headers(CRLF + stdoutdata) if raw_parsed_headers: headers_raw, first_line, headers, content = raw_parsed_headers if 'Status' in headers: response_code, response_token = headers.pop('Status').split(' ', 1) else: # assume its html content = stdoutdata headers['Content-Type'] = 'text/html' headers['Content-Length'] = len(content) self.out_buffer += RESPONSE_BASIC % ( response_code, response_token, get_timestamp(), "".join( ["%s: %s\r\n" % (key, headers[key]) for key in headers] + [CRLF, content] ) ) self.timeout = 0 | 052795b110d5b01b0d20d969fe4fafff4a364084 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/052795b110d5b01b0d20d969fe4fafff4a364084/HTTPConnection.py |
response_code, response_token = headers.pop('Status').split(' ', 1) | response_code, response_token = \ headers.pop('Status').split(' ', 1) | def handle_cgi(self): import subprocess is_failed = False remote_addr, remote_port = self.socket.getpeername() cwd = os.getcwd() environ = { # os "COMSPEC": os.environ["COMSPEC"], "PATH": os.environ["PATH"], "PATHEXT": os.environ["PATHEXT"], "SYSTEMROOT": os.environ["SYSTEMROOT"], "WINDIR": os.environ["WINDIR"], # server "DOCUMENT_ROOT": os.getcwd().replace(os.path.sep, "/"), "GATEWAY_INTERFACE": "CGI/1.1", "QUERY_STRING": self.query, "REMOTE_ADDR": remote_addr, "REMOTE_PORT": str(remote_port), "REQUEST_METHOD": self.method, "REQUEST_URI": self.REQUEST_URI, "SCRIPT_FILENAME": cwd.replace(os.path.sep, "/") + self.SCRIPT_NAME, "SCRIPT_NAME": self.SCRIPT_NAME, "SERVER_ADDR": self.context.SERVER_ADDR, "SERVER_ADMIN": "", "SERVER_NAME": self.context.SERVER_NAME, "SERVER_PORT": str(self.context.SERVER_PORT), "SERVER_PROTOCOL": " HTTP/1.1", "SERVER_SIGNATURE": "", "SERVER_SOFTWARE": "dragonkeeper/%s" % VERSION, } if self.PATH_INFO: environ["PATH_INFO"] = self.PATH_INFO environ["PATH_TRANSLATED"] = cwd + self.PATH_INFO.replace("/", os.path.sep) for header in self.headers: key = "HTTP_%s" % header.upper().replace('-', '_') environ[key] = self.headers[header] script_abs_path = os.path.abspath(self.cgi_script) response_code = 200 response_token = 'OK' stdoutdata = "" stderrdata = "" headers = {} content = "" try: file = open(script_abs_path, 'rb') first_line = file.readline() file.close() except: is_failed = True if not is_failed: if first_line.startswith("#!"): first_line = first_line[2:].strip() else: is_failed = True if not is_failed: p = subprocess.Popen( [first_line, script_abs_path], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, env=environ, cwd=os.path.split(script_abs_path)[0] ) input = None if self.method == "POST": input = self.raw_post_data stdoutdata, stderrdata = p.communicate(input) if stderrdata: content = "\n". join([ "Error occured in the subprocess", "-------------------------------", "", stderrdata ]) headers['Content-Type'] = 'text/plain' elif stdoutdata: raw_parsed_headers = parse_headers(CRLF + stdoutdata) if raw_parsed_headers: headers_raw, first_line, headers, content = raw_parsed_headers if 'Status' in headers: response_code, response_token = headers.pop('Status').split(' ', 1) else: # assume its html content = stdoutdata headers['Content-Type'] = 'text/html' headers['Content-Length'] = len(content) self.out_buffer += RESPONSE_BASIC % ( response_code, response_token, get_timestamp(), "".join( ["%s: %s\r\n" % (key, headers[key]) for key in headers] + [CRLF, content] ) ) self.timeout = 0 | 052795b110d5b01b0d20d969fe4fafff4a364084 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10923/052795b110d5b01b0d20d969fe4fafff4a364084/HTTPConnection.py |
def __call__(self, context, rule, event): obj = context if IObjectEvent.providedBy(event): obj = event.object uid_method = getattr(obj, 'UID', None) if uid_method is not None: uid = uid_method() else: uid = '/'.join(context.getPhysicalPath()) if (uid, rule.__name__,) in self.executed: return False else: self.executed.add((uid, rule.__name__,)) return True | 4b066402847119c3b3c98e8c95b539d7d86015e2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12184/4b066402847119c3b3c98e8c95b539d7d86015e2/handlers.py |
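The __call__ above is a per-request duplication filter: execution is keyed on a (UID, rule name) pair held in a set, with the object's physical path as the fallback key when no UID method exists, so each rule fires at most once per object. The pattern in isolation, as a sketch rather than plone.app.contentrules' actual class:

    class DuplicateRuleFilter(object):
        def __init__(self):
            self.executed = set()

        def __call__(self, uid, rule_name):
            # True the first time a (uid, rule) pair is seen, False after
            key = (uid, rule_name)
            if key in self.executed:
                return False
            self.executed.add(key)
            return True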
||
def close(event): """Close the event processing when the request ends """ if hasattr(_status, 'rule_filter'): _status.rule_filter.reset() if hasattr(_status, 'delayed_events'): _status.delayed_events = {} | 4b066402847119c3b3c98e8c95b539d7d86015e2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12184/4b066402847119c3b3c98e8c95b539d7d86015e2/handlers.py |
||
def execute(context, event): """Execute all rules relative to the context, and bubble as appropriate. """ # Do nothing if there is no rule storage or it is not active storage = queryUtility(IRuleStorage) if storage is None or not storage.active: return init() rule_filter = _status.rule_filter # Stop if someone else is already executing. This could happen if, # for example, a rule triggered here caused another event to be fired. if rule_filter.in_progress: return # Tell other event handlers to be equally kind rule_filter.in_progress = True # Prepare to break hard if a rule demanded execution be stopped try: # Try to execute rules in the context. It may not work if the context # is not a rule executor, but we may still want to bubble events executor = IRuleExecutor(context, None) if executor is not None: executor(event, bubbled=False, rule_filter=rule_filter) # Do not bubble beyond the site root if not ISiteRoot.providedBy(context): parent = aq_parent(aq_inner(context)) while parent is not None: executor = IRuleExecutor(parent, None) if executor is not None: executor(event, bubbled=True, rule_filter=rule_filter) if ISiteRoot.providedBy(parent): parent = None else: parent = aq_parent(aq_inner(parent)) except StopRule: pass # We are done - other events that occur after this one will be allowed to # execute rules again rule_filter.in_progress = False | 4b066402847119c3b3c98e8c95b539d7d86015e2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12184/4b066402847119c3b3c98e8c95b539d7d86015e2/handlers.py |
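The execute function above hinges on a re-entrancy latch: rule_filter.in_progress stops rules fired by rule-generated events from cascading, while the aq_parent walk bubbles the event upward until ISiteRoot. The latch alone, as a sketch; unlike the recorded code it resets the flag in a finally block, so an unexpected exception cannot leave it stuck:

    class Guard(object):
        def __init__(self):
            self.in_progress = False

    def run_guarded(guard, action):
        # handlers re-entered while a rule is already executing become no-ops
        if guard.in_progress:
            return
        guard.in_progress = True
        try:
            action()
        finally:
            guard.in_progress = False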
||
def added(event): """When an object is added, execute rules assigned to its new parent. There is special handling for Archetypes objects. """ if is_portal_factory(event.object): return # The object added event executes too early for Archetypes objects. # We need to delay execution until we receive a subsequent IObjectInitializedEvent if not IBaseObject.providedBy(event.object): execute(event.newParent, event) else: init() _status.delayed_events[IObjectInitializedEvent] = event | 4b066402847119c3b3c98e8c95b539d7d86015e2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12184/4b066402847119c3b3c98e8c95b539d7d86015e2/handlers.py |
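added, above, parks the too-early IObjectAddedEvent in a per-request dictionary keyed by the follow-up event interface, and archetypes_initialized (next row) later pops and replays it. The deferral pattern in miniature, with handle as a hypothetical stand-in for execute(event.newParent, event):

    delayed = {}

    def handle(event):
        # hypothetical stand-in for execute(event.newParent, event)
        print("executing rules for", event)

    def on_added(event):
        # fires too early for Archetypes content, so park the event
        delayed['initialized'] = event

    def on_initialized(event):
        pending = delayed.pop('initialized', None)
        if pending is not None:
            handle(pending)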
||
def archetypes_initialized(event): """Pick up the delayed IObjectAddedEvent when an Archetypes object is initialised. """ if is_portal_factory(event.object): return if not IBaseObject.providedBy(event.object): return init() delayed_event = _status.delayed_events.get(IObjectInitializedEvent, None) if delayed_event is not None: _status.delayed_events[IObjectInitializedEvent] = None execute(delayed_event.newParent, delayed_event) | 4b066402847119c3b3c98e8c95b539d7d86015e2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12184/4b066402847119c3b3c98e8c95b539d7d86015e2/handlers.py |
||
def removed(event): """When an IObjectRemevedEvent was received, execute rules assigned to its previous parent. """ if is_portal_factory(event.object): return execute(event.oldParent, event) | 4b066402847119c3b3c98e8c95b539d7d86015e2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12184/4b066402847119c3b3c98e8c95b539d7d86015e2/handlers.py |