Dataset columns (one field per line in the records below):
rem      string, 1 to 322k chars   - code removed by the commit
add      string, 0 to 2.05M chars  - code added by the commit (may be empty, in which case the line is absent)
context  string, 4 to 228k chars   - the enclosing function or definition
meta     string, 156 to 215 chars  - commit hash and source file path
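A minimal sketch of how such records might be modeled in Python; the Record class and load_records helper are hypothetical names, and the sketch assumes every record carries all four fields:

from dataclasses import dataclass

@dataclass
class Record:
    rem: str      # code removed by the commit
    add: str      # code added by the commit (may be empty)
    context: str  # enclosing function or definition
    meta: str     # commit hash and source file path

def load_records(fields):
    # fields: a flat list of column values in rem/add/context/meta order
    for i in range(0, len(fields) - 3, 4):
        yield Record(*fields[i:i + 4])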
self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
self._source_dir = layout_package.path_utils.get_absolute_path( self._source_dir)
def __init__(self, options, args, test): # The known list of tests. # Recognise the original abbreviations as well as full executable names. self._test_list = { "base": self.TestBase, "base_unittests": self.TestBase, "browser": self.TestBrowser, "browser_tests": self.TestBrowser, "googleurl": self.TestGURL, "googleurl_unittests": self.TestGURL, "ipc": self.TestIpc, "ipc_tests": self.TestIpc, "layout": self.TestLayout, "layout_tests": self.TestLayout, "media": self.TestMedia, "media_unittests": self.TestMedia, "net": self.TestNet, "net_unittests": self.TestNet, "printing": self.TestPrinting, "printing_unittests": self.TestPrinting, "startup": self.TestStartup, "startup_tests": self.TestStartup, "sync": self.TestSync, "sync_unit_tests": self.TestSync, "test_shell": self.TestTestShell, "test_shell_tests": self.TestTestShell, "ui": self.TestUI, "ui_tests": self.TestUI, "unit": self.TestUnit, "unit_tests": self.TestUnit, "app": self.TestApp, "app_unittests": self.TestApp, }
826716cf8f80fdf2d67e0d427ed89b309427e368 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5060/826716cf8f80fdf2d67e0d427ed89b309427e368/chrome_tests.py
script = os.path.join(self._source_dir, "third_party", "WebKit", "WebKitTools", "Scripts", "run-chromium-webkit-tests")
script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests", "run_webkit_tests.py")
def TestLayoutChunk(self, chunk_num, chunk_size): # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the # list of tests. Wrap around to beginning of list at end. # If chunk_size is zero, run all tests in the list once. # If a text file is given as argument, it is used as the list of tests. # # Build the ginormous commandline in 'cmd'. # It's going to be roughly # python valgrind_test.py ... python run-chromium-webkit-tests ... # but we'll use the --indirect flag to valgrind_test.py # to avoid valgrinding python. # Start by building the valgrind_test.py commandline. cmd = self._DefaultCommand("webkit") cmd.append("--trace_children") cmd.append("--indirect") # Now build script_cmd, the run_chromium-webkits_tests commandline # Store each chunk in its own directory so that we can find the data later chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num) test_shell = os.path.join(self._options.build_dir, "test_shell") out_dir = os.path.join(google.path_utils.ScriptDir(), "latest") out_dir = os.path.join(out_dir, chunk_dir) if os.path.exists(out_dir): old_files = glob.glob(os.path.join(out_dir, "*.txt")) for f in old_files: os.remove(f) else: os.makedirs(out_dir) script = os.path.join(self._source_dir, "third_party", "WebKit", "WebKitTools", "Scripts", "run-chromium-webkit-tests") script_cmd = ["python", script, "--run-singly", "-v", "--noshow-results", "--time-out-ms=200000", "--nocheck-sys-deps"] # Pass build mode to run-chromium-webkit-tests. We aren't passed it # directly, so parse it out of build_dir. run-chromium-webkit-tests # can only handle the two values "Release" and "Debug". # TODO(Hercules): unify how all our scripts pass around build mode # (--mode / --target / --build_dir / --debug) if self._options.build_dir.endswith("Debug"): script_cmd.append("--debug"); if (chunk_size > 0): script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size)) if len(self._args): # if the arg is a txt file, then treat it as a list of tests if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt": script_cmd.append("--test-list=%s" % self._args[0]) else: script_cmd.extend(self._args) self._ReadGtestFilterFile("layout", script_cmd) # Now run script_cmd with the wrapper in cmd cmd.extend(["--"]) cmd.extend(script_cmd) return valgrind_test.RunTool(cmd, "layout")
826716cf8f80fdf2d67e0d427ed89b309427e368 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5060/826716cf8f80fdf2d67e0d427ed89b309427e368/chrome_tests.py
os.path.join(chrome_src, 'chrome', 'Release')],
os.path.join(chrome_src, 'build', 'Debug'), os.path.join(chrome_src, 'chrome', 'Release'), os.path.join(chrome_src, 'build', 'Release')],
def _LocateBinDirs(): """Setup a few dirs where we expect to find dependency libraries.""" script_dir = os.path.dirname(__file__) chrome_src = os.path.join(script_dir, os.pardir, os.pardir, os.pardir) bin_dirs = { 'linux2': [ os.path.join(chrome_src, 'out', 'Debug'), os.path.join(chrome_src, 'sconsbuild', 'Debug'), os.path.join(chrome_src, 'out', 'Release'), os.path.join(chrome_src, 'sconsbuild', 'Release')], 'darwin': [ os.path.join(chrome_src, 'xcodebuild', 'Debug'), os.path.join(chrome_src, 'xcodebuild', 'Release')], 'win32': [ os.path.join(chrome_src, 'chrome', 'Debug'), os.path.join(chrome_src, 'chrome', 'Release')], 'cygwin': [ os.path.join(chrome_src, 'chrome', 'Debug'), os.path.join(chrome_src, 'chrome', 'Release')], } deps_dirs = [ os.path.join(script_dir, os.pardir, os.pardir, os.pardir, 'third_party'), script_dir, ] sys.path += map(os.path.normpath, bin_dirs.get(sys.platform, []) + deps_dirs)
5ef62836924fcbc86b81b6988203cc6bb9025274 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5060/5ef62836924fcbc86b81b6988203cc6bb9025274/pyauto.py
BASE_DIRECTORY = BASE_DIRECTORY.lower()
def main(argv): usage = """Usage: python %prog [--root <root>] [tocheck] tocheck Specifies the directory, relative to root, to check. This defaults to "." so it checks everything.
2af82b1d4a3b9c163976e54cfbd0f8c35876c9de /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5060/2af82b1d4a3b9c163976e54cfbd0f8c35876c9de/checkperms.py
if match: content = callback + '(' + content + ')'
if match: content = callback + '(' + content.decode('utf-8') + ')'
def get(self): extURL = cgi.escape(self.request.get('extURL')) extMethod = cgi.escape(self.request.get('extMethod')) queryString = cgi.escape(self.request.query_string) queryDict = dict(cgi.parse_qsl(queryString)) callback = cgi.escape(self.request.get('_callback')) if queryString: error = 1 method = urlfetch.POST if extURL: del queryDict['extURL'] if extMethod: del queryDict['extMethod'] m = extMethod.lower() if m == 'put': method = urlfetch.PUT elif m == 'delete': method = urlfetch.DELETE elif m == 'get': method = urlfetch.GET # Huh?? elif m == 'head': method = urlfetch.HEAD # Oh, wait the minute... if len(queryDict): try: data = urllib.urlencode(queryDict) result = urlfetch.fetch(extURL, method=method, payload=data) if result.status_code == 200 or result.status_code == 201: error = 0 self.response.headers['Content-Type'] = 'application/javascript; charset=utf-8' content = result.content if callback: logging.info('Adding callback to JSON') exp = re.compile('^[A-Za-z_$][A-Za-z0-9._$]*?$') match = exp.match(callback) if match: content = callback + '(' + content + ')' self.response.out.write(content) except urlfetch.Error: logging.error('urlfetch error') error = 1 if error: self.response.set_status(400) self.response.out.write('Status: 400 Error parsing URL. There was an error processing your request: Error parsing URL.') else: self.response.out.write(""" <!DOCTYPE html> <title>jsonptunnel</title> <style> body{font-family: helvetica, arial, sans-serif} var{font-weight: bold; font-style: normal;} dt{display: list-item;} dl{margin-left: 40px;} </style> <h1>jsonptunnel</h1> <p>JSONP tunnel for letting you POST to remote services from your client-side JavaScript application and receive JSONP data.</p> <p><a href="http://labs.thinkminimo.com/jsonptunnel/#example">Try it out on the example form</a> and put <strong>http://jsonptunnel.appspot.com/</strong> as the jsonptunnel URL.</p> <p>Or try the following URL: <a href="/?callback=foo&amp;extURL=http://dipert.org/alan/calc.php&amp;num1=1&amp;num2=2">/?callback=foo&amp;extURL=http://dipert.org/alan/calc.php&amp;num1=1&amp;num2=2</a></p> <p>The parameters:</p> <dl> <dt><var>extURL</var></dt> <dd>Indicates the <em>external</em> web service URL. <strong>Required</strong>.</dd> <dt><var>extMethod</var> <em>(experimental)</em></dt> <dd>Indicates the HTTP method to use for the request, such as: <ul> <li>post <em>(default)</em></li> <li>put</li> <li>delete</li> </ul> </dd> <dt>...and any parameters to pass to the web service.</dt> </dl> <p>Inspired by <a href="http://ubergibson.com/">Alan Dipert</a>'s <a href="http://labs.thinkminimo.com/jsonptunnel/">jsonptunnel</a>. <a href="http://jsonptunnel.googlecode.com/">Google Code</a></p> """)
6c0c6127b88c8d22e8a85c061c829495e7b102cd /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4730/6c0c6127b88c8d22e8a85c061c829495e7b102cd/main.py
entry += " '%s', '%s', '%s'))\n" % ( moduleName, 'deprecated', 'deprecated' )
if kind == "Library": entry += " '%s', '%s', '%s'), '%s')\n" % ( moduleName, 'deprecated', 'deprecated', moduleName.upper()) else: entry += " '%s', '%s', '%s'))\n" % ( moduleName, 'deprecated', 'deprecated')
def _appendToProjectsPy(self, moduleName, branchLocation, destination, template):
3925db88d2e61520883dbac95d7c6c3a691b5a25 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8259/3925db88d2e61520883dbac95d7c6c3a691b5a25/CreateModule.py
self.addPluginPath(os.path.join("/usr","lib", "python%d.%d" % (major, minor), "dist-packages", "openwns", "wrowser", "playgroundPlugins"))
self.addPluginPath(os.path.join("/usr","local","lib", "python%d.%d" % (major, minor), "dist-packages", "openwns", "wrowser", "playgroundPlugins"))
def __init__(self): """ Initialization of members. No other functionality. """ usage = "" usage += "The list below shows global available options.\n"
e36edf9b9c57abae39db63edbd5bd7eab0682e0c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8259/e36edf9b9c57abae39db63edbd5bd7eab0682e0c/Core.py
self.field, self.form, self),
self.field, self.form, self.content),
def update(self): value = zope.component.queryMultiAdapter( (self.context, self.request, self.widget, self.field, self.form, self), interfaces.IValue, name='message') if value is not None: self.message = value.get() else: self.message = self.createMessage()
146fdad2add02d557db2e20767788118fb4c9db2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9738/146fdad2add02d557db2e20767788118fb4c9db2/error.py
iface = zope.interface.interface.InterfaceClass( 'IGeneratedForObject_%i' %hash(spec)) zope.interface.alsoProvides(spec, iface) spec = iface
ifaceName = 'IGeneratedForObject_%i' %hash(spec) existingInterfaces = [ i for i in zope.interface.directlyProvidedBy(spec) if i.__name__ == ifaceName ] if len(existingInterfaces) > 0: spec = existingInterfaces[0] else: iface = zope.interface.interface.InterfaceClass(ifaceName) zope.interface.alsoProvides(spec, iface) spec = iface
def getSpecification(spec, force=False): """Get the specification of the given object. If the given object is already a specification acceptable to the component architecture, it is simply returned. This is true for classes and specification objects (which includes interfaces). In case of instances, an interface is generated on the fly and tagged onto the object. Then the interface is returned as the specification. """ # If the specification is an instance, then we do some magic. if (force or (spec is not None and not zope.interface.interfaces.ISpecification.providedBy(spec) and not isinstance(spec, classTypes)) ): # Step 1: Create an interface iface = zope.interface.interface.InterfaceClass( 'IGeneratedForObject_%i' %hash(spec)) # Step 2: Directly-provide the interface on the specification zope.interface.alsoProvides(spec, iface) # Step 3: Make the new interface the specification for this instance spec = iface return spec
be0bba083835a33b27b4b10491d68f7fc8329882 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9738/be0bba083835a33b27b4b10491d68f7fc8329882/util.py
return self.data.get(self.field.__name__, self.field.missing_value)
value = self.data.get(self.field.__name__, _marker) if value is _marker: raise AttributeError return value
def get(self): """See z3c.form.interfaces.IDataManager""" return self.data.get(self.field.__name__, self.field.missing_value)
f6391c0844f6d93d411beb0a137738bfb5fb71a8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9738/f6391c0844f6d93d411beb0a137738bfb5fb71a8/datamanager.py
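The record above swaps a missing_value fallback for a sentinel check that raises AttributeError when the key is genuinely absent. A minimal standalone sketch of that sentinel pattern (the names _marker and get_value are illustrative, not from z3c.form):

_marker = object()  # unique sentinel; cannot collide with any real stored value

def get_value(data, key):
    value = data.get(key, _marker)
    if value is _marker:
        # distinguishes "key absent" from "key stored as None/missing_value"
        raise AttributeError(key)
    return value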
baseuri+"
baseuri+prefix,
def _construct_ids(self, element, prefix, baseuri, skip_fragments=[], find_definitions = False): find_definitions_recursive = find_definitions counters = defaultdict(int) if isinstance(element, CompoundStructure): # Find concept definitions if isinstance(element, Paragraf): # check whether the first paragraph contains text that # suggests definitions follow # log.debug("Testing %r against some regexes" % element[0][0]) if self.re_definitions(element[0][0]): find_definitions = "normal" if (self.re_brottsdef(element[0][0]) or self.re_brottsdef_alt(element[0][0])): find_definitions = "brottsrubricering" if self.re_parantesdef(element[0][0]): find_definitions = "parantes" if self.re_loptextdef(element[0][0]): find_definitions = "loptext"
21ee45414c1c3eda58f24ceeca3d664bce6dc2f5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4027/21ee45414c1c3eda58f24ceeca3d664bce6dc2f5/SFS.py
sets = [{'label':'Naive set 1' 'predicate',TEMP['naive1'], 'data',rs1},
sets = [{'label':'Naive set 1', 'predicate':TEMP['naive1'], 'data':rs1},
def prep_annotation_file(self,basefile): print "prep_annotation_file"
78dffbf55cb8ad576cd25c3d3c8a3745589651ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4027/78dffbf55cb8ad576cd25c3d3c8a3745589651ab/EurlexTreaties.py
'predicate', TEMP['naive2'], 'data',rs2},
'predicate':TEMP['naive2'], 'data':rs2},
def prep_annotation_file(self,basefile): print "prep_annotation_file"
78dffbf55cb8ad576cd25c3d3c8a3745589651ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4027/78dffbf55cb8ad576cd25c3d3c8a3745589651ab/EurlexTreaties.py
'predicate', TEMP['naive3'], 'data',rs3},
'predicate':TEMP['naive3'], 'data':rs3},
def prep_annotation_file(self,basefile): print "prep_annotation_file"
78dffbf55cb8ad576cd25c3d3c8a3745589651ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4027/78dffbf55cb8ad576cd25c3d3c8a3745589651ab/EurlexTreaties.py
'predicate', TEMP['naive4'], 'data',rs4}]
'predicate':TEMP['naive4'], 'data':rs4}]
def prep_annotation_file(self,basefile): print "prep_annotation_file"
78dffbf55cb8ad576cd25c3d3c8a3745589651ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4027/78dffbf55cb8ad576cd25c3d3c8a3745589651ab/EurlexTreaties.py
pass
def prep_annotation_file(self,basefile): print "prep_annotation_file"
78dffbf55cb8ad576cd25c3d3c8a3745589651ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4027/78dffbf55cb8ad576cd25c3d3c8a3745589651ab/EurlexTreaties.py
re_urisegments = re.compile(r'([\w]+://[^/]+/[^\d]*)(\d+:(bih\. |N|)?\d+( s\.\d+|))
re_urisegments = re.compile(r'([\w]+://[^/]+/[^\d]*)(\d+:(bih\.[_ ]|N|)?\d+([_ ]s\.\d+|))
def __str__(self): return repr(self.value)
9d50284969d8486e199b290be7d233578695e282 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4027/9d50284969d8486e199b290be7d233578695e282/LegalRef.py
def parse(self, indata, baseuri="http://rinfo.lagrummet.se/publ/sfs/9999:999#K9P9S9P9",predicate=None): if indata == "": return indata # this actually triggered a bug... # h = hashlib.sha1() # h.update(indata) # print "Called with %r (%s) (%s)" % (indata, h.hexdigest(), self.verbose) self.predicate = predicate self.baseuri = baseuri if baseuri: m = self.re_urisegments.match(baseuri) if m: self.baseuri_attributes = {'baseuri':m.group(1), 'law':m.group(2), 'chapter':m.group(6), 'section':m.group(8), 'piece':m.group(10), 'item':m.group(12)} else: self.baseuri_attributes = {'baseuri':baseuri} else: self.baseuri_attributes = {} # It is hard to make the EBNF grammar recognize arbitrary words # that end in a given suffix (e.g. 'bokföringslagen' with the # suffix 'lagen'). We therefore preprocess the input string and # insert a '|' character before certain suffixes. We also # transform 'Radio- och TV-lagen' into 'Radio-_och_TV-lagen' fixedindata = self.re_escape_compound.sub(r'\1_\2_\3\4', indata) fixedindata = self.re_escape_named.sub(r'|\1', fixedindata)
9d50284969d8486e199b290be7d233578695e282 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4027/9d50284969d8486e199b290be7d233578695e282/LegalRef.py
level = self.getFormData("level", None) if level is not None: if level=="top": query += ' AND dc_identifier:"http://purl.org/anzsrc/seo/ else: query += ' AND skos_broader:"%s"' % level
def __getSolrData(self): prefix = self.getSearchTerms() if prefix: query = '%(prefix)s OR %(prefix)s*' % { "prefix" : prefix } else: query = "*:*" req = SearchRequest(query) req.addParam("fq", 'item_type:"object"') req.addParam("fq", 'repository_type:"SEO"') req.setParam("fl", "score") req.setParam("sort", "score desc") req.setParam("start", self.getStartIndex()) req.setParam("rows", self.getItemsPerPage()) level = self.getFormData("level", None) if level is not None: if level=="top": #query += " AND skos_hasTopConcept:http*" query += ' AND dc_identifier:"http://purl.org/anzsrc/seo/#division"' else: query += ' AND skos_broader:"%s"' % level try: out = ByteArrayOutputStream() indexer = self.services.getIndexer() indexer.search(req, out) return JsonConfigHelper(ByteArrayInputStream(out.toByteArray())) except Exception, e: self.log.error("Failed to lookup '{}': {}", prefix, str(e)) return JsonConfigHelper()
92e56881ea54a4103911238cb42ed8a8dc69509f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10451/92e56881ea54a4103911238cb42ed8a8dc69509f/lookup.py
def __init__(self, bundleDir, ipswDir, outDir, verbose): self.bundleDir = bundleDir self.ipswDir = ipswDir self.outDir = outDir self.verbose = verbose def fileWithSuffix(self, filePath, suffix): if filePath.lower().endswith('.dmg'): filePath = filePath[:-4] suffix = suffix + '.dmg' return path.join(self.outDir, path.basename(filePath) + suffix) def decrypt_file(self, filePath, iv, key): decrypt_cmd = "xpwntool %s %s -iv %s -k %s" % \ (path.join(self.ipswDir, filePath), self.fileWithSuffix(filePath, '.dec'), iv, key) if self.verbose: print "Unpacking: '%s'" % decrypt_cmd os.system(decrypt_cmd) def patch_file(self, filePath, patchFile): patch_cmd = "bspatch %s %s %s" % \ (self.fileWithSuffix(filePath, '.dec'), self.fileWithSuffix(filePath, '.dec.p'), path.join(self.bundleDir, patchFile)) if self.verbose: print "Patching: '%s'" % patch_cmd os.system(patch_cmd) def diff_llb(self, patch): filePath = patch [ 'File' ] patchFile = patch [ 'Patch' ] encrypt_cmd = "xpwntool %s %s -t %s -xn8824k -iv %s -k %s" % \ (self.fileWithSuffix(filePath, ".dec.ap"), self.fileWithSuffix(filePath, '.ap'), \ path.join(self.ipswDir, filePath) , patch['IV'], patch['Key']) if self.verbose: print "Encrypting LLB: '%s'" % encrypt_cmd os.system(encrypt_cmd) diff_cmd = "bsdiff %s %s %s" % \ (path.join(self.ipswDir, filePath), self.fileWithSuffix(filePath, '.ap'), path.join(self.bundleDir, patchFile)) if self.verbose: print "Diffing LLB: '%s'" % diff_cmd os.system(diff_cmd) def diff_file(self, patch): filePath = patch['File'] patchFile = patch['Patch'] if path.basename(filePath).startswith('LLB'): self.diff_llb(patch) return if 'IV' in patch: orig_suffix = '.dec' ap_suffix = '.dec.ap' else: orig_suffix = '' ap_suffix = '.ap' diff_cmd = "bsdiff %s %s %s" % \ (self.fileWithSuffix(filePath, orig_suffix), self.fileWithSuffix(filePath, ap_suffix), path.join(self.bundleDir, patchFile)) if self.verbose: print "Diffing: '%s'" % diff_cmd os.system(diff_cmd) def decrypt_rootfs(self): key = self.infoPlist['RootFilesystemKey'] dmg = self.infoPlist['RootFilesystem'] vfdecrypt_cmd = "vfdecrypt -i %s -o %s -k %s" % \ (path.join(self.ipswDir, dmg), self.fileWithSuffix(dmg, '.dec'), key) if self.verbose: print "vfdecrypt: '%s'" % vfdecrypt_cmd os.system(vfdecrypt_cmd) mount_cmd = "hdiutil attach %s" % self.fileWithSuffix(dmg, '.dec') if self.verbose: print "hdiutil: '%s'" % mount_cmd os.system(mount_cmd) def fspatch_extract_callback(self, patch): if not 'Patch' in patch: return filePath = patch['File'] mountpoint = path.join('/Volumes', self.infoPlist['RootFilesystemMountVolume']) cp_cmd = "cp %s %s" % (path.join(mountpoint, filePath), self.fileWithSuffix(filePath, "")) if self.verbose: print "cp: '%s'" % cp_cmd os.system(cp_cmd) def mount_ramdisk(self): firmwarePatches = self.infoPlist['FirmwarePatches'] if not 'Restore Ramdisk' in firmwarePatches: return patch = firmwarePatches['Restore Ramdisk'] filePath = patch['File'] mount_cmd = "hdiutil attach %s" % self.fileWithSuffix(filePath, '.dec') if self.verbose: print "hdiutil: '%s'" % mount_cmd os.system(mount_cmd) def fwpatch_decrypt_callback(self, patch, patchKey): if not 'IV' in patch: return self.decrypt_file(patch['File'], patch['IV'], patch['Key']) if 'Patch' in patch: self.patch_file(patch['File'], patch['Patch']) def genpatch_create_callback(self, patch): if 'Patch' in patch: self.diff_file(patch) def fwpatch_create_callback(self, patch, patchKey): self.genpatch_create_callback(patch) def foreach_fwpatch(self, callback): firmwarePatches = self.infoPlist['FirmwarePatches'] for patchKey in firmwarePatches: patch = firmwarePatches[patchKey] callback(patch, patchKey) def foreach_fspatch(self, callback): filesystemPatches = self.infoPlist['FilesystemPatches'] for patchGroupKey in filesystemPatches: patchGroup = filesystemPatches[patchGroupKey] for patch in patchGroup: callback(patch) def rdpatch_extract_callback(self, patch): filePath = patch['File'] ramdiskKey = None for key in ['RestoreRamdiskMountVolume','RamdiskMountVolume']: if key in self.infoPlist: ramdiskKey = key break if not ramdiskKey: return mountpoint = path.join('/Volumes', self.infoPlist[ramdiskKey]) cp_cmd = "cp %s %s" % (path.join(mountpoint, filePath), self.fileWithSuffix(filePath, "")) if self.verbose: print "cp: '%s'" % cp_cmd os.system(cp_cmd) def foreach_rdpatch(self, callback): rdPatches = self.infoPlist['RamdiskPatches'] for rdKey in rdPatches: patch = rdPatches[rdKey] callback(patch) def umount_all(self): for key in ['RamdiskMountVolume', 'RestoreRamdiskMountVolume', 'RootFilesystemMountVolume']: if not key in self.infoPlist: continue mountpoint = path.join('/Volumes', self.infoPlist[key]) umount_cmd = "hdiutil detach %s" % mountpoint if self.verbose: print "Unmount: '%s'" % umount_cmd os.system(umount_cmd) def process_info_plist(self): self.infoPlist = plistlib.readPlist(path.join(self.bundleDir, 'Info.plist')) self.foreach_fwpatch(self.fwpatch_decrypt_callback) self.mount_ramdisk() self.foreach_rdpatch(self.rdpatch_extract_callback) self.decrypt_rootfs() self.foreach_fspatch(self.fspatch_extract_callback) self.umount_all() def create_patch_files(self): self.infoPlist = plistlib.readPlist(path.join(self.bundleDir, 'Info.plist')) self.foreach_fwpatch(self.fwpatch_create_callback) self.foreach_rdpatch(self.genpatch_create_callback) self.foreach_fspatch(self.genpatch_create_callback)
def __init__(self, bundleDir, ipswDir, outDir, verbose, x_opt): self.x_opt = x_opt self.bundleDir = bundleDir self.ipswDir = ipswDir self.outDir = outDir self.verbose = verbose def fileWithSuffix(self, filePath, suffix): if filePath.lower().endswith('.dmg'): filePath = filePath[:-4] suffix = suffix + '.dmg' return path.join(self.outDir, path.basename(filePath) + suffix) def unpack_file(self, filePath): decrypt_cmd = "xpwntool %s %s" % \ (path.join(self.ipswDir, filePath), self.fileWithSuffix(filePath, '.dec')) if self.verbose: print "Unpacking: '%s'" % decrypt_cmd os.system(decrypt_cmd) def decrypt_file(self, filePath, iv, key): decrypt_cmd = "xpwntool %s %s -iv %s -k %s" % \ (path.join(self.ipswDir, filePath), self.fileWithSuffix(filePath, '.dec'), iv, key) if self.verbose: print "Decrypting: '%s'" % decrypt_cmd os.system(decrypt_cmd) def patch_file(self, filePath, patchFile): patch_cmd = "bspatch %s %s %s" % \ (self.fileWithSuffix(filePath, '.dec'), self.fileWithSuffix(filePath, '.dec.p'), path.join(self.bundleDir, patchFile)) if self.verbose: print "Patching: '%s'" % patch_cmd os.system(patch_cmd) def diff_llb(self, patch, x_opt): filePath = patch [ 'File' ] patchFile = patch [ 'Patch' ] encrypt_cmd = "xpwntool %s %s -t %s -x%s -iv %s -k %s" % \ (self.fileWithSuffix(filePath, ".dec.ap"), self.fileWithSuffix(filePath, '.ap'), \ path.join(self.ipswDir, filePath) , x_opt , patch['IV'], patch['Key']) if self.verbose: print "Encrypting LLB: '%s'" % encrypt_cmd os.system(encrypt_cmd) diff_cmd = "bsdiff %s %s %s" % \ (path.join(self.ipswDir, filePath), self.fileWithSuffix(filePath, '.ap'), path.join(self.bundleDir, patchFile)) if self.verbose: print "Diffing LLB: '%s'" % diff_cmd os.system(diff_cmd) def ldid(self, path): ldid_cmd = "ldid -s %s" % path if self.verbose: print "Pseudosigning: '%s'" % ldid_cmd os.system(ldid_cmd) def fuzzy_patch(self, patch, origPath, patchedPath): deltaFile = patch['Pattern'] fzp_cmd = "fuzzy_patcher --fuzz 80 --patch --orig %s --patched %s --delta %s" % \ (origPath, patchedPath, path.join(self.outDir, "_json", deltaFile + ".delta.json")) if self.verbose: print "Fuzzy patching: '%s'" % fzp_cmd os.system(fzp_cmd) if not path.basename(origPath).startswith('asr'): return self.ldid(patchedPath) def diff_file(self, patch, isFirmwarePatch): filePath = patch['File'] patchFile = patch['Patch'] if path.basename(filePath).startswith('LLB') and self.x_opt: self.diff_llb(patch, self.x_opt) return if isFirmwarePatch: orig_suffix = '.dec' ap_suffix = '.dec.ap' else: orig_suffix = '' ap_suffix = '.ap' origPath = self.fileWithSuffix(filePath, orig_suffix) patchedPath = self.fileWithSuffix(filePath, ap_suffix) if 'Pattern' in patch: self.fuzzy_patch(patch, origPath, patchedPath) diff_cmd = "bsdiff %s %s %s" % \ (origPath, patchedPath, path.join(self.bundleDir, patchFile)) if self.verbose: print "Diffing: '%s'" % diff_cmd os.system(diff_cmd) def decrypt_rootfs(self): key = self.infoPlist['RootFilesystemKey'] dmg = self.infoPlist['RootFilesystem'] vfdecrypt_cmd = "vfdecrypt -i %s -o %s -k %s" % \ (path.join(self.ipswDir, dmg), self.fileWithSuffix(dmg, '.dec'), key) if self.verbose: print "vfdecrypt: '%s'" % vfdecrypt_cmd os.system(vfdecrypt_cmd) mount_cmd = "hdiutil attach %s" % self.fileWithSuffix(dmg, '.dec') if self.verbose: print "hdiutil: '%s'" % mount_cmd os.system(mount_cmd) def fspatch_extract_callback(self, patch): if not 'Patch' in patch: return filePath = patch['File'] mountpoint = path.join('/Volumes', self.infoPlist['RootFilesystemMountVolume']) cp_cmd = "cp %s %s" % (path.join(mountpoint, filePath), self.fileWithSuffix(filePath, "")) if self.verbose: print "cp: '%s'" % cp_cmd os.system(cp_cmd) def mount_ramdisk(self): firmwarePatches = self.infoPlist['FirmwarePatches'] if not 'Restore Ramdisk' in firmwarePatches: return patch = firmwarePatches['Restore Ramdisk'] filePath = patch['File'] mount_cmd = "hdiutil attach %s" % self.fileWithSuffix(filePath, '.dec') if self.verbose: print "hdiutil: '%s'" % mount_cmd os.system(mount_cmd) def fwpatch_decrypt_callback(self, patch, patchKey): if not 'IV' in patch: self.unpack_file(patch['File']) else: self.decrypt_file(patch['File'], patch['IV'], patch['Key']) if 'Patch' in patch: self.patch_file(patch['File'], patch['Patch']) def genpatch_create_callback(self, patch): if 'Patch' in patch: self.diff_file(patch, isFirmwarePatch = False) def fwpatch_create_callback(self, patch, patchKey): if 'Patch' in patch: self.diff_file(patch, isFirmwarePatch = True) def foreach_fwpatch(self, callback): firmwarePatches = self.infoPlist['FirmwarePatches'] for patchKey in firmwarePatches: patch = firmwarePatches[patchKey] callback(patch, patchKey) def foreach_fspatch(self, callback): filesystemPatches = self.infoPlist['FilesystemPatches'] for patchGroupKey in filesystemPatches: patchGroup = filesystemPatches[patchGroupKey] for patch in patchGroup: callback(patch) def rdpatch_extract_callback(self, patch): filePath = patch['File'] ramdiskKey = None for key in ['RestoreRamdiskMountVolume','RamdiskMountVolume']: if key in self.infoPlist: ramdiskKey = key break if not ramdiskKey: return mountpoint = path.join('/Volumes', self.infoPlist[ramdiskKey]) cp_cmd = "cp %s %s" % (path.join(mountpoint, filePath), self.fileWithSuffix(filePath, "")) if self.verbose: print "cp: '%s'" % cp_cmd os.system(cp_cmd) def foreach_rdpatch(self, callback): rdPatches = self.infoPlist['RamdiskPatches'] for rdKey in rdPatches: patch = rdPatches[rdKey] callback(patch) def umount_all(self): for key in ['RamdiskMountVolume', 'RestoreRamdiskMountVolume', 'RootFilesystemMountVolume']: if not key in self.infoPlist: continue mountpoint = path.join('/Volumes', self.infoPlist[key]) umount_cmd = "hdiutil detach %s" % mountpoint if self.verbose: print "Unmount: '%s'" % umount_cmd os.system(umount_cmd) def process_info_plist(self): self.infoPlist = plistlib.readPlist(path.join(self.bundleDir, 'Info.plist')) self.foreach_fwpatch(self.fwpatch_decrypt_callback) self.mount_ramdisk() self.foreach_rdpatch(self.rdpatch_extract_callback) self.decrypt_rootfs() self.foreach_fspatch(self.fspatch_extract_callback) self.umount_all() def create_patch_files(self): self.infoPlist = plistlib.readPlist(path.join(self.bundleDir, 'Info.plist')) self.foreach_fwpatch(self.fwpatch_create_callback) self.foreach_rdpatch(self.genpatch_create_callback) self.foreach_fspatch(self.genpatch_create_callback)
def __init__(self, bundleDir, ipswDir, outDir, verbose): self.bundleDir = bundleDir self.ipswDir = ipswDir self.outDir = outDir self.verbose = verbose
6bbaf82ec3652ee33831edd278e75b7ce0962e3e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14010/6bbaf82ec3652ee33831edd278e75b7ce0962e3e/ipsw.py
parser = OptionParser() parser.add_option("-b", "--bundle", dest="bundle", help="Bundle directory to use", metavar="BUNDLE_DIR") parser.add_option("-i", "--ipsw", dest="ipsw", help="Unpacked IPSW directory", metavar="IPSW_DIR") parser.add_option("-o", "--out", dest="out", help="Output directory", metavar="OUT_DIR") parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose mode") parser.add_option("-c", "--create", dest="create", action="store_true", default=False, help="Create patch files from work dir") (opts, args) = parser.parse_args() requiredOpts = ['bundle', 'ipsw', 'out'] for req in requiredOpts: if not opts.__dict__[req]: print "'%s' argument is mandatory!" % req exit(1) bundleParser = BundleParser( opts.bundle, opts.ipsw, opts.out, opts.verbose ) if opts.create: bundleParser.create_patch_files() else: bundleParser.process_info_plist()
parser = OptionParser() parser.add_option("-b", "--bundle", dest="bundle", help="Bundle directory to use", metavar="BUNDLE_DIR") parser.add_option("-i", "--ipsw", dest="ipsw", help="Unpacked IPSW directory", metavar="IPSW_DIR") parser.add_option("-o", "--out", dest="out", help="Output directory", metavar="OUT_DIR") parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose mode") parser.add_option("-c", "--create", dest="create", action="store_true", default=False, help="Create patch files from work dir") parser.add_option("-x", "--llbexploit", dest="x_opt", default=None, help="Type of LLB exploit to use, n8824k or 24k") (opts, args) = parser.parse_args() requiredOpts = ['bundle', 'ipsw', 'out'] for req in requiredOpts: if not opts.__dict__[req]: print "'%s' argument is mandatory!" % req exit(1) bundleParser = BundleParser( opts.bundle, opts.ipsw, opts.out, opts.verbose, opts.x_opt) if opts.create: bundleParser.create_patch_files() else: bundleParser.process_info_plist()
def main(): parser = OptionParser() parser.add_option("-b", "--bundle", dest="bundle", help="Bundle directory to use", metavar="BUNDLE_DIR") parser.add_option("-i", "--ipsw", dest="ipsw", help="Unpacked IPSW directory", metavar="IPSW_DIR") parser.add_option("-o", "--out", dest="out", help="Output directory", metavar="OUT_DIR") parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose mode") parser.add_option("-c", "--create", dest="create", action="store_true", default=False, help="Create patch files from work dir") (opts, args) = parser.parse_args() requiredOpts = ['bundle', 'ipsw', 'out'] for req in requiredOpts: if not opts.__dict__[req]: print "'%s' argument is mandatory!" % req exit(1) bundleParser = BundleParser( opts.bundle, opts.ipsw, opts.out, opts.verbose ) if opts.create: bundleParser.create_patch_files() else: bundleParser.process_info_plist()
6bbaf82ec3652ee33831edd278e75b7ce0962e3e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14010/6bbaf82ec3652ee33831edd278e75b7ce0962e3e/ipsw.py
kpatches = diff_kernel(sys.argv[3], sys.argv[4]) ibss = ibss_default_patches(sys.argv[1], sys.argv[2]) ibss_add_kpf(ibss, sys.argv[5]) ibss_add_kpatches(ibss, kpatches)
if len(sys.argv) < 6: print "Usage: ibss_patcher ibss_decrypted_orig ibss_out kernelcache_decrypted_orig kernelcache_decrypted_patched ibss_patchproc.bin" exit(1) kpatches = diff_kernel(sys.argv[3], sys.argv[4]) ibss = ibss_default_patches(sys.argv[1], sys.argv[2]) ibss_add_kpf(ibss, sys.argv[5]) ibss_add_kpatches(ibss, kpatches)
def byte_search(image, bytes): for i in range(0, len(image) - len(bytes), 2): if image[i:i+len(bytes)] == bytes: return i return -1
b77b0fe1d965b627bf4bdd66b9aa0556fe154e8a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14010/b77b0fe1d965b627bf4bdd66b9aa0556fe154e8a/ibss_patcher.py
leftOfBox = xPt[ind-1]
leftOfBox = self.xPts[ind-1]
def value(self): "preserve volume during the binning" totalVol = 0 for ind,xPt,yPt in zip(range(self.numPts), self.xPts, self.yPts): #get leftOfBox if ind == 0: if self.leftSide < xPt: leftOfBox = self.leftSide else: leftOfBox = xPt else: leftOfBox = xPt[ind-1] #get rightOfBox if ind == (self.numPts-1): if self.rightSide > xPt: rightOfBox = self.rightSide else: rightOfBox = xPt else: rightOfBox=xPt[ind+1] boxLength = rightOfBox-leftOfBox boxVol = boxLength*yPt totalVol += boxVol totalHeight = totalVol/self.spacing return totalHeight
def0e7c7c6d0e42909a4dc36014312c805680c64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6231/def0e7c7c6d0e42909a4dc36014312c805680c64/SimpleRebin.py
rightOfBox=xPt[ind+1]
rightOfBox = self.xPts[ind+1]
def value(self): "preserve volume during the binning" totalVol = 0 for ind,xPt,yPt in zip(range(self.numPts), self.xPts, self.yPts): #get leftOfBox if ind == 0: if self.leftSide < xPt: leftOfBox = self.leftSide else: leftOfBox = xPt else: leftOfBox = xPt[ind-1] #get rightOfBox if ind == (self.numPts-1): if self.rightSide > xPt: rightOfBox = self.rightSide else: rightOfBox = xPt else: rightOfBox=xPt[ind+1] boxLength = rightOfBox-leftOfBox boxVol = boxLength*yPt totalVol += boxVol totalHeight = totalVol/self.spacing return totalHeight
def0e7c7c6d0e42909a4dc36014312c805680c64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6231/def0e7c7c6d0e42909a4dc36014312c805680c64/SimpleRebin.py
self.value = 0.0
def __init__(self, leftSide=None, spacing=None, numXPointsToBin=None): self.numXPointsToBin = numXPointsToBin self.value = 0.0 self.xPts = [] self.yPts = [] self.leftSide = leftSide self.spacing = spacing
def0e7c7c6d0e42909a4dc36014312c805680c64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6231/def0e7c7c6d0e42909a4dc36014312c805680c64/SimpleRebin.py
if xPt >= currentBin.rightSide:
while xPt >= currentBin.rightSide:
def __init__(self, xDataP, yDataP, newBins=None, binValue = 'countPoints', format = 'columns'): xData = np.sort(xDataP) sortedInds = np.argsort(xDataP) yData = yDataP[sortedInds] self.bins=[] if type(newBins) is type(1): # this algorithm is for data that has already been binned and we're going over the bins to rebin import math leftOverPts, numXdataInBin = math.modf(len(xData)/len(newBins)) currentBin = Bin(numXPointsToBin = int(numXdataInBin)) for xPt,yPt in zip(xData,yData): if currentBin.getNumPts() >= numXdataInBin: currentBin.spacing = xPt - currentBin.xPts[0] self.bins.append(currentBin) currentBin = Bin(numXPointsToBin = int(numXdataInBin)) currentBin.xPts.append(xPt) if binValue=='countPoints': # then add together all the y axis values that fall within the new bin currentBin.value += yPt elif binValue=='averagePoints': #weight the average numPointsInBin = currentBin.getNumPts() currentBin.value = (numPointsInBin*currentBin.value + yPt)/(numPointsInBin+1) else: #assume newBins are equally spaced binCounter = 0 binSize = newBins[1] - newBins[0] currentBin = Bin(spacing = binSize, leftSide = newBins[binCounter]) for xPt,yPt in zip(xData,yData): if xPt >= currentBin.rightSide: self.bins.append(currentBin) binCounter += 1 currentBin = Bin(spacing = binSize, leftSide = newBins[binCounter]) currentBin.xPts.append(xPt) currentBin.yPts.append(yPt) # but when you plot, plot the y-axis value not at the x-axis pair, but at the midpoint between the x-axis pair # and the one up from it. Assume there is an additional x-axis point at the end with the same spacing as all the others.
def0e7c7c6d0e42909a4dc36014312c805680c64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6231/def0e7c7c6d0e42909a4dc36014312c805680c64/SimpleRebin.py
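The fix above replaces if with while when advancing to the next bin, because a single data point can lie past several empty bins at once. A small self-contained illustration of why the loop is needed (the binning is simplified; bin_edges and assign_bins are made-up names, not from SimpleRebin):

def assign_bins(xs, bin_edges):
    # xs and bin_edges are sorted ascending; returns a bin index per x
    out, b = [], 0
    for x in xs:
        # 'while', not 'if': x may skip over several empty bins in one step
        while b + 1 < len(bin_edges) and x >= bin_edges[b + 1]:
            b += 1
        out.append(b)
    return out

# assign_bins([0.1, 3.5], [0, 1, 2, 3, 4]) -> [0, 3]; an 'if' would give [0, 1]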
def onAxis(self, axesGrp, axis, index): #index: index of this axis in the axis array #we need to index that so that axis can be loaded #sequentially. mapper = axis._mapper type = types[mapper.__class__]
240e4410ae902463e8477a703c9381545269ddf5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6231/240e4410ae902463e8477a703c9381545269ddf5/Renderer.py
axisGrp.create_dataset('bin centers', data = NdArray(bcs.datatype(), bcs))
axisGrp.create_dataset('bin centers', data = bcs)
def onAxis(self, axesGrp, axis, index): #index: index of this axis in the axis array #we need to index that so that axis can be loaded #sequentially. mapper = axis._mapper type = types[mapper.__class__]
240e4410ae902463e8477a703c9381545269ddf5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6231/240e4410ae902463e8477a703c9381545269ddf5/Renderer.py
if isinstance(unit, int) or isinstance(unit, float):
if isinstance(unit, int) or isinstance(unit, float) or isinstance(unit, long):
def onUnit(self, unit): if isinstance(unit, int) or isinstance(unit, float): return unit return unit.tostring()
e24de680ae382528064d4b06c84be243288fa2cb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/6231/e24de680ae382528064d4b06c84be243288fa2cb/Loader.py
dst = WithProperties('public_html/binaries/' + remote_name + '.bz2', 'got_revision')
dst = WithProperties('public_html/binaries/' + remote_name, 'got_revision')
def addUploadBinariesStep(factory, binaries): for (local_name, remote_name) in binaries.items(): dst = WithProperties('public_html/binaries/' + remote_name + '.bz2', 'got_revision') factory.addStep(FileUpload(slavesrc=local_name, masterdest=dst, mode=0755))
704fd5ef44d9d7d30dcbe2dbde78e42fb06afdf1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1035/704fd5ef44d9d7d30dcbe2dbde78e42fb06afdf1/common.py
return pretty_date(dt)
return dt
def get_date(s): dt = datetime.strptime(s, '%Y-%m-%d %H:%M:%S') dt -= timedelta(seconds=time.timezone) # sqlite seems to save at GMT... ata :P return pretty_date(dt) # found this online
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
self.conn = cherrypy.thread_data.db self.c = self.conn.cursor()
def get_conn(self): return cherrypy.thread_data.db def get_cur(self): return self.get_conn().cursor() def fetchvar(self, query, args=()): c = self.get_cur() c.execute(query, args) return c.fetchone()[0]
def connect(self, thread_index): cherrypy.thread_data.db = sqlite3.connect('minitwit.sqlite') self.conn = cherrypy.thread_data.db self.c = self.conn.cursor()
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
self.c.execute(query, args) return self.c.fetchone()
c = self.get_cur() c.execute(query, args) return c.fetchone()
def fetchone(self, query, args=()): self.c.execute(query, args) return self.c.fetchone()
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
self.c.execute(query, args) return self.c.fetchalll()
c = self.get_cur() c.execute(query, args) return c.fetchall()
def fetchall(self, query, args=()): self.c.execute(query, args) return self.c.fetchalll()
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
self.c.execute(query, args) self.conn.commit()
c = self.get_cur() c.execute(query, args) self.get_conn().commit()
def query(self, query, args=()): self.c.execute(query, args) self.conn.commit()
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
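The last few records refactor minitwit from a cursor stored on self to helpers that fetch the connection from cherrypy.thread_data on every call, so each CherryPy worker thread uses its own SQLite connection (SQLite connections must stay on the thread that created them by default). A minimal sketch of the same pattern using only the standard library, with threading.local standing in for cherrypy.thread_data:

import sqlite3, threading

_local = threading.local()  # holds one connection per thread

def get_conn(db_path='minitwit.sqlite'):
    if not hasattr(_local, 'db'):
        _local.db = sqlite3.connect(db_path)
    return _local.db

def fetchone(query, args=()):
    # fresh cursor per call; nothing is shared across threads
    return get_conn().cursor().execute(query, args).fetchone()

def query(query_str, args=()):
    get_conn().cursor().execute(query_str, args)
    get_conn().commit()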
conn = cherrypy.thread_data.db c = conn.cursor() c.execute("select rowid from users where username = ? and password = ?", (username, md5sum(password))) logged_in = c.fetchone()
logged_in = db.fetchone("select rowid from users where username = ? and password = ?", (username, md5sum(password)))
def login(self, username='', password='', redirect='/'): message = None if len(username) > 0 and len(password) > 0: conn = cherrypy.thread_data.db c = conn.cursor() c.execute("select rowid from users where username = ? and password = ?", (username, md5sum(password))) logged_in = c.fetchone() if logged_in is not None: cherrypy.session['logged_in'] = logged_in[0] raise cherrypy.HTTPRedirect(redirect) else: message = 'Invalid username/password' return templates.get_template('login.html').render(username=username, password=password, message=message)
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
conn = cherrypy.thread_data.db c = conn.cursor() c.execute('select rowid, username from users where rowid = ?', (rowid,)) r = c.fetchone()
r = db.fetchone('select rowid, username from users where rowid = ?', (rowid,))
def get_logged_in(self): try: rowid = cherrypy.session.get('logged_in') conn = cherrypy.thread_data.db c = conn.cursor() c.execute('select rowid, username from users where rowid = ?', (rowid,)) r = c.fetchone() return {'id': r[0], 'username': r[1]} except: return None
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
def default(self, id=None, text=None):
def default(self, id=None, text=None, last_update=None):
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
conn = cherrypy.thread_data.db c = conn.cursor()
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
c.execute('select rowid, text, date from posts where rowid = ?', (id,)) r = c.fetchone()
r = db.fetchone('select rowid, text, date from posts where rowid = ?', (id,))
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
c.execute('insert into posts values (?, ?, datetime("now"))', (logged_in['id'], text)) conn.commit()
db.query('insert into posts values (?, ?, datetime("now"))', (logged_in['id'], text))
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
try: c.execute('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') return json.dumps([{'id': r[0], 'text': r[1], 'date': get_date(r[2]), 'username': r[3]} for r in c.fetchall()]) except: raise cherrypy.HTTPError(404)
if last_update is not None: last_update = int(last_update) / 1000 new_count = db.fetchvar('select count(*) from posts where strftime("%s", date) - ? > 0', (last_update,)) if int(new_count) == 0: return '[]' posts = db.fetchall('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') return json.dumps([{'id': r[0], 'text': r[1], 'date': pretty_date(get_date(r[2])), 'username': r[3]} for r in posts])
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
c.execute('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') posts = [{'id': r[0], 'text': r[1], 'date': get_date(r[2]), 'username': r[3]} for r in c.fetchall()]
posts = db.fetchall('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') posts = [{'id': r[0], 'text': r[1], 'date': pretty_date(get_date(r[2])), 'username': r[3]} for r in posts]
def index(self): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() c.execute('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') posts = [{'id': r[0], 'text': r[1], 'date': get_date(r[2]), 'username': r[3]} for r in c.fetchall()] return templates.get_template('dashboard.html').render(logged_in=logged_in, posts=posts)
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
c.execute('insert into users values (?, ?)', username, md5sum(password)) conn.commit()
db.query('insert into users values (?, ?)', (username, md5sum(password)))
def register(self, username='', password='', conf_password=''): message = None if len(username) > 0 and len(password) > 0 and password == conf_password: c.execute('insert into users values (?, ?)', username, md5sum(password)) conn.commit() raise cherrypy.HTTPRedirect('/session/login') elif password != conf_password: message = "Passwords don't match" return templates.get_template('register.html').render(username=username, password=password, conf_password=conf_password, message=message)
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
conn = cherrypy.thread_data.db c = conn.cursor() c.execute("drop table if exists users") c.execute("drop table if exists posts") c.execute("create table users (username text, password text)") c.execute("create unique index username on users (username)") c.execute("create table posts (user int, text text, date text)") c.execute("create index user on posts (user)") c.execute("insert into users values (?, ?)", ('demo', md5sum('demo'))) c.execute("insert into posts values (?, ?, datetime('now'))", (1, 'Hello world')) conn.commit()
db.query("drop table if exists users") db.query("drop table if exists posts") db.query("create table users (username text, password text)") db.query("create unique index username on users (username)") db.query("create table posts (user int, text text, date text)") db.query("create index user on posts (user)") db.query("insert into users values (?, ?)", ('demo', md5sum('demo'))) db.query("insert into posts values (?, ?, datetime('now'))", (1, 'Hello world'))
def install(self): conn = cherrypy.thread_data.db c = conn.cursor() c.execute("drop table if exists users") c.execute("drop table if exists posts") c.execute("create table users (username text, password text)") c.execute("create unique index username on users (username)") c.execute("create table posts (user int, text text, date text)") c.execute("create index user on posts (user)") c.execute("insert into users values (?, ?)", ('demo', md5sum('demo'))) c.execute("insert into posts values (?, ?, datetime('now'))", (1, 'Hello world')) conn.commit() return "Tables created!"
4b3b0887bbd7693b28f3887b7bce2d7daf7358e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3078/4b3b0887bbd7693b28f3887b7bce2d7daf7358e8/minitwit.py
reqs.append((WMS.STORAGE, self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList]))
reqs.append((WMS.STORAGE, self.dataSplitter.getSplitInfo(jobNum).get(DataSplitter.SEList)))
def getRequirements(self, jobNum): reqs = Module.getRequirements(self, jobNum) if self.dataSplitter != None: reqs.append((WMS.STORAGE, self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList])) return reqs
93a456faf0c22bb4dbe3a0e8630cbf2d3bfef1af /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/93a456faf0c22bb4dbe3a0e8630cbf2d3bfef1af/datamod.py
return self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList] != []
return self.dataSplitter.getSplitInfo(jobNum).get(DataSplitter.SEList) != []
def canSubmit(self, jobNum): if self.checkSE and (self.dataSplitter != None): return self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList] != [] return True
93a456faf0c22bb4dbe3a0e8630cbf2d3bfef1af /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/93a456faf0c22bb4dbe3a0e8630cbf2d3bfef1af/datamod.py
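Both datamod.py records above replace splitInfo[DataSplitter.SEList] with splitInfo.get(DataSplitter.SEList): a split record that lacks a storage-element list then yields None instead of raising KeyError, and the != [] test still passes, presumably meaning "no SE restriction". The mechanics in isolation (the key name here is illustrative):

    splitInfo = {}  # split record without an SE list

    try:
        splitInfo['SEList']
    except KeyError:
        print('indexing a missing key raises KeyError')

    print(splitInfo.get('SEList'))        # None, no exception
    print(splitInfo.get('SEList') != [])  # True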
lumis = []
runLumiDict = {}
def splitLumiRanges(lumis, off = 0, singlemode = False): # Split into single runs (todel, toadd) = (set(), set()) for (s, e) in lumis: if s[0] and e[0] and s[0] != e[0]: todel.add((s, e)) toadd.add((s, (s[0],None))) toadd.add(((e[0],1),e)) for x in range(s[0] + 1, e[0]): toadd.add(((x, 1), (x, None))) lumis.difference_update(todel) lumis.update(toadd)
52f35f2f956000a5deb28fa314db5301dbb0feb0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/52f35f2f956000a5deb28fa314db5301dbb0feb0/lumiInfo.py
lumis.append(([run_id, lumi_id], [run_id, lumi_id]))
if run_id not in runLumiDict: runLumiDict[run_id] = set() runLumiDict[run_id].add(lumi_id)
def splitLumiRanges(lumis, off = 0, singlemode = False): # Split into single runs (todel, toadd) = (set(), set()) for (s, e) in lumis: if s[0] and e[0] and s[0] != e[0]: todel.add((s, e)) toadd.add((s, (s[0],None))) toadd.add(((e[0],1),e)) for x in range(s[0] + 1, e[0]): toadd.add(((x, 1), (x, None))) lumis.difference_update(todel) lumis.update(toadd)
52f35f2f956000a5deb28fa314db5301dbb0feb0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/52f35f2f956000a5deb28fa314db5301dbb0feb0/lumiInfo.py
sys.stderr.write(se_utils.se_rm.lastlog)
utils.eprint(procRM.getMessage())
def incInfo(x): infos[x] = infos.get(x, 0) + 1
e76b3aedba65f87878b88516a5b506e24c9559f3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/e76b3aedba65f87878b88516a5b506e24c9559f3/downloadFromSE.py
if not se_utils.se_rm(os.path.join(pathSE, name_dest)):
procRM = se_utils.se_rm(os.path.join(pathSE, name_dest)) if procRM.wait() != 0:
def incInfo(x): infos[x] = infos.get(x, 0) + 1
e76b3aedba65f87878b88516a5b506e24c9559f3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/e76b3aedba65f87878b88516a5b506e24c9559f3/downloadFromSE.py

if True in map(lambda x in state, ['h', 's', 'S', 'T', 'w']):
if True in map(lambda x: x in state, ['h', 's', 'S', 'T', 'w']):
def parseJobState(self, state): if True in map(lambda x in state, ['h', 's', 'S', 'T', 'w']): return Job.QUEUED if True in map(lambda x in state, ['r', 't']): return Job.RUNNING return Job.READY
cb403baa4c61f0d5c1cea99708dd8cc658b66bb0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/cb403baa4c61f0d5c1cea99708dd8cc658b66bb0/sge.py
if True in map(lambda x in state, ['r', 't']):
if True in map(lambda x: x in state, ['r', 't']):
def parseJobState(self, state): if True in map(lambda x in state, ['h', 's', 'S', 'T', 'w']): return Job.QUEUED if True in map(lambda x in state, ['r', 't']): return Job.RUNNING return Job.READY
cb403baa4c61f0d5c1cea99708dd8cc658b66bb0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/cb403baa4c61f0d5c1cea99708dd8cc658b66bb0/sge.py
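lambda x in state is a syntax error: a lambda needs a parameter list, a colon, and a body, hence the fix lambda x: x in state in both sge.py records. Since the expression only asks whether any flag occurs in the state string, any() with a generator expression is the more idiomatic spelling; a sketch:

    state = 'qw'  # example qstat state string
    flags = ['h', 's', 'S', 'T', 'w']

    # The fixed form from the records:
    queued = True in map(lambda x: x in state, flags)
    print(queued)  # True, 'w' occurs in 'qw'

    # Equivalent and short-circuiting:
    print(any(x in state for x in flags))  # True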
['CMSSW_DIR_UI', 'CMSSW_DIR_PRO']))
['CMSSW_DIR_USER', 'CMSSW_DIR_UI', 'CMSSW_DIR_PRO']))
def isInstrumented(cfgName): cfg = open(cfgName, 'r').read() for tag in [ "FILE_NAMES", "MAX_EVENTS", "SKIP_EVENTS" ]: if (not "__%s__" % tag in cfg) and (not "@%s@" % tag in cfg): return False return True
7ff28a826c9be976f35d0953647e71e6b446daee /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/7ff28a826c9be976f35d0953647e71e6b446daee/cmssw.py
submodules = map(str.strip, submodules.split(","))
submodules = filter(lambda x: x != '', map(str.strip, submodules.split(",")))
def __init__(self, config, module, submodules): Monitoring.__init__(self, config, module) submodules = map(str.strip, submodules.split(",")) self.submodules = map(lambda x: Monitoring.open(x, config, module), submodules)
426d0e5a98b3380f7db9453dfb4fd317f26ea10c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/426d0e5a98b3380f7db9453dfb4fd317f26ea10c/monitoring.py
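The monitoring fix guards against empty entries: splitting a comma-separated option and stripping whitespace still leaves '' behind for trailing or doubled commas, and each empty name would otherwise reach Monitoring.open. A quick demonstration using the record's own idiom:

    submodules = 'dashboard, scripts,, '

    print(list(map(str.strip, submodules.split(','))))
    # ['dashboard', 'scripts', '', '']

    print(list(filter(lambda x: x != '', map(str.strip, submodules.split(',')))))
    # ['dashboard', 'scripts']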
print "You have selected the following runs and lumi sections:"
print "The following runs and lumi sections are selected:"
def __init__(self, config, datasetExpr, datasetNick, datasetID = 0): DataProvider.__init__(self, config, datasetExpr, datasetNick, datasetID) DataProvider.providers.update({'DBSApiv2': 'dbs'}) if config.getBool('CMSSW', 'dbs blacklist T1', True): T1SEs = ["-srmcms.pic.es", "-ccsrm.in2p3.fr", "-storm-fe-cms.cr.cnaf.infn.it", "-srm-cms.gridpp.rl.ac.uk", "-srm.grid.sinica.edu.tw", "-srm2.grid.sinica.edu.tw"] self.sitefilter.extend(T1SEs)
f897484c5cce690ac68654e9232d688b164fd126 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/f897484c5cce690ac68654e9232d688b164fd126/provider_dbsv2.py
else
else:
def __init__(self, config, datasetExpr, datasetNick, datasetID = 0): DataProvider.__init__(self, config, datasetExpr, datasetNick, datasetID) DataProvider.providers.update({'DBSApiv2': 'dbs'}) if config.getBool('CMSSW', 'dbs blacklist T1', True): T1SEs = ["-srmcms.pic.es", "-ccsrm.in2p3.fr", "-storm-fe-cms.cr.cnaf.infn.it", "-srm-cms.gridpp.rl.ac.uk", "-srm.grid.sinica.edu.tw", "-srm2.grid.sinica.edu.tw"] self.sitefilter.extend(T1SEs)
f897484c5cce690ac68654e9232d688b164fd126 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/f897484c5cce690ac68654e9232d688b164fd126/provider_dbsv2.py
result.append(blockInfo)
if len(blockInfo[DataProvider.FileList]) > 0: result.append(blockInfo)
def lumiFilter(lfn): for lumi in listLumiInfo[lfn]: if selectLumi(lumi, self.selectedLumis): return True return self.selectedLumis == None
f897484c5cce690ac68654e9232d688b164fd126 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/f897484c5cce690ac68654e9232d688b164fd126/provider_dbsv2.py
if opts.save_exprgc: outputGC(lumis) if opts.save_exprjson: outputJSON(lumis)
if opts.diff: print "Unchanged:\n", 30 * "=" outputGC(mergeLumi(list(lumis_uc))) print "\nOnly in reference file:\n", 30 * "=" outputGC(mergeLumi(list(lumis_b))) print "\nNot in reference file:\n", 30 * "=" outputGC(mergeLumi(list(lumis_a))) else: if opts.save_exprgc: outputGC(lumis) if opts.save_exprjson: outputJSON(lumis)
def outputJSON(lumis, stream = sys.stdout): tmp = {} for rlrange in lumis: start, end = rlrange if start[0] != end[0]: raise if start[0] not in tmp: tmp[start[0]] = [] tmp[start[0]].append([start[1], end[1]]) stream.write("{\n") entries = map(lambda run: '\t"%d": %s' % (run, tmp[run]), sorted(tmp.keys())) stream.write("%s\n" % str.join(',\n', entries)) stream.write("}\n")
14a5baef75fc5cce95bc45aed6d882ef6f91d6e7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/14a5baef75fc5cce95bc45aed6d882ef6f91d6e7/lumiInfo.py
fail("Could not format lumi sections! %s" % args)
fail("Could not format lumi sections! %s" % args)
def outputJSON(lumis, stream = sys.stdout): tmp = {} for rlrange in lumis: start, end = rlrange if start[0] != end[0]: raise if start[0] not in tmp: tmp[start[0]] = [] tmp[start[0]].append([start[1], end[1]]) stream.write("{\n") entries = map(lambda run: '\t"%d": %s' % (run, tmp[run]), sorted(tmp.keys())) stream.write("%s\n" % str.join(',\n', entries)) stream.write("}\n")
14a5baef75fc5cce95bc45aed6d882ef6f91d6e7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/14a5baef75fc5cce95bc45aed6d882ef6f91d6e7/lumiInfo.py
splitter = DataSplitter.loadState(workDir)
splitter = DataSplitter.loadState(os.path.join(workDir, 'datamap.tar'))
def outputJSON(lumis, stream = sys.stdout): tmp = {} for rlrange in lumis: start, end = rlrange if start[0] != end[0]: raise if start[0] not in tmp: tmp[start[0]] = [] tmp[start[0]].append([start[1], end[1]]) stream.write("{\n") entries = map(lambda run: '\t"%d": %s' % (run, tmp[run]), sorted(tmp.keys())) stream.write("%s\n" % str.join(',\n', entries)) stream.write("}\n")
71b52549389d2ded282718a35b0b83df5fc57d02 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/71b52549389d2ded282718a35b0b83df5fc57d02/lumiInfo.py
args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file://'), urls))
args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file:////'), urls))
def se_runcmd(cmd, varDict = {}, *urls): runLib = utils.pathGC('share', 'gc-run.lib') args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file://'), urls)) varString = str.join(' ', map(lambda x: 'export %s="%s";' % (x, varDict[x]), varDict)) return utils.LoggedProcess('source %s || exit 1; %s %s %s' % (runLib, varString, cmd, args))
e27c72169fa85a2d86bcb55f45c95fb861be5f69 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/e27c72169fa85a2d86bcb55f45c95fb861be5f69/storage.py
params += " -l h_cpu=%s" % strTime(reqs[WMS.WALLTIME])
params += " -l h_cpu=%s" % strTime(reqs[WMS.CPUTIME])
def getSubmitArguments(self, jobNum, sandbox, stdout, stderr): # Restart jobs = no, job name params = ' -r n -N %s' % self.wms.getJobName(jobNum)
0cf9bdc4cfbefe2f95329ada2f3370f149e212a6 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/0cf9bdc4cfbefe2f95329ada2f3370f149e212a6/sge.py
yield utils.pathGC('python', 'grid_control_cms', 'share', 'DashboardAPI', file)
yield utils.pathGC('python', 'grid_control_cms', 'DashboardAPI', file)
def getFiles(self): for file in ('DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py', 'report.py'): yield utils.pathGC('python', 'grid_control_cms', 'share', 'DashboardAPI', file)
3663b04e9a2b7dc039595413dcad2e0741efb021 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/3663b04e9a2b7dc039595413dcad2e0741efb021/dashboard.py
self.addAttr = dict(map(lambda item: (item, config.get(wmsapi, item)), config.parser.options(wmsapi)))
self.addAttr = {} if wmsapi in config.parser.sections(): self.addAttr = dict(map(lambda item: (item, config.get(wmsapi, item)), config.parser.options(wmsapi)))
def __init__(self, config, module, monitor): WMS.__init__(self, config, module, monitor, 'local')
b872b4f0bcdccea6c42bbdf863d5d755a5e9c377 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/b872b4f0bcdccea6c42bbdf863d5d755a5e9c377/local_wms.py
jobList = utils.sorted(map(lambda (jobNum, path): jobNum, Job.readJobs(opts.workDir)))
jobList = utils.sorted(map(lambda (jobNum, path): jobNum, Job.readJobs(os.path.join(opts.workDir, 'jobs'))))
def getOutputDatasets(opts): # Get job numbers, task id, ... log = utils.ActivityLog(' * Reading task info...') jobList = utils.sorted(map(lambda (jobNum, path): jobNum, Job.readJobs(opts.workDir))) taskInfo = utils.PersistentDict(os.path.join(opts.workDir, 'task.dat'), ' = ') del log print " * Reading task info - done" # Get all config and output data log = None configData = {} outputData = {} dbsLog = utils.PersistentDict(os.path.join(opts.workDir, 'dbs.log'), ' = ', False) for jobNum in jobList: if jobNum % 10 == 0: del log log = utils.ActivityLog(' * Reading job logs - [%d / %d]' % (jobNum, jobList[-1])) (output, config) = readDBSJobInfo(opts, opts.workDir, jobNum) # ignore already registered files in incremental mode for lfn in filter(lambda x: not (opts.incremental and x in dbsLog), output): outputData.update({lfn: output[lfn]}) configData.update(config) print " * Reading job logs - done" # Merge parent infos into output file data if os.path.exists(os.path.join(opts.workDir, 'datacache.dat')): # Get parent infos provider = DataProvider.loadState(Config(), opts.workDir, 'datacache.dat') log = utils.ActivityLog(' * Processing parent infos...') blocks = provider.getBlocks() parentMap = {} for block in blocks: blockInfo = (block[DataProvider.Dataset], block[DataProvider.BlockName]) lfns = map(lambda x: (x[DataProvider.lfn], blockInfo), block[DataProvider.FileList]) parentMap.update(dict(lfns)) # Insert parentage infos for lfn in outputData.keys(): for parentLFN in filter(lambda x: x, outputData[lfn][DBS.PARENT_FILES]): if not DBS.PARENT_INFO in outputData[lfn]: outputData[lfn][DBS.PARENT_INFO] = [] if not parentMap[parentLFN] in outputData[lfn][DBS.PARENT_INFO]: outputData[lfn][DBS.PARENT_INFO].append(parentMap[parentLFN]) del log print " * Processing parent infos - done" # Sort output files into blocks log = None metadata = {} datasets = {} datasetInfos = {} for idx, lfn in enumerate(outputData): if idx % 10 == 0: del log log = utils.ActivityLog(' * Dividing output into blocks - [%d / %d]' % (idx, len(outputData))) # Define dataset split criteria def generateDatasetKey(fileInfo): # Split by dataset parent and config hash (+ job config hash) parentDS = map(lambda (ds, b): ds, fileInfo.get(DBS.PARENT_INFO, [])) jobHash = ('', str(fileInfo[DBS.JOBHASH]))[opts.useJobHash] dsKey = utils.md5(str((fileInfo[DBS.CONFIGHASH], jobHash, parentDS))).hexdigest() # Write summary information: if not dsKey in datasetInfos: if parentDS == []: parentDS = ['None'] datasetInfos[dsKey] = ("%15s: %s\n%15s: %s\n" % ( "Config hash", fileInfo[DBS.CONFIGHASH], "Parent datasets", str.join("\n" + 17*" ", parentDS))) annotation = getAnnotation(fileInfo[DBS.CONFIGHASH], configData) if annotation: datasetInfos[dsKey] += "%15s: %s\n" % ("Annotation", annotation) return dsKey # Define block split criteria def generateBlockKey(fileInfo): # Split by SE and block parent (parent is left out in case of merging) key = utils.md5(str(fileInfo[DBS.SE]) + generateDatasetKey(fileInfo)) if not opts.doMerge: key.update(str(map(lambda (ds, b): b, fileInfo.get(DBS.PARENT_INFO, [])))) return key.hexdigest() dsKey = generateDatasetKey(outputData[lfn]) blockKey = generateBlockKey(outputData[lfn]) if not dsKey in datasets: datasets[dsKey] = {} metadata[dsKey] = {DBS.SIZE: 0, DBS.EVENTS: 0} if not blockKey in datasets[dsKey]: datasets[dsKey][blockKey] = [] metadata[blockKey] = {DBS.SIZE: 0, DBS.EVENTS: 0} # Calculate def incStats(x, info): x[DBS.SIZE] += int(info[DBS.SIZE]) x[DBS.EVENTS] += int(info[DBS.EVENTS]) incStats(metadata[dsKey], outputData[lfn]) incStats(metadata[blockKey], outputData[lfn]) datasets[dsKey][blockKey].append(lfn) print " * Dividing output into blocks - done" # Display dataset information print print " => Identified the following output datasets:" for ds in datasets.keys(): print "%4s * Key %s [%d block(s), %d file(s)]" % ("", ds, len(datasets[ds]), sum(map(len, datasets[ds].values()))) print 7*" " + datasetInfos[ds].replace("\n", "\n" + 7*" ") return (taskInfo['task id'], datasets, metadata, outputData, configData)
d862606bc0fa59bbeb39d59a9febf18925bad8b3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/d862606bc0fa59bbeb39d59a9febf18925bad8b3/datasetDBSAdd.py
tmp.sort(cmp=cmpLumi)
tmp.sort(cmpLumi)
def cmpLumi(a,b): (start_a_run, start_a_lumi) = a[0] (start_b_run, start_b_lumi) = b[0] if start_a_run == start_b_run: return cmp(start_a_lumi, start_b_lumi) else: return cmp(start_a_run, start_b_run)
d8c727b1ba9db51bcde07ea7b37433989a3240e8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/d8c727b1ba9db51bcde07ea7b37433989a3240e8/provider_dbsv2.py
realmain(opts, args)
def processShorthand(optSet): if optSet: parser.parse_args(args = optSet.split() + sys.argv[1:], values = opts)
23d7c40a8f1f0faeeeaadfdd288f6348ecf9076e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/23d7c40a8f1f0faeeeaadfdd288f6348ecf9076e/downloadFromSE2.py
if job.get('download') == 'True' and not opt.markIgnoreDL:
if job.get('download') == 'True' and not opts.markIgnoreDL:
def incInfo(x): infos[x] = infos.get(x, 0) + 1
23d7c40a8f1f0faeeeaadfdd288f6348ecf9076e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/23d7c40a8f1f0faeeeaadfdd288f6348ecf9076e/downloadFromSE2.py
if not 'http://' in src: src = "http://cmsdbsprod.cern.ch/%s/servlet/DBSServlet" % src self.args['url'] = src
if src != '': if not 'http://' in src: src = "http://cmsdbsprod.cern.ch/%s/servlet/DBSServlet" % src self.args['url'] = src
def __init__(self, config, datasetExpr, datasetNick, datasetID = 0): DataProvider.__init__(self, config, datasetExpr, datasetNick, datasetID) DataProvider.providers.update({'DBSApiv2': 'dbs'})
8ff40c8a99d09910378a410fed5476f92791c590 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/8ff40c8a99d09910378a410fed5476f92791c590/provider_dbsv2.py
WMS.__init__(self, config, module, monitor, 'local', wmsapi)
WMS.__init__(self, config, module, monitor, 'local', self.api)
def __init__(self, config, module, monitor): wmsapi = config.get('local', 'wms', self._guessWMS()) if wmsapi != self._guessWMS(): utils.vprint('Default batch system on this host is: %s' % self._guessWMS(), -1, once = True) self.api = LocalWMSApi.open(wmsapi, config, self) utils.vprint('Using batch system: %s' % self.api.__class__.__name__, -1) self.addAttr = {} if config.parser.has_section(wmsapi): self.addAttr = dict(map(lambda item: (item, config.get(wmsapi, item)), config.parser.options(wmsapi)))
0cad501ee6fe7d11e4d244dfe26a572b1c45cfae /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/0cad501ee6fe7d11e4d244dfe26a572b1c45cfae/local_wms.py
list = filter(lambda x: self._jobs.get(x, Job()).attempt < self.maxRetry, self.ready)
list = filter(lambda x: self._jobs.get(x, Job()).attempt - 1 < self.maxRetry, self.ready)
def getSubmissionJobs(self, maxsample): # Determine number of jobs to submit submit = self.nJobs nQueued = len(self.queued) if self.inQueue > 0: submit = min(submit, self.inQueue - nQueued) if self.inFlight > 0: submit = min(submit, self.inFlight - nQueued - len(self.running)) if self.config.opts.continuous: submit = min(submit, maxsample) submit = max(submit, 0)
29ce5a23e477b88d191bb796bb2443bffeab4377 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/29ce5a23e477b88d191bb796bb2443bffeab4377/job_db.py
parser.add_option("-u", '--update', dest="update", default=False, action="store_true",
parser.add_option("-u", '--update', dest="update", default=False, action="store_true",
def main(args): help = \
c6a0ee5c2a66f2279cfdc6c8b3f53f6239ab6664 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/c6a0ee5c2a66f2279cfdc6c8b3f53f6239ab6664/downloadFromSE.py
opts.output = os.path.abspath(os.path.join(workDir, 'se_output'))
opts.output = os.path.join(workDir, 'se_output') opts.output = os.path.abspath(opts.output)
def main(args): help = \
c6a0ee5c2a66f2279cfdc6c8b3f53f6239ab6664 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/c6a0ee5c2a66f2279cfdc6c8b3f53f6239ab6664/downloadFromSE.py
if (realmain(opts, args) or not opts.loop) and not opts.infinite: break time.sleep(60)
try: if (realmain(opts, args) or not opts.loop) and not opts.infinite: break time.sleep(60) except KeyboardInterrupt: print "\n\nDownload aborted!\n" sys.exit(1)
def processShorthand(optSet): if optSet: parser.parse_args(args = optSet.split() + sys.argv[1:], values = opts)
45e0a8366ef09ad2b9900eb80d335aaa03af6bdc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/45e0a8366ef09ad2b9900eb80d335aaa03af6bdc/downloadFromSE.py
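The added try/except turns Ctrl+C during the poll-and-sleep download loop into a clean exit instead of a KeyboardInterrupt traceback. A self-contained sketch of the same pattern, with one_pass as a stand-in for realmain and a shortened sleep:

    import sys
    import time

    attempts = []
    def one_pass():
        # Stand-in for one download pass; succeeds on the third call.
        attempts.append(1)
        return len(attempts) >= 3

    while True:
        try:
            if one_pass():
                break
            time.sleep(1)  # the original waits 60 seconds between passes
        except KeyboardInterrupt:
            print('\n\nDownload aborted!\n')
            sys.exit(1)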
utils.eprint(procRM.getMessage())
utils.eprint("%s\n\n" % procRM.getMessage())
def dlfs_rm(path, msg): procRM = se_utils.se_rm(path) if procRM.wait() != 0: print "\t\tUnable to remove %s!" % msg utils.eprint(procRM.getMessage())
45e0a8366ef09ad2b9900eb80d335aaa03af6bdc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/45e0a8366ef09ad2b9900eb80d335aaa03af6bdc/downloadFromSE.py
procCP = se_utils.se_copy(os.path.join(pathSE, name_dest), outFilePath) if procCP.wait() != 0:
myGetSize = lambda x: "(%7s)" % gcSupport.prettySize(os.path.getsize(x.replace('file://', ''))) def monitorFile(path, lock): while not lock.acquire(False): try: print "\r\t", name_dest, myGetSize(path), sys.stdout.flush() except: pass time.sleep(1) lock.release() monitorLock = threading.Lock() monitorLock.acquire() monitor = threading.Thread(target = monitorFile, args = (checkPath, monitorLock)) monitor.start() try: procCP = se_utils.se_copy(os.path.join(pathSE, name_dest), outFilePath, tmp = checkPath) result = procCP.wait() finally: monitorLock.release() monitor.join() if result != 0:
def processSingleJob(jobNum): print "Job %d:" % jobNum,
45e0a8366ef09ad2b9900eb80d335aaa03af6bdc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/45e0a8366ef09ad2b9900eb80d335aaa03af6bdc/downloadFromSE.py
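The added block runs the copy under a watcher thread that reprints the growing file size once per second; a threading.Lock, held by the main thread and released in the finally clause, doubles as the stop signal. A simplified, runnable sketch of that lock-as-stop-flag pattern (the size printing is reduced to a placeholder):

    import threading
    import time

    def monitor(lock):
        # Loop until the main thread releases the lock.
        while not lock.acquire(False):
            print('copy in progress...')
            time.sleep(1)
        lock.release()

    stop = threading.Lock()
    stop.acquire()  # held while the 'copy' runs
    watcher = threading.Thread(target=monitor, args=(stop,))
    watcher.start()
    try:
        time.sleep(3)  # stand-in for the blocking se_copy call
    finally:
        stop.release()  # tell the watcher to finish
        watcher.join()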
checkPath = checkPath.replace('file://', '') print "(%s)" % gcSupport.prettySize(os.path.getsize(checkPath)), hashLocal = md5sum(checkPath) if 'file://' not in outFilePath:
hashLocal = md5sum(checkPath.replace('file://', '')) if not ('file://' in outFilePath):
def processSingleJob(jobNum): print "Job %d:" % jobNum,
45e0a8366ef09ad2b9900eb80d335aaa03af6bdc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/45e0a8366ef09ad2b9900eb80d335aaa03af6bdc/downloadFromSE.py
lumirange = __LUMI_RANGE__
lumirange = [__LUMI_RANGE__]
def customise_for_gc(process): try: maxevents = __MAX_EVENTS__ process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(maxevents) ) except: pass # Dataset related setup try: tmp = __SKIP_EVENTS__ process.source = cms.Source("PoolSource", skipEvents = cms.untracked.uint32(__SKIP_EVENTS__), fileNames = cms.untracked.vstring(__FILE_NAMES__) ) try: secondary = __FILE_NAMES2__ process.source.secondaryFileNames = cms.untracked.vstring(secondary) except: pass try: lumirange = __LUMI_RANGE__ process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange(lumirange) except: pass except: pass # Generator related setup try: if hasattr(process, "generator"): process.source.firstLuminosityBlock = cms.untracked.uint32(1+__MY_JOBID__) except: pass if hasattr(process, "RandomNumberGeneratorService"): randSvc = RandomNumberServiceHelper(process.RandomNumberGeneratorService) randSvc.populate() process.AdaptorConfig = cms.Service("AdaptorConfig", enable=cms.untracked.bool(True), stats = cms.untracked.bool(True), ) return (process)
ce7cd841de7e89cdc786fd2ac62df64694e8866c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/ce7cd841de7e89cdc786fd2ac62df64694e8866c/fragmentForCMSSW.py
self.sandPath = config.getPath('local', 'sandbox path', os.path.join(config.workDir, 'sandbox'))
self.sandPath = config.getPath('local', 'sandbox path', os.path.join(config.workDir, 'sandbox'), check=False)
def __init__(self, config, module, monitor): wmsapi = config.get('local', 'wms', self._guessWMS()) if wmsapi != self._guessWMS(): utils.vprint('Default batch system on this host is: %s' % self._guessWMS(), -1, once = True) self.api = LocalWMSApi.open(wmsapi, config, self) utils.vprint('Using batch system: %s' % self.api.__class__.__name__, -1) self.addAttr = {} if config.parser.has_section(wmsapi): self.addAttr = dict(map(lambda item: (item, config.get(wmsapi, item)), config.parser.options(wmsapi)))
a556cd996f24be393e47061c3bf846ea796f6039 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/a556cd996f24be393e47061c3bf846ea796f6039/local_wms.py
try: listBlockInfo = api.listBlocks(self.datasetPath, nosite=True) (listFileInfo, seList) = ([], {}) def listFileInfoThread(self, result): result.extend(api.listFiles(self.datasetPath, retriveList=QM(self.selectedLumis, ['retrive_lumi'], []))) tFile = utils.gcStartThread(listFileInfoThread, self, listFileInfo)
def getWithPhedex(listBlockInfo, seList):
def getBlocksInternal(self): import urllib2 api = createDBSAPI(self.url) try: listBlockInfo = api.listBlocks(self.datasetPath, nosite=True) # Start thread to retrieve list of files (listFileInfo, seList) = ([], {}) def listFileInfoThread(self, result): result.extend(api.listFiles(self.datasetPath, retriveList=QM(self.selectedLumis, ['retrive_lumi'], []))) tFile = utils.gcStartThread(listFileInfoThread, self, listFileInfo) # Get dataset list from PhEDex (concurrent with listFiles) phedexArgFmt = lambda x: ('block=%s' % x['Name']).replace('/', '%2F').replace('#', '%23') phedexArg = str.join('&', map(phedexArgFmt, listBlockInfo)) phedexData = urllib2.urlopen('https://cmsweb.cern.ch/phedex/datasvc/json/prod/blockreplicas', phedexArg).read() if str(phedexData).lower().find('error') != -1: raise DatasetError("Phedex error '%s'" % phedexData) phedexDict = eval(compile(phedexData.replace('null','None'), '<string>', 'eval'))['phedex']['block'] for phedexBlock in phedexDict: phedexSelector = lambda x: (x['complete'] == 'y') or not self.onlyComplete phedexSites = dict(map(lambda x: (x['node'], x['se']), filter(phedexSelector, phedexBlock['replica']))) phedexSitesOK = utils.doBlackWhiteList(phedexSites.keys(), self.phedexBL) seList[phedexBlock['name']] = map(lambda x: phedexSites[x], phedexSitesOK) tFile.join() except: raise RethrowError('DBS exception')
e3a9bddbdd4b85bc176420dc537eed452b7b5157 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/e3a9bddbdd4b85bc176420dc537eed452b7b5157/provider_dbsv2.py
blockInfo[DataProvider.SEList] = seList.get(block['Name'], [])
if self.phedex: blockInfo[DataProvider.SEList] = seList.get(block['Name'], []) else: blockInfo[DataProvider.SEList] = map(lambda x: x['Name'], block['StorageElementList'])
def lumiFilter(lumilist): if self.selectedLumis: for lumi in lumilist: if selectLumi((lumi['RunNumber'], lumi['LumiSectionNumber']), self.selectedLumis): return True return self.selectedLumis == None
e3a9bddbdd4b85bc176420dc537eed452b7b5157 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/e3a9bddbdd4b85bc176420dc537eed452b7b5157/provider_dbsv2.py
params += ' -c %d' % ((reqs[WMS.WALLTIME] + 59) / 60)
params += ' -W %d' % ((reqs[WMS.WALLTIME] + 59) / 60) if WMS.CPUTIME in reqs: params += ' -c %d' % ((reqs[WMS.CPUTIME] + 59) / 60)
def getSubmitArguments(self, jobNum, sandbox, stdout, stderr, addAttr): # Job name params = ' -J %s' % self.wms.getJobName(jobNum) # Job requirements reqs = dict(self.wms.getRequirements(jobNum)) if WMS.SITES in reqs: params += ' -q %s' % reqs[WMS.SITES][0] if WMS.WALLTIME in reqs: params += ' -c %d' % ((reqs[WMS.WALLTIME] + 59) / 60) # IO paths params += ' -o %s -e %s' % (stdout, stderr) return params
3fafd09316edfac9bccc2d04886a8399a3e25d08 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/3fafd09316edfac9bccc2d04886a8399a3e25d08/lsf.py
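Both batch-system records convert time requirements given in seconds into the whole minutes the submit flags expect, rounding up via (secs + 59) / 60 (integer division under Python 2). The same arithmetic, spelled portably:

    def to_minutes(secs):
        # Round seconds up to whole minutes; // matches the Python 2
        # integer division used in the records.
        return (secs + 59) // 60

    print(to_minutes(60))    # 1
    print(to_minutes(61))    # 2
    print(to_minutes(3600))  # 60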
if opts.shuffle: random.shuffle(jobList) else: jobList.sort() for jobNum in jobList:
def processSingleJob(jobNum):
def incInfo(x): infos[x] = infos.get(x, 0) + 1
7945a33e9682b7ed9f4328384dd89a4436769a03 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/7945a33e9682b7ed9f4328384dd89a4436769a03/downloadFromSE.py
continue
return
def incInfo(x): infos[x] = infos.get(x, 0) + 1
7945a33e9682b7ed9f4328384dd89a4436769a03 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/7945a33e9682b7ed9f4328384dd89a4436769a03/downloadFromSE.py
incInfo("Processing") continue
return incInfo("Processing")
def incInfo(x): infos[x] = infos.get(x, 0) + 1
7945a33e9682b7ed9f4328384dd89a4436769a03 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/7945a33e9682b7ed9f4328384dd89a4436769a03/downloadFromSE.py
incInfo("Downloaded") continue
return incInfo("Downloaded")
def incInfo(x): infos[x] = infos.get(x, 0) + 1
7945a33e9682b7ed9f4328384dd89a4436769a03 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/7945a33e9682b7ed9f4328384dd89a4436769a03/downloadFromSE.py
break
sys.exit(1)
def incInfo(x): infos[x] = infos.get(x, 0) + 1
7945a33e9682b7ed9f4328384dd89a4436769a03 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/7945a33e9682b7ed9f4328384dd89a4436769a03/downloadFromSE.py
incInfo("No files") continue
return incInfo("No files")
def incInfo(x): infos[x] = infos.get(x, 0) + 1
7945a33e9682b7ed9f4328384dd89a4436769a03 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/7945a33e9682b7ed9f4328384dd89a4436769a03/downloadFromSE.py
continue
return
def incInfo(x): infos[x] = infos.get(x, 0) + 1
7945a33e9682b7ed9f4328384dd89a4436769a03 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/7945a33e9682b7ed9f4328384dd89a4436769a03/downloadFromSE.py
dlfs_rm('file://%s' % checkPath, 'SE file')
if 'file://' not in outFilePath: dlfs_rm('file://%s' % checkPath, 'SE file')
def incInfo(x): infos[x] = infos.get(x, 0) + 1
7945a33e9682b7ed9f4328384dd89a4436769a03 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/7945a33e9682b7ed9f4328384dd89a4436769a03/downloadFromSE.py
continue
return
def incInfo(x): infos[x] = infos.get(x, 0) + 1
7945a33e9682b7ed9f4328384dd89a4436769a03 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/7945a33e9682b7ed9f4328384dd89a4436769a03/downloadFromSE.py
userDefaultsFile = resolvePath("~/.grid-control.conf", check = False)
userDefaultsFile = utils.resolvePath("~/.grid-control.conf", check = False)
def parseFileInt(fn): try: parser.readfp(open(fn, 'r')) except IOError: raise ConfigError("Error while reading configuration file '%s'!" % fn) except cp.Error: print "Configuration file `%s' contains an error:" % fn raise
545a17b18e0b5208d69b9ae3f55d6501cf3e90a0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/545a17b18e0b5208d69b9ae3f55d6501cf3e90a0/config.py
args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file:////'), urls))
args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file://'), urls))
def se_runcmd(cmd, varDict = {}, *urls): runLib = utils.pathGC('share', 'gc-run.lib') args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file:////'), urls)) varString = str.join(' ', map(lambda x: 'export %s="%s";' % (x, varDict[x]), varDict)) return utils.LoggedProcess('source %s || exit 1; %s %s %s' % (runLib, varString, cmd, args))
fbab9d43de135c3f1fbb52ffb38c27136d9d8f66 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/fbab9d43de135c3f1fbb52ffb38c27136d9d8f66/storage.py
if infos["Downloaded"] == len(jobList):
if ("Downloaded" in infos) and (infos["Downloaded"] == len(jobList)):
def incInfo(x): infos[x] = infos.get(x, 0) + 1
944df4bf992b12542c467b72778c7eaa4cb156da /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/944df4bf992b12542c467b72778c7eaa4cb156da/downloadFromSE.py
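Since incInfo creates counters lazily, infos has no "Downloaded" key until at least one job reaches that state, so the unguarded lookup could raise KeyError; the fix checks membership first. dict.get with a default expresses the same test in one step:

    infos = {}      # counters appear only via incInfo
    jobCount = 5

    # Guarded lookup, as in the fixed line:
    done = ('Downloaded' in infos) and (infos['Downloaded'] == jobCount)

    # One-step equivalent with a default:
    print(infos.get('Downloaded', 0) == jobCount)  # False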
lambda se_rm(target) = utils.LoggedProcess(se_runcmd("url_rm", se_url(target)))
se_rm = lambda target: utils.LoggedProcess(se_runcmd("url_rm", se_url(target)))
def se_runcmd(cmd, urls): runLib = utils.pathGC('share', 'gc-run.lib') urlargs = str.join(' ', map(lambda x: '"%s"' % x.replace('dir://', 'file://'), urls)) return 'source %s || exit 1; print_and_eval "%s" %s' % (runLib, cmd, urlargs)
0bcba6fd56c2b09fe2b8c7a8a5bea812ed7b0651 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8443/0bcba6fd56c2b09fe2b8c7a8a5bea812ed7b0651/se_utils.py
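The removed line, lambda se_rm(target) = ..., is not valid Python: a lambda is an anonymous expression that must be bound to a name with ordinary assignment, exactly as the fixed line does, and a named def is usually the clearer choice anyway. A sketch with a hypothetical run() standing in for the utils.LoggedProcess/se_runcmd plumbing from the record:

    def run(cmd):
        # Hypothetical stand-in for utils.LoggedProcess(se_runcmd(...)).
        print('would run: %s' % cmd)

    # Correct binding of a lambda, mirroring the fixed line:
    se_rm = lambda target: run('url_rm "%s"' % target)

    # Equivalent def, generally preferred for named helpers:
    def se_rm2(target):
        return run('url_rm "%s"' % target)

    se_rm('file:///tmp/example')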