rem (stringlengths 1–322k) | add (stringlengths 0–2.05M) | context (stringlengths 4–228k) | meta (stringlengths 156–215) |
---|---|---|---|
def populate_SkyLocTable(skyloctable,coinc,adt60,adt90,adt60dD60,adt90dD90,\ | def populate_SkyLocTable(skyloctable,coinc,adt60,adt90,arank60,arank90,\ | def populate_SkyLocTable(skyloctable,coinc,adt60,adt90,adt60dD60,adt90dD90,\ pt,grid_fname,skymap_fname=None): """ populate a row in a skyloctable """ row = skyloctable.RowType() row.end_time = coinc.time row.set_ifos(coinc.ifo_list) rhosquared = 0.0 for ifo in coinc.ifo_list: rhosquared += coinc.snr[ifo]*coinc.snr[ifo] row.comb_snr = sqrt(rhosquared) row.dec,row.ra = pt[0],pt[1] row.a60dt = adt60 row.a90dt = adt90 row.a60dt60dD = adt60dD60 row.a90dt90dD = adt90dD90 row.min_eff_distance = min(effD for effD in coinc.eff_distances.values()) if skymap_fname: row.skymap = os.path.basename(str(skymap_fname)) else: row.skymap = skymap_fname row.grid = os.path.basename(str(grid_fname)) skyloctable.append(row) | 638b625f25ecf60716adb7235da0bc0e8d672439 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/638b625f25ecf60716adb7235da0bc0e8d672439/skylocutils.py |
row.a60dt60dD = adt60dD60 row.a90dt90dD = adt90dD90 | row.a60rank = arank60 row.a90rank = arank90 | def populate_SkyLocTable(skyloctable,coinc,adt60,adt90,adt60dD60,adt90dD90,\ pt,grid_fname,skymap_fname=None): """ populate a row in a skyloctable """ row = skyloctable.RowType() row.end_time = coinc.time row.set_ifos(coinc.ifo_list) rhosquared = 0.0 for ifo in coinc.ifo_list: rhosquared += coinc.snr[ifo]*coinc.snr[ifo] row.comb_snr = sqrt(rhosquared) row.dec,row.ra = pt[0],pt[1] row.a60dt = adt60 row.a90dt = adt90 row.a60dt60dD = adt60dD60 row.a90dt90dD = adt90dD90 row.min_eff_distance = min(effD for effD in coinc.eff_distances.values()) if skymap_fname: row.skymap = os.path.basename(str(skymap_fname)) else: row.skymap = skymap_fname row.grid = os.path.basename(str(grid_fname)) skyloctable.append(row) | 638b625f25ecf60716adb7235da0bc0e8d672439 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/638b625f25ecf60716adb7235da0bc0e8d672439/skylocutils.py |
def __init__(self, *args): pipeline.LigolwAddNode.__init__(self, *args) | def __init__(self, job, remove_input, *args): pipeline.LigolwAddNode.__init__(self, job, *args) | def __init__(self, *args): pipeline.LigolwAddNode.__init__(self, *args) self.input_cache = [] self.output_cache = [] self.cache_dir = os.path.join(os.getcwd(), self.job().cache_dir) | 91a38cda9bfd71714f9f48773267303e4bb7fd9c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/91a38cda9bfd71714f9f48773267303e4bb7fd9c/power.py |
for c in cache: self.add_var_arg("--remove-input-except %s" % c.path()) | if self.remove_input: for c in cache: self.add_var_arg("--remove-input-except %s" % c.path()) | def add_preserve_cache(self, cache): for c in cache: self.add_var_arg("--remove-input-except %s" % c.path()) | 91a38cda9bfd71714f9f48773267303e4bb7fd9c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/91a38cda9bfd71714f9f48773267303e4bb7fd9c/power.py |
def make_lladd_fragment(dag, parents, tag, segment = None, input_cache = None, preserve_cache = None, extra_input_cache = None): node = LigolwAddNode(lladdjob) | def make_lladd_fragment(dag, parents, tag, segment = None, input_cache = None, remove_input = False, preserve_cache = None, extra_input_cache = None): node = LigolwAddNode(lladdjob, remove_input = remove_input) | def make_lladd_fragment(dag, parents, tag, segment = None, input_cache = None, preserve_cache = None, extra_input_cache = None): node = LigolwAddNode(lladdjob) # link to parents for parent in parents: node.add_parent(parent) # build input cache if input_cache is None: # default is to use all output files from parents for parent in parents: node.add_input_cache(parent.get_output_cache()) else: # but calling code can provide its own collection node.add_input_cache(input_cache) if extra_input_cache is not None: # sometimes it helps to add some extra node.add_input_cache(extra_input_cache) if preserve_cache is not None: node.add_preserve_cache(preserve_cache) # construct names for the node and output file, and override the # segment if needed [cache_entry] = node.get_output_cache() if segment is None: segment = cache_entry.segment node.set_name("lladd_%s_%s_%d_%d" % (tag, cache_entry.observatory, int(segment[0]), int(abs(segment)))) node.set_output("%s-%s-%d-%d.xml.gz" % (cache_entry.observatory, tag, int(segment[0]), int(abs(segment))), segment = segment) node.set_retry(3) dag.add_node(node) return set([node]) | 91a38cda9bfd71714f9f48773267303e4bb7fd9c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/91a38cda9bfd71714f9f48773267303e4bb7fd9c/power.py |
return make_lladd_fragment(dag, nodes, tag) | return make_lladd_fragment(dag, nodes, tag, remove_input = True) | def make_multibinj_fragment(dag, seg, tag): flow = float(powerjob.get_opts()["low-freq-cutoff"]) fhigh = flow + float(powerjob.get_opts()["bandwidth"]) nodes = make_binj_fragment(dag, seg, tag, 0.0, flow, fhigh) return make_lladd_fragment(dag, nodes, tag) | 91a38cda9bfd71714f9f48773267303e4bb7fd9c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/91a38cda9bfd71714f9f48773267303e4bb7fd9c/power.py |
coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTrigs,'snr') | statistic = CoincInspiralUtils.coincStatistic('snr',None,None) coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTrigs,statistic) | def get_coincs_from_coire(self,files): """ uses CoincInspiralUtils to get data from old-style (coire'd) coincs """ coincTrigs = CoincInspiralUtils.coincInspiralTable() inspTrigs = SnglInspiralUtils.ReadSnglInspiralFromFiles(files, \ mangle_event_id = True,verbose=None) #note that it's hardcoded to use snr as the statistic coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTrigs,'snr') try: inspInj = SimInspiralUtils.ReadSimInspiralFromFiles(files) coincTrigs.add_sim_inspirals(inspInj) #FIXME: name the exception! except: pass | efa659317ebb3cc99bbe328a0a0bf36b28712062 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/efa659317ebb3cc99bbe328a0a0bf36b28712062/skylocutils.py |
ds = pi*sqrt(2.0)*resolution/180.0 | ds = pi*resolution/180.0 | def gridsky(resolution): """ grid the sky up into roughly square regions resolution is the length of a side the points get placed at the center of the squares and to first order each square has an area of resolution^2 """ latitude = 0.0 longitude = pi ds = pi*sqrt(2.0)*resolution/180.0 points = [(latitude-0.5*pi, longitude)] while latitude <= pi: latitude += ds longitude = 0.0 points.append((latitude-0.5*pi, longitude)) while longitude <= 2.0*pi: longitude += ds / abs(sin(latitude)) points.append((latitude-0.5*pi, longitude)) #there's some slop so get rid of it and only focus on points on the sphere sphpts = [] for pt in points: if pt[0] > pi/2 or pt[0] < -pi/2 \ or pt[1] > 2*pi or pt[1] < 0: pass else: sphpts.append(pt) return sphpts | 92cbddf405d13484ba64bef9acff8c41d7ebb714 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/92cbddf405d13484ba64bef9acff8c41d7ebb714/skylocutils.py |
fgtemp = finegrid | fgtemp = finegrid[:] | def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp | 92cbddf405d13484ba64bef9acff8c41d7ebb714 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/92cbddf405d13484ba64bef9acff8c41d7ebb714/skylocutils.py |
ds = coarseres*pi/180 | ds = coarseres*pi/180.0 | def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp | 92cbddf405d13484ba64bef9acff8c41d7ebb714 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/92cbddf405d13484ba64bef9acff8c41d7ebb714/skylocutils.py |
if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: | if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4.0 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*sin(cpt[0])*sin(cpt[0]) \ <= ds*ds/4.0: | def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp | 92cbddf405d13484ba64bef9acff8c41d7ebb714 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/92cbddf405d13484ba64bef9acff8c41d7ebb714/skylocutils.py |
s = numpy.real(s) | s = numpy.real(s) | def IMRpeakAmp(m1,m2,spin1z,spin2z,d): """ IMRpeakAmp finds the peak amplitude of the waveform for a given source parameters and the source distance. usage: IMRpeakAmp(m1,m2,spin1z,spin2z,distance) e.g. spawaveApp.IMRpeakAmp(30,40,0.45,0.5,100) """ chi = spawaveform.computechi(m1, m2, spin1z, spin2z) imrfFinal = spawaveform.imrffinal(m1, m2, chi, 'fcut') fLower = 10.0 order = 7 dur = 2**numpy.ceil(numpy.log2(spawaveform.chirptime(m1,m2,order,fLower))) sr = 2**numpy.ceil(numpy.log2(imrfFinal*2)) deltaF = 1.0 / dur deltaT = 1.0 / sr s = numpy.empty(sr * dur, 'complex128') spawaveform.imrwaveform(m1, m2, deltaF, fLower, s, spin1z, spin2z) s = scipy.ifft(s) #s = numpy.abs(s) s = numpy.real(s) max = numpy.max(s)/d return max | 3608b615d81850531b55d6a54047a3429ee82d64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/3608b615d81850531b55d6a54047a3429ee82d64/spawaveApp.py |
sngl_burst.* time_slide.offset, | sngl_burst.*, time_slide.offset | def add_noninjections(self, param_func, database, *args): # iterate over burst<-->burst coincs cursor = database.connection.cursor() for coinc_event_id, time_slide_id in database.connection.cursor().execute(""" | b103add88675e74edd370e9fc17f8b46d37b42e3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b103add88675e74edd370e9fc17f8b46d37b42e3/ligolw_burca_tailor.py |
except ImportError, KeyError: | except (ImportError, KeyError): | def get_username(): """ Try to retrieve the username from a variety of sources. First the environment variable LOGNAME is tried, if that is not set the environment variable USERNAME is tried, if that is not set the password database is consulted (only on Unix systems, if the import of the pwd module succedes), finally if that fails KeyError is raised. """ try: return os.environ["LOGNAME"] except KeyError: pass try: return os.environ["USERNAME"] except KeyError: pass try: import pwd return pwd.getpwuid(os.getuid())[0] except ImportError, KeyError: raise KeyError | 8f151a7950beba23473616a6e5a1011cbec31430 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/8f151a7950beba23473616a6e5a1011cbec31430/process.py |
a list of files or an empty list if nothing found | a list of files or an empty list if nothing found. It uses the pathing information from the files passed via cacheListing to aid in our filesystem search. | def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found """ #Open the cache entry and search for those entrys fileListing=list() for entry in cacheListing: #Cache files listed themselves comment out following line fileListing.append(entry) fileListing.extend([x.rstrip("\n") for x in file(entry)]) #PATCH START to add in the z distribution files for fname in fileListing: if ".html" in fname: zFile=fname.replace(".html",".txt") fileListing.append(zFile) #PATCH END finalList=list() for thisFile in fileListing: #Search filesystem for file full path finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile)) #Look for potential matching thumbnails if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) if len(finalList) < 1: return list() else: return finalList | b5036db122ffd4acba48f4b4049b118c8bb775f4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b5036db122ffd4acba48f4b4049b118c8bb775f4/makeCheckListWiki.py |
fileListing=list() | finalList=list() | def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found """ #Open the cache entry and search for those entrys fileListing=list() for entry in cacheListing: #Cache files listed themselves comment out following line fileListing.append(entry) fileListing.extend([x.rstrip("\n") for x in file(entry)]) #PATCH START to add in the z distribution files for fname in fileListing: if ".html" in fname: zFile=fname.replace(".html",".txt") fileListing.append(zFile) #PATCH END finalList=list() for thisFile in fileListing: #Search filesystem for file full path finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile)) #Look for potential matching thumbnails if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) if len(finalList) < 1: return list() else: return finalList | b5036db122ffd4acba48f4b4049b118c8bb775f4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b5036db122ffd4acba48f4b4049b118c8bb775f4/makeCheckListWiki.py |
finalList=list() for thisFile in fileListing: finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile)) if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) if len(finalList) < 1: return list() else: return finalList | pathingInfo=os.path.dirname(entry) for thisFile in fileListing: finalList.extend(fnmatch.filter(self.fsys,"*%s*%s"%(pathingInfo,thisFile))) if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) return finalList | def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found """ #Open the cache entry and search for those entrys fileListing=list() for entry in cacheListing: #Cache files listed themselves comment out following line fileListing.append(entry) fileListing.extend([x.rstrip("\n") for x in file(entry)]) #PATCH START to add in the z distribution files for fname in fileListing: if ".html" in fname: zFile=fname.replace(".html",".txt") fileListing.append(zFile) #PATCH END finalList=list() for thisFile in fileListing: #Search filesystem for file full path finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile)) #Look for potential matching thumbnails if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) if len(finalList) < 1: return list() else: return finalList | b5036db122ffd4acba48f4b4049b118c8bb775f4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b5036db122ffd4acba48f4b4049b118c8bb775f4/makeCheckListWiki.py |
myCacheMask="*/%s-analyseQscan_%s_%s*_seis_rds*.cache"%(sngl.ifo,sngl.ifo,timeString) | myCacheMask="*%s*/%s-analyseQscan_%s_%s*_seis_rds*.cache"%\ (self.coinc.type,sngl.ifo,sngl.ifo,timeString) | def get_analyzeQscan_SEIS(self): """ This seeks out the html and png files associated with SEIS result of an analyzeQscan job. """ cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*/%s-analyseQscan_%s_%s*_seis_rds*.cache"%(sngl.ifo,sngl.ifo,timeString) #Read the cache file or files cacheList.extend(fnmatch.filter(self.fsys,myCacheMask)) cacheFiles=self.__readCache__(cacheList) return cacheFiles | b5036db122ffd4acba48f4b4049b118c8bb775f4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b5036db122ffd4acba48f4b4049b118c8bb775f4/makeCheckListWiki.py |
myCacheMask="*/%s-analyseQscan_%s_%s_rds*.cache"%(sngl.ifo,sngl.ifo,timeString) | myCacheMask="*%s*/%s-analyseQscan_%s_%s_rds*.cache"%\ (self.coint.type,sngl.ifo,sngl.ifo,timeString) | def get_analyzeQscan_RDS(self): """ """ #analyseQscan.py_FG_RDS_full_data/H1-analyseQscan_H1_931176926_116_rds-unspecified-gpstime.cache cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*/%s-analyseQscan_%s_%s_rds*.cache"%(sngl.ifo,sngl.ifo,timeString) #Ignore the files with seis_rds in them for x in fnmatch.filter(self.fsys,myCacheMask): if not x.__contains__('seis_rds'): cacheList.append(x) #Read the cache file or files cacheFiles=self.__readCache__(cacheList) return cacheFiles | b5036db122ffd4acba48f4b4049b118c8bb775f4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b5036db122ffd4acba48f4b4049b118c8bb775f4/makeCheckListWiki.py |
myCacheMask="*/%s-analyseQscan_%s_%s*_ht*.cache"%(sngl.ifo,sngl.ifo,timeString) | myCacheMask="*%s*/%s-analyseQscan_%s_%s*_ht*.cache"\ %(self.coinc.type,sngl.ifo,sngl.ifo,timeString) | def get_analyzeQscan_HT(self): """ """ #analyseQscan.py_FG_HT_full_data/H1-analyseQscan_H1_931176926_116_ht-unspecified-gpstime.cache cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*/%s-analyseQscan_%s_%s*_ht*.cache"%(sngl.ifo,sngl.ifo,timeString) cacheList.extend(fnmatch.filter(self.fsys,myCacheMask)) #Read the cache file or files cacheFiles=self.__readCache__(cacheList) return cacheFiles | b5036db122ffd4acba48f4b4049b118c8bb775f4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b5036db122ffd4acba48f4b4049b118c8bb775f4/makeCheckListWiki.py |
cellString=cellString+" %s Z-Percentage:%1.2f <<BR>> "%(myName,float(myRank)) | cellString=cellString+" %s Z-Percentage:%1.2f <<BR>> "%(myName,float(myRank)) | def insertAnalyzeQscanTable(self, images=None, thumbs=None, indexes=None, ranksOmega=None, imagesAQ=None, thumbsAQ=None, indexesAQ=None, ranksAQ=None): """ Insert a multiple IFO table with 5 cols with the AQ underneath this depends on the numer of IFO keys in indexes dictionary. The option channelRanks is not required to change the plot order! Channel ranks is dict similar in shape to other args. Cells are shaded light grey if they are top N channels and that the trigger is greater in value that 0.5. Assuming the channelRanks dict is not empty. """ #Review the keys for Qscans and analyzeQscans. if not images.keys()==thumbs.keys()==indexes.keys(): sys.stderr.write("Error: Keys for Qscan tables creations inconsistent!\n") if not imagesAQ.keys()==thumbsAQ.keys()==indexesAQ.keys(): sys.stderr.write("Error: Keys for Qscan tables creations inconsistent!\n") keyList=indexes.keys() if len(keyList) < indexesAQ.keys(): keyList=indexesAQ.keys() for ifo in keyList: # If channel files exist read those # assuming that there are related images to plot channelNames=list() if ranksOmega[ifo] and images[ifo]: #Add only channels in Omega with a plot also. tmpChannels=[str(x[0]).strip() for x in ranksOmega[ifo]] for image in images[ifo]: for myChan in tmpChannels: if os.path.basename(image).__contains__(myChan): channelNames.append(myChan) if ranksAQ[ifo] and imagesAQ[ifo]: #Add only channels in Omega with a plot also. tmpChannels=[str(x[0]).strip() for x in ranksAQ[ifo]] for imageAQ in imagesAQ[ifo]: for myChan in tmpChannels: if os.path.basename(imageAQ).__contains__(myChan): channelNames.append(myChan) if (images[ifo]) and (not ranksOmega[ifo]): sys.stdout.write("Converting Omega filenames to channel names.\n") channelNames.extend(self.__filenameToChannelList__(images[ifo])) if (not ranksAQ[ifo]) and (imagesAQ[ifo]): sys.stdout.write("Converting AnalyzeQscan filenames to channel names.\n") channelNames.extend(self.__filenameToChannelList__(imagesAQ[ifo])) #From all available channel names make a UNIQUE listing! #Simplify channel names Kill L1:, L0: etc ... channelNames=[x.strip().split(":",1)[1] for x in channelNames] uniqChannelNames=list() lastName=None channelNames.sort() while channelNames: myName=channelNames.pop() if lastName != myName: lastName=myName uniqChannelNames.append(myName) #Check if uniqChannelNames list empty if len(uniqChannelNames) < 1: sys.stderr.write("Warning: [%s] No channels available to plot in table!\n"%ifo) uniqChannelNames.append("No_Channels_To_Display") ranksAQ[ifo]=list() ranksOmega[ifo]=list() #Extract only channel ranks which are available to plot! trimRanksOmega=list() trimRanksAQ=list() while ranksOmega[ifo]: nameRO=ranksOmega[ifo].pop() #If at least 1 match if str(nameRO[0]).strip().split(":",1)[1] in uniqChannelNames: trimRanksOmega.append(nameRO) while ranksAQ[ifo]: nameRAQ=ranksAQ[ifo].pop() #If at least 1 match if str(nameRAQ[0]).strip().split(":",1)[1] in uniqChannelNames: trimRanksAQ.append(nameRAQ) # Configure table columns colCount=3 # Create short list count shortListLength=3*colCount #Create a short list for analyzeQscan if available shortList=list() if trimRanksAQ: tmpList=[[x[2],x] for x in trimRanksAQ] tmpList.sort(reverse=True) shortList=[x[1] for x in tmpList][0:min(len(tmpList),shortListLength)] #Select channels to plot if shortlist gt zero else plot all! 
if shortList: shortListChannels=[a for a,b,c in shortList] else: shortListChannels=uniqChannelNames #Create table object fullRows,modRows=divmod(len(shortListChannels),colCount) if modRows > 0: rowCount=fullRows+1 else: rowCount=fullRows myTable=self.wikiTable(rowCount,colCount) myTable.setTableStyle("text-align:center") #Insert HTML links and IFO Label contentString="" contentString=contentString+" %s "%(ifo) #Add html links for table title for newLink in indexes[ifo]: contentString=contentString+" %s "%self.makeExternalLink(newLink,"Qscan") for newLink in indexesAQ[ifo]: contentString=contentString+" %s "%self.makeExternalLink(newLink,"analyzeQscan") myTable.setTableHeadline(contentString) #Start filling cells with Qscan and analyzeQscan scatter plot for cellNum,channel in enumerate(shortListChannels): #Grab plot info for this channel name #Search and replace ":" -> "_" Remeber for analyzeQscan filenames! #Qscan filenames use ":" and analyzeQscan filenames use "_"! myName=channel try: myOmegaIndex=[x.__contains__(myName) for x in images[ifo]].index(True) except ValueError: myOmegaIndex=None try: myOmegaIndexT=[x.__contains__(myName) for x in thumbs[ifo]].index(True) except ValueError: myOmegaIndexT=None try: myAQIndex=[x.__contains__(myName.replace(":","_")) \ for x in imagesAQ[ifo]].index(True) except ValueError: myAQIndex=None try: myAQIndexT=[x.__contains__(myName.replace(":","_")) \ for x in thumbsAQ[ifo]].index(True) except ValueError: myAQIndexT=None cellString="" #If there was a shortList add the Z value percentage to table! if myName and len(shortList) > 0: #Find rank myRank=0.0 for sName,sZ,sP in shortList: if sName.__contains__(myName): myRank=sP cellString=cellString+" %s Z-Percentage:%1.2f <<BR>> "%(myName,float(myRank)) elif myName: cellString=cellString+" %s <<BR>> "%myName else: cellString=cellString+" Unknown_Channel <<BR>> " if myOmegaIndex!=None: cellString=cellString+" %s "%self.linkedRemoteImage(thumbs[ifo][myOmegaIndexT], images[ifo][myOmegaIndex]) else: cellString=cellString+" Unavailable_Qscan <<BR>> " if myAQIndex!=None: cellString=cellString+" %s "%self.linkedRemoteImage(thumbsAQ[ifo][myAQIndexT], imagesAQ[ifo][myAQIndex]) else: cellString=cellString+" Unavailable_analyzeQScan <<BR>> " #Add string to cell myRow,myCol=divmod(cellNum,colCount) myTable.data[myRow][myCol]=" %s "%cellString self.insertTable(myTable) | b5036db122ffd4acba48f4b4049b118c8bb775f4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b5036db122ffd4acba48f4b4049b118c8bb775f4/makeCheckListWiki.py |
os.stdout.write("Path information to background pickle unchanged.\n") | sys.stdout.write("Path information to background pickle unchanged.\n") | def resetPicklePointer(self,filename=None): """ If you called the class definition with the wrong pickle path. You can reset it with this method. """ if filename==None: os.stdout.write("Path information to background pickle unchanged.\n") elif filename.__contains__("~"): self.__backgroundPickle__=os.path.expanduser(filename) else: self.__backgroundPickle__=filename | 15b8c74e87af07e0931c02fecdfbb15f7dc804da /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/15b8c74e87af07e0931c02fecdfbb15f7dc804da/fu_utils.py |
os.stdout.write("Specify trigger time please.\n") | sys.stdout.write("Specify trigger time please.\n") | def fetchInformationDualWindow(self,triggerTime=None,frontWindow=300,\ backWindow=150,ifoList='DEFAULT'): """ This method is responsible for queries to the data server. The results of the query become an internal list that can be converted into an HTML table. The arguments allow you to query with trigger time of interest and to change the window with each call if desired. The version argument will fetch segments with that version or higher. """ if ifoList=="DEFAULT": ifoList=interferometers if (ifoList == None) or \ (len(ifoList) < 1): sys.stderr.write("Ifolist passed is malformed! : %s\n"%ifoList) return #Set the internal class variable self.ifos self.ifos=ifoList if sum([x.upper() in interferometers for x in ifoList]) < 1: sys.stderr.write("Valid ifos not specified for DQ lookups. %s\n"%ifoList) return triggerTime=float(triggerTime) if triggerTime==int(-1): os.stdout.write("Specify trigger time please.\n") return else: self.triggerTime = float(triggerTime) gpsEnd=int(triggerTime)+int(backWindow) gpsStart=int(triggerTime)-int(frontWindow) sqlString=self.dqvQueryLatestVersion%(gpsEnd,gpsStart) self.resultList=self.query(sqlString) if len(self.resultList) < 1: sys.stdout.write("Query Completed, Nothing Returned for time %s.\n"%(triggerTime)) #Coalesce the segments for each DQ flag #Reparse the information newDQSeg=list() if self.resultList.__len__() > 0: #Obtain list of all flags, ignore IFOs not specified uniqSegmentName=list() for ifo,name,version,comment,start,end in self.resultList: if (not uniqSegmentName.__contains__((ifo,name,version,comment))) and \ (ifo.strip().upper() in ifoList): uniqSegmentName.append((ifo,name,version,comment)) #Add the SCIENCE segment no matter which IFOs are specified! if ((name.lower().__contains__('science')) and \ not (ifo.strip().upper() in ifoList)): uniqSegmentName.append((ifo,name,version,comment)) #Save textKey for all uniq segments combos for uifo,uname,uversion,ucomment in uniqSegmentName: segmentIntervals=list() #Extra segments based on uniq textKey for ifo,name,version,comment,start,end in self.resultList: if (uifo,uname,uversion,ucomment)==(ifo,name,version,comment): segmentIntervals.append((start,end)) segmentIntervals.sort() #Coalesce those segments newSegmentIntervals=self.__merge__(segmentIntervals) #Write them to the object which we will return for newStart,newStop in newSegmentIntervals: newDQSeg.append([uifo,uname,uversion,ucomment,newStart,newStop]) newDQSeg.sort() del segmentIntervals #Reset the result list to the IFO restricted set self.resultList=newDQSeg return newDQSeg | 15b8c74e87af07e0931c02fecdfbb15f7dc804da /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/15b8c74e87af07e0931c02fecdfbb15f7dc804da/fu_utils.py |
os.stdout.write("Problem saving pickle of DQ information.") os.stdout.write("Trying to place pickle in your home directory.") | sys.stdout.write("Problem saving pickle of DQ information.") sys.stdout.write("Trying to place pickle in your home directory.") | def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True except: backgroundPickle=False sys.stderr.write("Error importing the pickle file! %s\n"\ %(pickleLocale)) return for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \ | 15b8c74e87af07e0931c02fecdfbb15f7dc804da /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/15b8c74e87af07e0931c02fecdfbb15f7dc804da/fu_utils.py |
os.stdout.write("Really ignoring pickle generation now!\n") | sys.stdout.write("Really ignoring pickle generation now!\n") | def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True except: backgroundPickle=False sys.stderr.write("Error importing the pickle file! %s\n"\ %(pickleLocale)) return for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \ | 15b8c74e87af07e0931c02fecdfbb15f7dc804da /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/15b8c74e87af07e0931c02fecdfbb15f7dc804da/fu_utils.py |
def append_process(xmldoc, comment = None, force = None, ds_sq_threshold = None, save_small_coincs = None, vetoes_name = None, verbose = None): | def append_process(xmldoc, comment = None, force = None, ds_sq_threshold = None, save_small_coincs = None, vetoes_name = None, coinc_end_time_segment = None, verbose = None): | def append_process(xmldoc, comment = None, force = None, ds_sq_threshold = None, save_small_coincs = None, vetoes_name = None, verbose = None): process = llwapp.append_process(xmldoc, program = process_program_name, version = __version__, cvs_repository = u"lscsoft", cvs_entry_time = __date__, comment = comment) params = [ (u"--ds-sq-threshold", u"real_8", ds_sq_threshold) ] if comment is not None: params += [(u"--comment", u"lstring", comment)] if force is not None: params += [(u"--force", None, None)] if save_small_coincs is not None: params += [(u"--save-small-coincs", None, None)] if vetoes_name is not None: params += [(u"--vetoes-name", u"lstring", vetoes_name)] if verbose is not None: params += [(u"--verbose", None, None)] ligolw_process.append_process_params(xmldoc, process, params) return process | 7a69c0f45d6bbc2512002197ea2ccb2122c44204 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/7a69c0f45d6bbc2512002197ea2ccb2122c44204/ligolw_rinca.py |
def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): | def append_coinc(self, process_id, node, coinc_def_id, events): | def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables # | 7a69c0f45d6bbc2512002197ea2ccb2122c44204 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/7a69c0f45d6bbc2512002197ea2ccb2122c44204/ligolw_rinca.py |
| time_slide_id = node.time_slide_id | def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables # | 7a69c0f45d6bbc2512002197ea2ccb2122c44204 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/7a69c0f45d6bbc2512002197ea2ccb2122c44204/ligolw_rinca.py |
tstart = events[0].get_start() + self.time_slide_index[time_slide_id][events[0].ifo] coinc_ringdown.set_start(tstart + sum(event.snr * float(event.get_start() + self.time_slide_index[time_slide_id][event.ifo] - tstart) for event in events) / sum(event.snr for event in events)) | tstart = coinc_ringdown_start(events, node.offset_vector) coinc_ringdown.set_start(tstart) | def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables # | 7a69c0f45d6bbc2512002197ea2ccb2122c44204 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/7a69c0f45d6bbc2512002197ea2ccb2122c44204/ligolw_rinca.py |
| tstart = coinc_ringdown.get_start() | def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables # | 7a69c0f45d6bbc2512002197ea2ccb2122c44204 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/7a69c0f45d6bbc2512002197ea2ccb2122c44204/ligolw_rinca.py |
coinc_tables.append_coinc(process_id, node.time_slide_id, coinc_def_id, ntuple) | coinc_tables.append_coinc(process_id, node, coinc_def_id, ntuple) | def ligolw_rinca( xmldoc, process_id, EventListType, CoincTables, coinc_definer_row, event_comparefunc, thresholds, ntuple_comparefunc = lambda events, offset_vector: False, small_coincs = False, veto_segments = None, verbose = False | 7a69c0f45d6bbc2512002197ea2ccb2122c44204 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/7a69c0f45d6bbc2512002197ea2ccb2122c44204/ligolw_rinca.py |
coinc_tables.append_coinc(process_id, node.time_slide_id, coinc_def_id, ntuple) | coinc_tables.append_coinc(process_id, node, coinc_def_id, ntuple) | def ligolw_rinca( xmldoc, process_id, EventListType, CoincTables, coinc_definer_row, event_comparefunc, thresholds, ntuple_comparefunc = lambda events, offset_vector: False, small_coincs = False, veto_segments = None, verbose = False | 7a69c0f45d6bbc2512002197ea2ccb2122c44204 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/7a69c0f45d6bbc2512002197ea2ccb2122c44204/ligolw_rinca.py |
| f = os.path.basename(f) | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 0e89d16bc15acc69ffa5e82b0fc52283d4a61951 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0e89d16bc15acc69ffa5e82b0fc52283d4a61951/pipeline.py |
| f = os.path.basename(f) | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 0e89d16bc15acc69ffa5e82b0fc52283d4a61951 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0e89d16bc15acc69ffa5e82b0fc52283d4a61951/pipeline.py |
xml = '<filename file="%s" />' % os.path.basename(f) | xml = '<filename file="%s" />' % f | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 0e89d16bc15acc69ffa5e82b0fc52283d4a61951 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0e89d16bc15acc69ffa5e82b0fc52283d4a61951/pipeline.py |
| f = os.path.basename(f) | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 0e89d16bc15acc69ffa5e82b0fc52283d4a61951 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0e89d16bc15acc69ffa5e82b0fc52283d4a61951/pipeline.py |
| f = os.path.basename(f) | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 0e89d16bc15acc69ffa5e82b0fc52283d4a61951 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0e89d16bc15acc69ffa5e82b0fc52283d4a61951/pipeline.py |
def update_ids(connection, verbose = False): | def update_ids(connection, xmldoc=None, verbose = False): | def update_ids(connection, verbose = False): """ For internal use only. """ table_elems = dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100.0 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection) | af592752cf35b9a561a2351661d817b99a8d50ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/af592752cf35b9a561a2351661d817b99a8d50ab/ligolw_sqlite.py |
table_elems = dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName) | if xmldoc: table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) else: table_elems = dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName) | def update_ids(connection, verbose = False): """ For internal use only. """ table_elems = dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100.0 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection) | af592752cf35b9a561a2351661d817b99a8d50ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/af592752cf35b9a561a2351661d817b99a8d50ab/ligolw_sqlite.py |
utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")).unlink() | xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) | def insert_from_url(connection, url, preserve_ids = False, verbose = False): """ Parse and insert the LIGO Light Weight document at the URL into the database the at the given connection. """ # # load document. this process inserts the document's contents into # the database. the document is unlinked to delete database cursor # objects it retains # utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")).unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, verbose) | af592752cf35b9a561a2351661d817b99a8d50ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/af592752cf35b9a561a2351661d817b99a8d50ab/ligolw_sqlite.py |
update_ids(connection, verbose) | update_ids(connection, xmldoc, verbose) xmldoc.unlink() | def insert_from_url(connection, url, preserve_ids = False, verbose = False): """ Parse and insert the LIGO Light Weight document at the URL into the database the at the given connection. """ # # load document. this process inserts the document's contents into # the database. the document is unlinked to delete database cursor # objects it retains # utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")).unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, verbose) | af592752cf35b9a561a2351661d817b99a8d50ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/af592752cf35b9a561a2351661d817b99a8d50ab/ligolw_sqlite.py |
| connection.commit() | def insert_from_xmldoc(connection, xmldoc, preserve_ids = False, verbose = False): """ Insert the tables from an in-ram XML document into the database at the given connection. """ # # iterate over tables in the XML tree, reconstructing each inside # the database # for tbl in xmldoc.getElementsByTagName(ligolw.Table.tagName): # # instantiate the correct table class # name = dbtables.table.StripTableName(tbl.getAttribute("Name")) if name in dbtables.TableByName: dbtab = dbtables.TableByName[name](tbl.attributes, connection = connection) else: dbtab = dbtables.DBTable(tbl.attributes, connection = connection) # # copy table element child nodes from source XML tree # for elem in tbl.childNodes: if elem.tagName == dbtables.table.TableStream.tagName: dbtab._end_of_columns() dbtab.appendChild(type(elem)(elem.attributes)) # # copy table rows from source XML tree # for row in tbl: dbtab.append(row) dbtab._end_of_rows() # # unlink to delete cursor objects # dbtab.unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, verbose) | af592752cf35b9a561a2351661d817b99a8d50ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/af592752cf35b9a561a2351661d817b99a8d50ab/ligolw_sqlite.py |
update_ids(connection, verbose) | update_ids(connection, None, verbose) | def insert_from_xmldoc(connection, xmldoc, preserve_ids = False, verbose = False): """ Insert the tables from an in-ram XML document into the database at the given connection. """ # # iterate over tables in the XML tree, reconstructing each inside # the database # for tbl in xmldoc.getElementsByTagName(ligolw.Table.tagName): # # instantiate the correct table class # name = dbtables.table.StripTableName(tbl.getAttribute("Name")) if name in dbtables.TableByName: dbtab = dbtables.TableByName[name](tbl.attributes, connection = connection) else: dbtab = dbtables.DBTable(tbl.attributes, connection = connection) # # copy table element child nodes from source XML tree # for elem in tbl.childNodes: if elem.tagName == dbtables.table.TableStream.tagName: dbtab._end_of_columns() dbtab.appendChild(type(elem)(elem.attributes)) # # copy table rows from source XML tree # for row in tbl: dbtab.append(row) dbtab._end_of_rows() # # unlink to delete cursor objects # dbtab.unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, verbose) | af592752cf35b9a561a2351661d817b99a8d50ab /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/af592752cf35b9a561a2351661d817b99a8d50ab/ligolw_sqlite.py |
from pylal import antenna | from pylal import antenna | def directional_horizon(ifos, RA, dec, gps_time, horizons=None): """ Return a dictionary of sensitivity numbers for each detector, based on a known sky location and an optional input dictionary of inspiral horizon distances for a reference source of the user's choice. If the horizons dictionary is specified, the returned values are interpreted as inspiral horizons in that direction. """ # Convert type if necessary if type(gps_time)==int: gps_time=float(gps_time) | c6b08930ed76a8d14be2b87e66f0d39d953cc479 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/c6b08930ed76a8d14be2b87e66f0d39d953cc479/grbsummary.py |
threshs[det]=min_threshold*(horizons[det]/worst_horizon) | threshs[det]=min_threshold*(horizons[det]/worst_horizon) | def detector_thresholds(horizons,min_threshold,max_threshold=7.5): """ Return a set of detector thresholds adjusted for a particular set of inspiral horizon distances (calculated with directional_horizon). The min_threshold specified the minimum threshold which will be set for all detectors less sensitive than the best one. The most sensitive detector will have its threshold adjusted upward to a maximum of max_threshold. """ assert min_threshold < max_threshold threshs={} worst_horizon=min(horizons.values()) best_horizon=max(horizons.values()) # Assuming that lowest threshold is in worst detector, return thresholds for det in horizons.keys(): if horizons[det]<best_horizon: threshs[det]=min_threshold else: threshs[det]=min_threshold*(horizons[det]/worst_horizon) if threshs[det]>max_threshold: threshs[det]=max_threshold return threshs | c6b08930ed76a8d14be2b87e66f0d39d953cc479 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/c6b08930ed76a8d14be2b87e66f0d39d953cc479/grbsummary.py |
| def fetchInformation(self,triggerTime=None,window=300): """ Wrapper for fetchInformationDualWindow that mimics original behavior """ return self.fetchInformationDualWindow(triggerTime,window,window,ifoList='DEFAULT') | | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
later. """ | later. """ | def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \ | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
for (ifo,epoch) in ifoEpochList: | except: backgroundPickle=False sys.stderr.write("Error importing the pickle file! %s\n"\ %(pickleLocale)) return for (ifo,epoch) in ifoEpochList: | def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \ | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
generated background %s"%self.__backgroundDict__["ifoepoch"] except: backgroundPickle=False sys.stderr.write("Error importing the pickle file!\n") if os.access(os.path.split(pickleLocal)[0],os.W_OK): os.path.rename(pickleLocale,pickleLocale+".corrupt") | generated background expected %s got %s"%(\ self.__backgroundDict__["ifoepoch"], ifoEpochList) | def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \ | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] | ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in self.ifos] | def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
sys.stderr.write("Aborting tabulate of binomial P\n") | sys.stderr.write("Aborting tabulation of binomial P\n") | def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
for myIfo,flagList in seenFlags.iteritems(): | for myIfo in seenFlags.keys(): | def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) | if myIfo.strip() not in self.__backgroundDict__.keys(): if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for outsideFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][outsideFlag]=float(-0.0) else: for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) | def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) | tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,\ size,myBackgroundRank,myCategory) | def generateHTMLTable(self,tableType="BOTH"): """ Return a HTML table already formatted using the module MARKUP to keep the HTML tags complient. This method does nothing but return the result of the last call to self.fetchInformation() The flag names associated with LIGO will have links to the channel wiki in them also. Types that will invoke a not everything behaviour are DQ and VETO """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="<table bgcolor=grey border=1px>" titleString="<tr>" tableEmptyString="<tr bgcolor=%s>"%myColor rowString="<tr bgcolor=%s> " for col in self.__columns__: titleString+="<th>%s</th>"%col rowString+="<td>%s</td>" tableEmptyString+="<td>None</td>" titleString+="</tr>\n" tableEmptyString+="</tr>\n" rowString+="</tr>\n" tableString+=titleString if len(self.resultList) == 0: tableString+=tableEmptyString for ifo,name,version,comment,start,stop in self.resultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" if tableType.upper().strip() == "DQ": if not name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() == "VETO": if name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() not in ["VETO","DQ"]: tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="</table>" return tableString | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) | tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,\ size,myBackgroundRank,myCategory) | def generateHTMLTable(self,tableType="BOTH"): """ Return a HTML table already formatted using the module MARKUP to keep the HTML tags complient. This method does nothing but return the result of the last call to self.fetchInformation() The flag names associated with LIGO will have links to the channel wiki in them also. Types that will invoke a not everything behaviour are DQ and VETO """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="<table bgcolor=grey border=1px>" titleString="<tr>" tableEmptyString="<tr bgcolor=%s>"%myColor rowString="<tr bgcolor=%s> " for col in self.__columns__: titleString+="<th>%s</th>"%col rowString+="<td>%s</td>" tableEmptyString+="<td>None</td>" titleString+="</tr>\n" tableEmptyString+="</tr>\n" rowString+="</tr>\n" tableString+=titleString if len(self.resultList) == 0: tableString+=tableEmptyString for ifo,name,version,comment,start,stop in self.resultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" if tableType.upper().strip() == "DQ": if not name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() == "VETO": if name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() not in ["VETO","DQ"]: tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="</table>" return tableString | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
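Both rows above widen the per-flag HTML row from nine fields to eleven, appending the background-rank and category cells; rowString evidently already carried one %s slot per column header, so the old nine-element tuple would have made the % operator raise "not enough arguments for format string". One way to keep the template and the tuple from drifting apart is to derive both from a single column list, as in this sketch (the column names are illustrative, not the exact pylal schema):

# Derive the row template and the value tuple from one column list so
# "%" never sees a mismatched argument count.
COLUMNS = ["IFO", "Flag", "Version", "Start", "Stop", "Rank", "Category"]
ROW_TEMPLATE = "<tr bgcolor=%s>" + "<td>%s</td>" * len(COLUMNS) + "</tr>"

def html_row(color, values):
    if len(values) != len(COLUMNS):
        raise ValueError("row has %d fields, expected %d"
                         % (len(values), len(COLUMNS)))
    return ROW_TEMPLATE % ((color,) + tuple(values))

print(html_row("green", ["L1", "DMT-SCIENCE", 1, 900000000, 900000016,
                         "12.5", "None"]))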
tableString+=emptyRowString%myColor | tableString+=emptyRowString | def generateMOINMOINTable(self,tableType="BOTH"): """ Return a MOINMOIN table. """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="" titleString="" emptyRowString="" rowString="" for i,col in enumerate(self.__columns__): if i == 0: titleString+="""||<rowbgcolor="%s"> %s """%(myColor,col) rowString+="""||<rowbgcolor="%s"> %s """ emptyRowString+="""||<rowbgcolor="%s"> None """%myColor else: titleString+="""|| %s """%col rowString+="""|| %s """ emptyRowString+="""|| None """ titleString+="""||\n""" rowString+="""||\n""" emptyRowString+="""||\n""" tableString+=titleString #Extract only DQ row or only VETO rows tmpResultList=list() for myRow in self.resultList: ifo,name,version,comment,start,stop=myRow #Select base on table type if ((tableType.upper() == "DQ") and \ (not name.strip().upper().startswith("UPV"))): tmpResultList.append(myRow) elif ((tableType.upper() == "VETO") and \ (name.strip().upper().startswith("UPV"))): tmpResultList.append(myRow) elif tableType.upper().strip() not in ["VETO","DQ"]: tmpResultList.append(myRow) if len(tmpResultList) == 0: tableString+=emptyRowString%myColor for ifo,name,version,comment,start,stop in tmpResultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" tableString+=rowString%(myColor,str(ifo).strip(),name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="\n" return tableString | cdbfaee53521829f68e4254203dcf2bd034ed537 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cdbfaee53521829f68e4254203dcf2bd034ed537/fu_utils.py |
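The row above fixes a double-interpolation bug in generateMOINMOINTable: emptyRowString is already built with %myColor substituted at construction time, so applying % myColor to it again leaves Python with no conversion specifiers to fill and raises a TypeError. A short demonstration:

# Demonstration of the double-interpolation bug fixed above.
color = "grey"
empty_row = '||<rowbgcolor="%s"> None || None ||' % color   # filled once

try:
    empty_row % color   # a second "%" on the already-filled string
except TypeError as exc:
    print("second interpolation fails: %s" % exc)

print(empty_row)        # the fix: use the interpolated string directly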
os.path.join("bin", "coh_PTF_html_summary"), os.path.join("bin", "coh_PTF_injfinder"), os.path.join("bin", "coh_PTF_sbv_plotter"), os.path.join("bin", "coh_PTF_trig_cluster"), os.path.join("bin", "coh_PTF_trig_combiner"), os.path.join("bin", "ring_post"), | os.path.join("bin", "coh_PTF_html_summary"), os.path.join("bin", "coh_PTF_injfinder"), os.path.join("bin", "coh_PTF_sbv_plotter"), os.path.join("bin", "coh_PTF_trig_cluster"), os.path.join("bin", "coh_PTF_trig_combiner"), os.path.join("bin", "ring_post"), | def run(self): # remove the automatically generated user env scripts for script in ["pylal-user-env.sh", "pylal-user-env.csh"]: log.info("removing " + script ) try: os.unlink(os.path.join("etc", script)) except: pass | 131cdbcf8d05d29ecc6d7d62bd9d25aa1a11600d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/131cdbcf8d05d29ecc6d7d62bd9d25aa1a11600d/setup.py |
for tbl in dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName): tbl.sync_next_id() | dbtables.idmap_sync(connection) | def setup(target, check_same_thread=True): connection = sqlite3.connect(target, check_same_thread=check_same_thread) dbtables.DBTable_set_connection(connection) for tbl in dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName): tbl.sync_next_id() return connection | 203e7af377f630621d055164eb0ff35d39290061 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/203e7af377f630621d055164eb0ff35d39290061/ligolw_sqlite.py |
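The row above replaces setup()'s hand-rolled loop over every table's sync_next_id() with a single dbtables.idmap_sync(connection) call, moving the next-ID bookkeeping into one helper. The sketch below shows the general shape of such a sync using only the standard library's sqlite3, seeding a next-ID counter from the largest ID already stored in each table; it is an analogy only, not the glue.ligolw implementation.

# Stdlib analogy: seed a next-ID counter from the largest ID already
# present in each table of a SQLite database.
import sqlite3

def idmap_sync(connection):
    cursor = connection.cursor()
    tables = [row[0] for row in cursor.execute(
        "SELECT name FROM sqlite_master WHERE type = 'table'")]
    next_ids = {}
    for table in tables:
        (max_id,) = cursor.execute(
            "SELECT COALESCE(MAX(rowid), 0) FROM %s" % table).fetchone()
        next_ids[table] = max_id + 1
    return next_ids

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE process (program TEXT)")
conn.execute("INSERT INTO process VALUES ('ligolw_sqlite')")
print(idmap_sync(conn))   # {'process': 2}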
def update_ids(xmldoc, connection, verbose = False): | def update_ids(connection, verbose = False): | def update_ids(xmldoc, connection, verbose = False): """ For internal use only. """ table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection) | 203e7af377f630621d055164eb0ff35d39290061 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/203e7af377f630621d055164eb0ff35d39290061/ligolw_sqlite.py |
table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) | table_elems = dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName) | def update_ids(xmldoc, connection, verbose = False): """ For internal use only. """ table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection) | 203e7af377f630621d055164eb0ff35d39290061 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/203e7af377f630621d055164eb0ff35d39290061/ligolw_sqlite.py |
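In the two rows above, update_ids drops its xmldoc parameter and derives the table list from the connection itself via dbtables.get_xml(connection), so callers no longer have to thread a document handle alongside the connection that already identifies the database. A stdlib sketch of the same move, with the per-table ID remapping left as a stub:

# Stdlib sketch: everything update_ids needs is reachable from the
# connection, so the xmldoc parameter can go.
import sqlite3

def update_ids(connection, verbose=False):
    tables = [row[0] for row in connection.execute(
        "SELECT name FROM sqlite_master WHERE type = 'table'")]
    for i, table in enumerate(tables):
        if verbose:
            print("updating IDs: %d%%" % (100.0 * i / len(tables)))
        # ... the applyKeyMapping() equivalent would run here ...
    if verbose:
        print("updating IDs: 100%")

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE sngl_inspiral (event_id INTEGER)")
update_ids(conn, verbose=True)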
print >>sys.stderr, "updating IDs: %d%%\r" % (100 * i / len(table_elems)), | print >>sys.stderr, "updating IDs: %d%%\r" % (100.0 * i / len(table_elems)), | def update_ids(xmldoc, connection, verbose = False): """ For internal use only. """ table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection) | 203e7af377f630621d055164eb0ff35d39290061 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/203e7af377f630621d055164eb0ff35d39290061/ligolw_sqlite.py |
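The row above is the classic Python 2 integer-division pitfall: with i and len(table_elems) both ints, 100 * i / len(table_elems) floors the quotient, while the 100.0 form promotes the arithmetic to float before %d rounds it for display. The two behaviours in isolation:

# The truncation fixed above: under Python 2 semantics "/" on two
# ints floors; multiplying by 100.0 promotes to float.
from __future__ import division   # gives "/" Python 3 semantics here

def progress_int(i, n):
    return 100 * i // n    # floor division: what Python 2's "/" did

def progress_float(i, n):
    return 100.0 * i / n   # the fixed expression

for i in range(4):
    print("%d/3 -> int %3d%%   float %5.1f%%"
          % (i, progress_int(i, 3), progress_float(i, 3)))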
Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ | Iterate over a sequence of URLs, calling insert_from_url() on each, then build the indexes indicated by the metadata in lsctables.py. """ | def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append | 203e7af377f630621d055164eb0ff35d39290061 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/203e7af377f630621d055164eb0ff35d39290061/ligolw_sqlite.py |
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append | 203e7af377f630621d055164eb0ff35d39290061 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/203e7af377f630621d055164eb0ff35d39290061/ligolw_sqlite.py |
xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) if not preserve_ids: update_ids(xmldoc, connection, verbose) xmldoc.unlink() | insert_from_url(connection, url, preserve_ids = preserve_ids, verbose = verbose) | def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append | 203e7af377f630621d055164eb0ff35d39290061 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/203e7af377f630621d055164eb0ff35d39290061/ligolw_sqlite.py |
dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append def insert_from_xmldoc(connection, xmldoc, preserve_ids = False, verbose = False): """ Insert the tables from an in-ram XML document into the database at the given connection. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: dbtables.DBTable.append = dbtables.DBTable._append for tbl in xmldoc.getElementsByTagName(ligolw.Table.tagName): dbtab = dbtables.DBTable(tbl.attributes, connection=connection) for elem in tbl.childNodes: if isinstance(elem, dbtables.table.TableStream): dbtab._end_of_columns() dbtab.appendChild(type(elem)(elem.attributes)) for row in tbl: dbtab.append(row) dbtab._end_of_rows() if not preserve_ids: update_ids(dbtables.get_xml(connection), connection, verbose) | def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append | 203e7af377f630621d055164eb0ff35d39290061 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/203e7af377f630621d055164eb0ff35d39290061/ligolw_sqlite.py |
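The two rows above refactor ligolw_sqlite: the per-URL body of insert_from_urls is delegated to insert_from_url, a new insert_from_xmldoc handles documents already in memory, and update_ids fetches its document via dbtables.get_xml(connection). What remains in insert_from_urls is a thin loop that sequences the per-URL worker and builds indexes once at the end. A runnable sketch of that wrapper shape, with the workers reduced to stubs (they are placeholders, not the glue.ligolw code):

# Shape of the refactored module: one worker per input kind plus a
# thin loop that sequences them.
def insert_from_url(connection, url, preserve_ids=False, verbose=False):
    if verbose:
        print("loading %s" % url)
    # real code: parse the document, remap row IDs unless preserve_ids

def insert_from_xmldoc(connection, xmldoc, preserve_ids=False, verbose=False):
    pass  # real code: walk the in-memory tables and append their rows

def build_indexes(connection, verbose=False):
    if verbose:
        print("building indexes")

def insert_from_urls(connection, urls, preserve_ids=False, verbose=False):
    for n, url in enumerate(urls):
        if verbose:
            print("%d/%d:" % (n + 1, len(urls)))
        insert_from_url(connection, url,
                        preserve_ids=preserve_ids, verbose=verbose)
    build_indexes(connection, verbose=verbose)

insert_from_urls(None, ["a.xml.gz", "b.xml.gz"], verbose=True)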
|
htmlfile.write('<table border=1><tr>') | htmlfile.write('<table border=1 width=100%><tr>') | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 8efa0901ef9914c859030cd2ae988cc006b9e1d8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/8efa0901ef9914c859030cd2ae988cc006b9e1d8/cbcBayesSkyRes.py |
oneDplotPath=os.path.join(outdir,param+'.png') | figname=param+'.png' oneDplotPath=os.path.join(outdir,figname) | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 8efa0901ef9914c859030cd2ae988cc006b9e1d8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/8efa0901ef9914c859030cd2ae988cc006b9e1d8/cbcBayesSkyRes.py |
myfig.savefig(os.path.join(outdir,param+'_samps.png')) | myfig.savefig(os.path.join(outdir,figname.replace('.png','_samps.png'))) | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 8efa0901ef9914c859030cd2ae988cc006b9e1d8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/8efa0901ef9914c859030cd2ae988cc006b9e1d8/cbcBayesSkyRes.py |
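The context above repeatedly turns posterior samples into Bayesian confidence intervals by greedy binning (bppu.greedyBin1/greedyBin2). A minimal, self-contained sketch of that idea, with a hypothetical helper rather than the bppu API: rank histogram bins by occupancy and accumulate the most-populated ones until the requested posterior fraction is enclosed.

import numpy as np

def greedy_interval_size(samples, bin_width, confidence=0.68):
    # Width of the smallest set of bins that holds `confidence` of the samples.
    lo, hi = samples.min(), samples.max()
    nbins = max(1, int(np.ceil((hi - lo) / bin_width)))
    counts, _ = np.histogram(samples, bins=nbins, range=(lo, lo + nbins * bin_width))
    ranked = np.sort(counts)[::-1]               # most-populated bins first
    needed = np.searchsorted(np.cumsum(ranked), confidence * len(samples)) + 1
    return needed * bin_width

np.random.seed(0)
print(greedy_interval_size(np.random.randn(10000), 0.1))  # ~2.0 for a unit normal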
oneDplotPaths.append(oneDplotPath) | oneDplotPaths.append(figname) | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 8efa0901ef9914c859030cd2ae988cc006b9e1d8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/8efa0901ef9914c859030cd2ae988cc006b9e1d8/cbcBayesSkyRes.py |
oneDMenu=['mtotal','m1','m2','mchirp','mc','distance','distMPC','dist','iota','eta','RA','dec','a1','a2','phi1','theta1','phi2','theta2'] | oneDMenu=['mtotal','m1','m2','mchirp','mc','distance','distMPC','dist','iota','psi','eta','RA','dec','a1','a2','phi1','theta1','phi2','theta2'] | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 8efa0901ef9914c859030cd2ae988cc006b9e1d8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/8efa0901ef9914c859030cd2ae988cc006b9e1d8/cbcBayesSkyRes.py |
twoDplots=[['mc','eta'],['mchirp','eta'],['m1','m2'],['mtotal','eta'],['distance','iota'],['dist','iota'],['RA','dec'],['m1','dist'],['m2','dist']] | twoDplots=[['mc','eta'],['mchirp','eta'],['m1','m2'],['mtotal','eta'],['distance','iota'],['dist','iota'],['RA','dec'],['m1','dist'],['m2','dist'],['psi','iota'],['psi','distance'],['psi','dist'],['psi','phi0']] | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 8efa0901ef9914c859030cd2ae988cc006b9e1d8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/8efa0901ef9914c859030cd2ae988cc006b9e1d8/cbcBayesSkyRes.py |
dax_usertag = node.get_user_tag() if dax_usertag: pegasus_exec_subdir = os.path.join(dax_subdir,dax_usertag) else: pegasus_exec_subdir = dax_subdir xml += """--dir %s """ % pegasus_exec_subdir | xml += """--dir %s """ % dax_subdir | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 2219fef30781594185a08025fae24b41130ddde7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/2219fef30781594185a08025fae24b41130ddde7/pipeline.py |
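The branch removed in this row selected a per-usertag Pegasus execution subdirectory before falling back to dax_subdir. A sketch of that conditional path construction; the names and the "S6" tag are stand-ins for the node attributes involved:

import os

def exec_subdir(dax_subdir, usertag=None):
    # Pre-fix behaviour: nest the execution directory under the user tag.
    return os.path.join(dax_subdir, usertag) if usertag else dax_subdir

print(exec_subdir("dax", "S6"))  # dax/S6
print(exec_subdir("dax"))        # dax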
given an instance of skypoints populate and return skylocinjtable | record injection data in a skylocinjtable | def populate_SkyLocInjTable(skylocinjtable,coinc,dt_area,rank_area, \ dtrss_inj,dDrss_inj): """ given an instance of skypoints populate and return skylocinjtable """ row = skylocinjtable.RowType() row.end_time = coinc.time rhosquared = 0.0 for ifo in coinc.ifo_list: rhosquared += coinc.snr[ifo]*coinc.snr[ifo] row.comb_snr = sqrt(rhosquared) try: row.h1_snr = coinc.snr['H1'] except: row.h1_snr = None try: row.l1_snr = coinc.snr['L1'] except: row.l1_snr = None try: row.v1_snr = coinc.snr['V1'] except: row.v1_snr = None row.ra = coinc.longitude_inj row.dec = coinc.latitude_inj row.dt_area = dt_area row.rank_area = rank_area row.delta_t_rss = dtrss_inj row.delta_D_rss = dDrss_inj try: row.h1_eff_distance = coinc.eff_distances_inj['H1'] except: row.h1_eff_distance = None try: row.l1_eff_distance = coinc.eff_distances_inj['L1'] except: row.l1_eff_distance = None try: row.v1_eff_distance = coinc.eff_distances_inj['V1'] except: row.v1_eff_distance = None row.mass1 = coinc.mass1_inj row.mass2 = coinc.mass2_inj skylocinjtable.append(row) | 2c3acdcabcc35de646276ecf68b6440dd34cb559 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/2c3acdcabcc35de646276ecf68b6440dd34cb559/skylocutils.py |
maxdx=max(xrange(0,N),key=lambda i:dot(sampcart,skycarts[i])) | maxdot=0 for i in range(0,N): thisdot=dot(sampcart,skycarts[i]) if thisdot>maxdot: maxdot=thisdot maxdx=i | def skyhist_cart(skycarts,samples): """ Histogram the list of samples into bins defined by Cartesian vectors in skycarts """ dot=numpy.dot N=len(skycarts) print 'operating on %d sky points'%(N) bins=zeros(N) for sample in samples: sampcart=pol2cart(sample[RAdim],sample[decdim]) maxdx=max(xrange(0,N),key=lambda i:dot(sampcart,skycarts[i])) bins[maxdx]+=1 return (skycarts,bins) | 2a85c85c26c47f76e885abc27e93f5171f709c28 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/2a85c85c26c47f76e885abc27e93f5171f709c28/cbcBayesSkyRes.py |
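Both sides of this row compute an argmax over dot products: the index of the sky pixel whose unit vector best aligns with the sample. The two forms are equivalent, though the replacement loop seeds maxdot=0 and so quietly assumes at least one positive dot product; a small sketch with that pitfall avoided:

import numpy as np

skycarts = np.eye(3)                  # three toy unit vectors
sampcart = np.array([0.1, 0.9, 0.2])  # toy sample direction

maxdot, maxdx = -np.inf, 0            # -inf start works for any sign of dot
for i in range(len(skycarts)):
    d = np.dot(sampcart, skycarts[i])
    if d > maxdot:
        maxdot, maxdx = d, i

assert maxdx == np.argmax(skycarts.dot(sampcart))  # vectorised equivalent
print(maxdx)  # 1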
dag.add_node(self) | if opts.do_makeCheckList: dag.add_node(self) | def __init__(self,dag,job,cp,opts): pipeline.CondorDAGNode.__init__(self,job) #Specify pipe location self.add_var_opt('followup-directory',cp.get("makeCheckListWiki", "location").strip()) #Specify pipe ini file self.add_var_opt('ini-file',cp.get("makeCheckListWiki", "ini-file").strip()) if not opts.disable_dag_categories: self.set_category(job.name.lower()) #Add this as child of all known jobs for parentNode in dag.get_nodes(): self.add_parent(parentNode) dag.add_node(self) | 312184e9c5fc13b8218789141d8b7aa13b21265d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/312184e9c5fc13b8218789141d8b7aa13b21265d/stfu_pipe.py |
mv $1 $2/. tar -xzvf $2/$1 | currentPath=`pwd` ; mv $1 $2/. ; cd $2 ; tar -xzvf $1 ; cd $currentPath ; | def setup_distrib_script(self,tag_base): distrib_script = open('distribRemoteScan_'+tag_base+'.sh','w') distrib_script.write("""#!/bin/bash | 0ac39b00f1d66e6fb64eb15d46386a3b5ded1e80 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0ac39b00f1d66e6fb64eb15d46386a3b5ded1e80/stfu_pipe.py |
rm $2/$1 | rm $2/$1 ; | def setup_distrib_script(self,tag_base): distrib_script = open('distribRemoteScan_'+tag_base+'.sh','w') distrib_script.write("""#!/bin/bash | 0ac39b00f1d66e6fb64eb15d46386a3b5ded1e80 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0ac39b00f1d66e6fb64eb15d46386a3b5ded1e80/stfu_pipe.py |
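The two rows above fix setup_distrib_script so the tarball is extracted inside $2 rather than wherever the script was launched, by cd-ing into the target directory and back. For illustration only, the same effect in Python, which avoids changing the working directory at all (GNU tar's -C flag is the direct shell analogue):

import tarfile

def extract_into(archive_path, dest_dir):
    # Unpack a .tar.gz into dest_dir without any os.chdir juggling.
    with tarfile.open(archive_path, "r:gz") as tf:
        tf.extractall(path=dest_dir)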
mcmcfilelist += node.outputName' | mcmcfilelist += node.outputName | def __init__(self,job,coinc,cp,opts,dag,ifo,ifonames,p_nodes): pipeline.CondorDAGNode.__init__(self,job) | ed4c05b1e8f4420e17dab587b3f26534eea33ec6 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/ed4c05b1e8f4420e17dab587b3f26534eea33ec6/stfu_pipe.py |
cp.set("fu-condor","mcmc", self.which("lalapps_spinspiral")) | cp.set("fu-condor","spinmcmc", self.which("lalapps_spinspiral")) | def __init__(self, configfile=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind") | ed4c05b1e8f4420e17dab587b3f26534eea33ec6 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/ed4c05b1e8f4420e17dab587b3f26534eea33ec6/stfu_pipe.py |
if int(gpstime)<=endOfS5: | if int(gpstime)<=endOfS5 or ifo=="V1": | def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None): """ Temporary patch function, to adjust specfied frame type used in searching the filesystem for files to display in followup. """ if frametype == None: raise Exception, "input to __patchFrameTypeDef__ included a \ frametype argument specified as None\n" return None if gpstime == None: raise Warning, "input to __patchFrameTypeDef__ included a \ gps time argument specified as None\n" return frametype if ifo == None: raise Warning, "input to __patchFrameTypeDef__ included an \ ifo argument specified as None\n" return frametype endOfS5=int(875232014) if int(gpstime)<=endOfS5: if not frametype.lower().startswith(ifo.lower()): return ifo+"_"+frametype return frametype | 145082477fff7e374cb1b7a6e3f1c53ec4e94649 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/145082477fff7e374cb1b7a6e3f1c53ec4e94649/makeCheckListWiki.py |
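This row widens the S5-era patch so Virgo frame types are always prefixed with the IFO name. A runnable sketch of the rule, mirroring the row's own logic (the sample frame-type strings are illustrative):

END_OF_S5 = 875232014

def patched_frame_type(frametype, ifo, gpstime):
    if int(gpstime) <= END_OF_S5 or ifo == "V1":
        if not frametype.lower().startswith(ifo.lower()):
            return ifo + "_" + frametype
    return frametype

print(patched_frame_type("RDS_R_L1", "H1", 875000000))  # H1_RDS_R_L1 (S5 epoch)
print(patched_frame_type("RDS_R_L1", "H1", 900000000))  # RDS_R_L1 (post-S5, not V1)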
scatter(plx,ply,s=5,c=numpyasarray(toppoints)[:,2],faceted=False,cmap=matplotlib.cm.jet) | scatter(plx,ply,s=5,c=numpy.asarray(toppoints)[:,2],faceted=False,cmap=matplotlib.cm.jet) | def getinjpar(inj,parnum): if parnum==0: return inj.mchirp if parnum==1: return inj.eta if parnum==2: return inj.get_end() if parnum==3: return inj.phi0 if parnum==4: return inj.distance if parnum==5: return inj.longitude if parnum==6: return inj.latitude if parnum==7: return inj.polarization if parnum==8: return inj.inclination return None | 1334d3f38b9b5e896e6164fc5328782c040709a2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/1334d3f38b9b5e896e6164fc5328782c040709a2/OddsPostProc.py |
def script_dict(): | def script_dict(fname): | def script_dict(): script = {} tog = create_toggle() script[tog] = 'javascript' script['http://ajax.googleapis.com/ajax/libs/jquery/1.2.6/jquery.min.js'] = 'javascript' return (script, [tog]) | 0032a000469d2e6336ec23ced4aff11e7c52de80 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0032a000469d2e6336ec23ced4aff11e7c52de80/cbcwebpage.py |
tog = create_toggle() | tog = os.path.split(create_toggle(fname))[1] | def script_dict(): script = {} tog = create_toggle() script[tog] = 'javascript' script['http://ajax.googleapis.com/ajax/libs/jquery/1.2.6/jquery.min.js'] = 'javascript' return (script, [tog]) | 0032a000469d2e6336ec23ced4aff11e7c52de80 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0032a000469d2e6336ec23ced4aff11e7c52de80/cbcwebpage.py |
return base_dir + '/' + os.path.split(out.rstrip('/'))[1] | return os.path.split(out.rstrip('/'))[1] | def copy_ihope_style(stylefile="cbcwebpage.css", base_dir="."): # FIXME this is a stupid way to find the path... changes to build scripts, set env var? path = which('ligo_data_find') if path: path = os.path.split(path)[0] else: print >>sys.stderr, "COULD NOT FIND STYLE FILES %s IN %s, ABORTING" % (stylefile, path) raise ValueError sys.exit(1) out = path.replace('bin','etc') + '/' + stylefile if not os.path.isfile(out): print >>sys.stderr, "COULD NOT FIND STYLE FILES %s IN %s, ABORTING" % (stylefile, path) raise ValueError sys.exit(1) shutil.copy(out, base_dir) return base_dir + '/' + os.path.split(out.rstrip('/'))[1] | 0032a000469d2e6336ec23ced4aff11e7c52de80 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0032a000469d2e6336ec23ced4aff11e7c52de80/cbcwebpage.py |
if not css: css = copy_ihope_style() scdict = script_dict() | if not css: css = copy_ihope_style(base_dir=path) scdict = script_dict(fname='%s/%s' % (path,"toggle.js")) | def __init__(self, title="cbc web page", path='./', css=None, script=None, pagenum=1, verbose=False): """ """ if not css: css = copy_ihope_style() scdict = script_dict() if not script: script = scdict[0] self.front = "" scriptfiles = scdict[1] self.verbose = verbose self._style = css self._title = title self._script = script self.path = path self.pagenum = pagenum | 0032a000469d2e6336ec23ced4aff11e7c52de80 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0032a000469d2e6336ec23ced4aff11e7c52de80/cbcwebpage.py |
self.subpages[tag] = cbcpage(title=title,css=self._style,script=self._script,pagenum=subpage_num) | self.subpages[tag] = cbcpage(title=title,path=self.path,css=self._style,script=self._script,pagenum=subpage_num) | def add_subpage(self, tag, title, link_text=None): """ """ | 0032a000469d2e6336ec23ced4aff11e7c52de80 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0032a000469d2e6336ec23ced4aff11e7c52de80/cbcwebpage.py |
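The cbcwebpage.py rows above all chase one bug: pages written under `path` must reference copied assets by a page-relative basename, not by the absolute location the build happened to use. A sketch of the corrected pattern; install_asset is a hypothetical helper, not part of cbcwebpage:

import os, shutil

def install_asset(src, page_dir):
    shutil.copy(src, page_dir)                # place the file next to the page
    return os.path.split(src.rstrip("/"))[1]  # reference it by basename only

# e.g. install_asset("/opt/etc/cbcwebpage.css", "public_html") -> "cbcwebpage.css"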
if trigger.chisq_h1 < 4 or trigger.chisq_l < 4: | if trigger.chisq_h1 < 4 or trigger.chisq_v < 4: | def get_signal_vetoes(trigger,bankq=0,bankn=0,autoq=0,auton=0,chiq=0,chin=0,sigmaVals = None,fResp = None): sbvs = {} q = bankq nhigh = bankn q2 = autoq nhigh2 = auton if trigger.chisq == 0: sbvs['BestNR1'] = 0 else: if trigger.chisq < 60: sbvs['BestNR1'] = trigger.snr else: sbvs['BestNR1'] = trigger.snr/((1 + (trigger.chisq/60.)**(chiq/chin))/2.)**(1./chiq) | e534464ac252b05367aa5150a8409e2effafeb21 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/e534464ac252b05367aa5150a8409e2effafeb21/coh_PTF_pyutils.py |
i=list(nonzero(np.asarray(toppoints)[:,2]==injbin))[0] | i=list(np.nonzero(np.asarray(toppoints)[:,2]==injbin))[0] | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhist_cart(skycarts,list(pos)) bins=skycarts # Find the bin of the injection if available injbin=None if sky_injpoint: injhist=skyhist_cart(skycarts,array([sky_injpoint])) injbin=injhist.tolist().index(1) print 'Found injection in bin %d with co-ordinates %f,%f .'%(injbin,skypoints[injbin,0],skypoints[injbin,1]) (skyinjectionconfidence,toppoints,skyreses)=bayespputils.calculateConfidenceLevels(shist,skypoints,injbin,float(opts.skyres),confidence_levels,len(pos)) if injbin and skyinjectionconfidence: i=list(nonzero(np.asarray(toppoints)[:,2]==injbin))[0] min_sky_area_containing_injection=float(opts.skyres)*float(opts.skyres)*i print 'Minimum sky area containing injection point = %f square degrees'%min_sky_area_containing_injection myfig=plt.figure() plt.clf() m=Basemap(projection='moll',lon_0=180.0,lat_0=0.0) plx,ply=m(np.asarray(toppoints)[::-1,1]*57.296,np.asarray(toppoints)[::-1,0]*57.296) cnlevel=[1-tp for tp in np.asarray(toppoints)[::-1,3]] plt.scatter(plx,ply,s=5,c=cnlevel,faceted=False,cmap=matplotlib.cm.jet) m.drawmapboundary() m.drawparallels(np.arange(-90.,120.,45.),labels=[1,0,0,0],labelstyle='+/-') # draw parallels m.drawmeridians(np.arange(0.,360.,90.),labels=[0,0,0,1],labelstyle='+/-') # draw meridians plt.title("Skymap") # add a title plt.colorbar() myfig.savefig(os.path.join(outdir,'skymap.png')) plt.clf() #Save skypoints np.savetxt('ranked_sky_pixels',column_stack([np.asarray(toppoints)[:,0:1],np.asarray(toppoints)[:,1],np.asarray(toppoints)[:,3]])) return skyreses,skyinjectionconfidence | 1d62b6f57cbe6ce6c304f75ded4030fc36d5a3ed /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/1d62b6f57cbe6ce6c304f75ded4030fc36d5a3ed/cbcBayesSkyRes.py |
class cached_ilwdchar_class(ilwdchar): __slots__ = () table_name, column_name = key index_offset = len("%s:%s:" % key) def __conform__(self, protocol): return unicode(self) ilwdchar_class_cache[key] = cached_ilwdchar_class return cached_ilwdchar_class | pass class cached_ilwdchar_class(ilwdchar): __slots__ = () table_name, column_name = key index_offset = len("%s:%s:" % key) def __conform__(self, protocol): return unicode(self) ilwdchar_class_cache[key] = cached_ilwdchar_class return cached_ilwdchar_class | def get_ilwdchar_class(tbl_name, col_name): """ Searches the cache of pre-defined ilwdchar subclasses for a class whose table_name and column_name attributes match those provided. If a matching subclass is found it is returned; otherwise a new class is defined, added to the cache, and returned. Example: >>> process_id = get_ilwdchar_class("process", "process_id") >>> x = process_id(10) >>> x <glue.ligolw.ilwd.cached_ilwdchar_class object at 0x2b8de0a186a8> >>> str(x) 'process:process_id:10' Retrieving and storing the class provides a convenient mechanism for quickly constructing new ID objects. Example: >>> for i in range(10): ... print str(process_id(i)) ... process:process_id:0 process:process_id:1 process:process_id:2 process:process_id:3 process:process_id:4 process:process_id:5 process:process_id:6 process:process_id:7 process:process_id:8 process:process_id:9 """ # # if the class already exists, retrieve it # key = (str(tbl_name), str(col_name)) try: return ilwdchar_class_cache[key] except KeyError: # # define a new class, and add it to the cache # class cached_ilwdchar_class(ilwdchar): __slots__ = () table_name, column_name = key index_offset = len("%s:%s:" % key) def __conform__(self, protocol): # The presence of this method allows # ilwdchar sub-classes to be inserted # directly into SQLite databases as # strings. See # # http://www.python.org/dev/peps/pep-0246 # # for more information. # # NOTE: GvR has rejected that PEP, so this # mechanism is obsolete. Be prepared to # fix this, replacing it with whatever # replaces it. # # NOTE: The return should be inside an "if # protocol is sqlite3.PrepareProtocol:" # conditional, but that would require # importing sqlite3 which would break this # module on FC4 boxes, and I'm not going to # spend time fixing something that's # obsolete anyway. return unicode(self) ilwdchar_class_cache[key] = cached_ilwdchar_class return cached_ilwdchar_class | 46eeb182e5547b4a5fe3dd0cbde35ddc40c89f66 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/46eeb182e5547b4a5fe3dd0cbde35ddc40c89f66/ilwd.py |
cp.set("makeCheckListWiki","ini-file",self.ini_file) | cp.set("makeCheckListWiki","ini-file",os.path.abspath(self.ini_file)) | def __init__(self, configfile=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind") | b04680f787bf19a80bc1102c1acbdbd5391a41f5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b04680f787bf19a80bc1102c1acbdbd5391a41f5/stfu_pipe.py |
Move both the start and the end of the segment a distance x away from the other. | Return a new segment whose bounds are given by subtracting x from the segment's lower bound and adding x to the segment's upper bound. | def protract(self, x): """ Move both the start and the end of the segment a distance x away from the other. """ return self.__class__(self[0] - x, self[1] + x) | b8d62cdad939775ab60dde40de9755d5089bae1a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b8d62cdad939775ab60dde40de9755d5089bae1a/segments.py |
Move both the start and the end of the segment a distance x towards the the other. | Return a new segment whose bounds are given by adding x to the segment's lower bound and subtracting x from the segment's upper bound. | def contract(self, x): """ Move both the start and the end of the segment a distance x towards the the other. """ return self.__class__(self[0] + x, self[1] - x) | b8d62cdad939775ab60dde40de9755d5089bae1a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b8d62cdad939775ab60dde40de9755d5089bae1a/segments.py |
Return a new segment by adding x to the upper and lower bounds of this segment. | Return a new segment whose bounds are given by adding x to the segment's upper and lower bounds. | def shift(self, x): """ Return a new segment by adding x to the upper and lower bounds of this segment. """ return tuple.__new__(self.__class__, (self[0] + x, self[1] + x)) | b8d62cdad939775ab60dde40de9755d5089bae1a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b8d62cdad939775ab60dde40de9755d5089bae1a/segments.py |
For each segment in the list, move both the start and the end a distance x away from the other. Coalesce the result. Segmentlist is modified in place. | Execute the .protract() method on each segment in the list and coalesce the result. Segmentlist is modified in place. | def protract(self, x): """ For each segment in the list, move both the start and the end a distance x away from the other. Coalesce the result. Segmentlist is modified in place. """ for i in xrange(len(self)): self[i] = self[i].protract(x) return self.coalesce() | b8d62cdad939775ab60dde40de9755d5089bae1a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b8d62cdad939775ab60dde40de9755d5089bae1a/segments.py |
For each segment in the list, move both the start and the end a distance x towards the other. Coalesce the result. Segmentlist is modified in place. | Execute the .contract() method on each segment in the list and coalesce the result. Segmentlist is modified in place. | def contract(self, x): """ For each segment in the list, move both the start and the end a distance x towards the other. Coalesce the result. Segmentlist is modified in place. """ for i in xrange(len(self)): self[i] = self[i].contract(x) return self.coalesce() | b8d62cdad939775ab60dde40de9755d5089bae1a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b8d62cdad939775ab60dde40de9755d5089bae1a/segments.py |
Shift the segmentlist by adding x to the upper and lower bounds of all segments. The algorithm is O(n) and does not require the list to be coalesced. Segmentlist is modified in place. | Execute the .shift() method on each segment in the list. The algorithm is O(n) and does not require the list to be coalesced nor does it coalesce the list. Segmentlist is modified in place. | def shift(self, x): """ Shift the segmentlist by adding x to the upper and lower bounds of all segments. The algorithm is O(n) and does not require the list to be coalesced. Segmentlist is modified in place. """ for i in xrange(len(self)): self[i] = self[i].shift(x) return self | b8d62cdad939775ab60dde40de9755d5089bae1a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b8d62cdad939775ab60dde40de9755d5089bae1a/segments.py |
[segment(11.0, 15)] | [segment(6.0, 15)] | def popitem(*args): raise NotImplementedError | 8e48b6075bb8a5cd31c1c0ad7f6050e789bbb144 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/8e48b6075bb8a5cd31c1c0ad7f6050e789bbb144/segments.py |