Columns:
bugged: string, lengths 4 to 228k
fixed: string, lengths 0 to 96.3M
__index_level_0__: int64, 0 to 481k
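Each record pairs a pre-fix snippet (bugged) with its post-fix counterpart (fixed), keyed by the original row index. A minimal sketch of reading one record, assuming the Hugging Face datasets library; the dataset identifier below is a placeholder, not the real name:

from datasets import load_dataset

# "user/python-bug-fix-pairs" is a hypothetical identifier; substitute
# the actual dataset name when loading.
ds = load_dataset("user/python-bug-fix-pairs", split="train")
row = ds[0]
print(row["__index_level_0__"])  # original row index, e.g. 479700
print(row["bugged"])             # pre-fix source
print(row["fixed"])              # post-fix source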
def populate_SkyLocTable(skyloctable,coinc,adt60,adt90,adt60dD60,adt90dD90,\ pt,grid_fname,skymap_fname=None): """ populate a row in a skyloctable """ row = skyloctable.RowType() row.end_time = coinc.time row.set_ifos(coinc.ifo_list) rhosquared = 0.0 for ifo in coinc.ifo_list: rhosquared += coinc.snr[ifo]*coinc.snr[ifo] row.comb_snr = sqrt(rhosquared) row.dec,row.ra = pt[0],pt[1] row.a60dt = adt60 row.a90dt = adt90 row.a60dt60dD = adt60dD60 row.a90dt90dD = adt90dD90 row.min_eff_distance = min(effD for effD in coinc.eff_distances.values()) if skymap_fname: row.skymap = os.path.basename(str(skymap_fname)) else: row.skymap = skymap_fname row.grid = os.path.basename(str(grid_fname)) skyloctable.append(row)
def populate_SkyLocTable(skyloctable,coinc,adt60,adt90,arank60,arank90,\ pt,grid_fname,skymap_fname=None): """ populate a row in a skyloctable """ row = skyloctable.RowType() row.end_time = coinc.time row.set_ifos(coinc.ifo_list) rhosquared = 0.0 for ifo in coinc.ifo_list: rhosquared += coinc.snr[ifo]*coinc.snr[ifo] row.comb_snr = sqrt(rhosquared) row.dec,row.ra = pt[0],pt[1] row.a60dt = adt60 row.a90dt = adt90 row.a60dt60dD = adt60dD60 row.a90dt90dD = adt90dD90 row.min_eff_distance = min(effD for effD in coinc.eff_distances.values()) if skymap_fname: row.skymap = os.path.basename(str(skymap_fname)) else: row.skymap = skymap_fname row.grid = os.path.basename(str(grid_fname)) skyloctable.append(row)
479,700
def populate_SkyLocTable(skyloctable,coinc,adt60,adt90,adt60dD60,adt90dD90,\ pt,grid_fname,skymap_fname=None): """ populate a row in a skyloctable """ row = skyloctable.RowType() row.end_time = coinc.time row.set_ifos(coinc.ifo_list) rhosquared = 0.0 for ifo in coinc.ifo_list: rhosquared += coinc.snr[ifo]*coinc.snr[ifo] row.comb_snr = sqrt(rhosquared) row.dec,row.ra = pt[0],pt[1] row.a60dt = adt60 row.a90dt = adt90 row.a60dt60dD = adt60dD60 row.a90dt90dD = adt90dD90 row.min_eff_distance = min(effD for effD in coinc.eff_distances.values()) if skymap_fname: row.skymap = os.path.basename(str(skymap_fname)) else: row.skymap = skymap_fname row.grid = os.path.basename(str(grid_fname)) skyloctable.append(row)
def populate_SkyLocTable(skyloctable,coinc,adt60,adt90,adt60dD60,adt90dD90,\ pt,grid_fname,skymap_fname=None): """ populate a row in a skyloctable """ row = skyloctable.RowType() row.end_time = coinc.time row.set_ifos(coinc.ifo_list) rhosquared = 0.0 for ifo in coinc.ifo_list: rhosquared += coinc.snr[ifo]*coinc.snr[ifo] row.comb_snr = sqrt(rhosquared) row.dec,row.ra = pt[0],pt[1] row.a60dt = adt60 row.a90dt = adt90 row.a60rank = arank60 row.a90rank = arank90 row.min_eff_distance = min(effD for effD in coinc.eff_distances.values()) if skymap_fname: row.skymap = os.path.basename(str(skymap_fname)) else: row.skymap = skymap_fname row.grid = os.path.basename(str(grid_fname)) skyloctable.append(row)
479,701
def __init__(self, *args): pipeline.LigolwAddNode.__init__(self, *args) self.input_cache = [] self.output_cache = [] self.cache_dir = os.path.join(os.getcwd(), self.job().cache_dir)
def __init__(self, job, remove_input, *args): pipeline.LigolwAddNode.__init__(self, job, *args) self.input_cache = [] self.output_cache = [] self.cache_dir = os.path.join(os.getcwd(), self.job().cache_dir)
479,702
def add_preserve_cache(self, cache): for c in cache: self.add_var_arg("--remove-input-except %s" % c.path())
def add_preserve_cache(self, cache): for c in cache: self.add_var_arg("--remove-input-except %s" % c.path())
479,703
def make_lladd_fragment(dag, parents, tag, segment = None, input_cache = None, preserve_cache = None, extra_input_cache = None): node = LigolwAddNode(lladdjob) # link to parents for parent in parents: node.add_parent(parent) # build input cache if input_cache is None: # default is to use all output files from parents for parent in parents: node.add_input_cache(parent.get_output_cache()) else: # but calling code can provide its own collection node.add_input_cache(input_cache) if extra_input_cache is not None: # sometimes it helps to add some extra node.add_input_cache(extra_input_cache) if preserve_cache is not None: node.add_preserve_cache(preserve_cache) # construct names for the node and output file, and override the # segment if needed [cache_entry] = node.get_output_cache() if segment is None: segment = cache_entry.segment node.set_name("lladd_%s_%s_%d_%d" % (tag, cache_entry.observatory, int(segment[0]), int(abs(segment)))) node.set_output("%s-%s-%d-%d.xml.gz" % (cache_entry.observatory, tag, int(segment[0]), int(abs(segment))), segment = segment) node.set_retry(3) dag.add_node(node) return set([node])
def make_lladd_fragment(dag, parents, tag, segment = None, input_cache = None, remove_input = False, preserve_cache = None, extra_input_cache = None): node = LigolwAddNode(lladdjob, remove_input = remove_input) # link to parents for parent in parents: node.add_parent(parent) # build input cache if input_cache is None: # default is to use all output files from parents for parent in parents: node.add_input_cache(parent.get_output_cache()) else: # but calling code can provide its own collection node.add_input_cache(input_cache) if extra_input_cache is not None: # sometimes it helps to add some extra node.add_input_cache(extra_input_cache) if preserve_cache is not None: node.add_preserve_cache(preserve_cache) # construct names for the node and output file, and override the # segment if needed [cache_entry] = node.get_output_cache() if segment is None: segment = cache_entry.segment node.set_name("lladd_%s_%s_%d_%d" % (tag, cache_entry.observatory, int(segment[0]), int(abs(segment)))) node.set_output("%s-%s-%d-%d.xml.gz" % (cache_entry.observatory, tag, int(segment[0]), int(abs(segment))), segment = segment) node.set_retry(3) dag.add_node(node) return set([node])
479,704
def make_multibinj_fragment(dag, seg, tag): flow = float(powerjob.get_opts()["low-freq-cutoff"]) fhigh = flow + float(powerjob.get_opts()["bandwidth"]) nodes = make_binj_fragment(dag, seg, tag, 0.0, flow, fhigh) return make_lladd_fragment(dag, nodes, tag)
def make_multibinj_fragment(dag, seg, tag): flow = float(powerjob.get_opts()["low-freq-cutoff"]) fhigh = flow + float(powerjob.get_opts()["bandwidth"]) nodes = make_binj_fragment(dag, seg, tag, 0.0, flow, fhigh) return make_lladd_fragment(dag, nodes, tag, remove_input = True)
479,705
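Rows 479,702, 479,704, and 479,705 above thread a single remove_input flag down three layers: make_multibinj_fragment turns it on, make_lladd_fragment grows the keyword, and LigolwAddNode.__init__ accepts it while forwarding the remaining positional arguments to its parent. A minimal sketch of the same layering, with hypothetical class and function names:

# Hypothetical classes illustrating how a keyword is threaded from the
# top-level fragment call down to the node constructor.
class BaseNode(object):
    def __init__(self, job):
        self.job = job

class AddNode(BaseNode):
    def __init__(self, job, remove_input, *args):
        BaseNode.__init__(self, job)        # forward the rest unchanged
        self.remove_input = remove_input

def make_fragment(job, remove_input=False):
    return AddNode(job, remove_input=remove_input)

node = make_fragment("lladd_job", remove_input=True)
print(node.remove_input)                    # True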
def get_coincs_from_coire(self,files): """ uses CoincInspiralUtils to get data from old-style (coire'd) coincs """ coincTrigs = CoincInspiralUtils.coincInspiralTable() inspTrigs = SnglInspiralUtils.ReadSnglInspiralFromFiles(files, \ mangle_event_id = True,verbose=None) #note that it's hardcoded to use snr as the statistic coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTrigs,'snr') try: inspInj = SimInspiralUtils.ReadSimInspiralFromFiles(files) coincTrigs.add_sim_inspirals(inspInj) #FIXME: name the exception! except: pass
def get_coincs_from_coire(self,files): """ uses CoincInspiralUtils to get data from old-style (coire'd) coincs """ coincTrigs = CoincInspiralUtils.coincInspiralTable() inspTrigs = SnglInspiralUtils.ReadSnglInspiralFromFiles(files, \ mangle_event_id = True,verbose=None) #note that it's hardcoded to use snr as the statistic statistic = CoincInspiralUtils.coincStatistic('snr',None,None) coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTrigs,statistic) try: inspInj = SimInspiralUtils.ReadSimInspiralFromFiles(files) coincTrigs.add_sim_inspirals(inspInj) #FIXME: name the exception! except: pass
479,706
def gridsky(resolution): """ grid the sky up into roughly square regions resolution is the length of a side the points get placed at the center of the squares and to first order each square has an area of resolution^2 """ latitude = 0.0 longitude = pi ds = pi*sqrt(2.0)*resolution/180.0 points = [(latitude-0.5*pi, longitude)] while latitude <= pi: latitude += ds longitude = 0.0 points.append((latitude-0.5*pi, longitude)) while longitude <= 2.0*pi: longitude += ds / abs(sin(latitude)) points.append((latitude-0.5*pi, longitude)) #there's some slop so get rid of it and only focus on points on the sphere sphpts = [] for pt in points: if pt[0] > pi/2 or pt[0] < -pi/2 \ or pt[1] > 2*pi or pt[1] < 0: pass else: sphpts.append(pt) return sphpts
def gridsky(resolution): """ grid the sky up into roughly square regions resolution is the length of a side the points get placed at the center of the squares and to first order each square has an area of resolution^2 """ latitude = 0.0 longitude = pi ds = pi*resolution/180.0 points = [(latitude-0.5*pi, longitude)] while latitude <= pi: latitude += ds longitude = 0.0 points.append((latitude-0.5*pi, longitude)) while longitude <= 2.0*pi: longitude += ds / abs(sin(latitude)) points.append((latitude-0.5*pi, longitude)) #there's some slop so get rid of it and only focus on points on the sphere sphpts = [] for pt in points: if pt[0] > pi/2 or pt[0] < -pi/2 \ or pt[1] > 2*pi or pt[1] < 0: pass else: sphpts.append(pt) return sphpts
479,707
def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp
def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid[:] coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp
479,708
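The fix in row 479,708 above replaces fgtemp = finegrid with fgtemp = finegrid[:], so that removing matched fine-grid points no longer mutates the caller's list. A minimal sketch of the pitfall, using hypothetical point values:

# Without a copy, removing matched points mutates the caller's list.
finegrid = [(0.0, 0.1), (0.0, 0.2)]

def consume(points):
    temp = points            # alias, not a copy: the bugged behaviour
    for pt in list(temp):    # iterate over a snapshot
        temp.remove(pt)      # removes through the alias
    return temp

consume(finegrid)
print(finegrid)              # [] -- the caller's list was emptied
# The fixed version isolates the helper with a shallow copy: temp = points[:]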
def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp
def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180.0 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp
479,709
def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp
def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4.0 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*sin(cpt[0])*sin(cpt[0]) \ <= ds*ds/4.0: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp
479,710
def IMRpeakAmp(m1,m2,spin1z,spin2z,d): """ IMRpeakAmp finds the peak amplitude of the waveform for a given source parameters and the source distance. usage: IMRpeakAmp(m1,m2,spin1z,spin2z,distance) e.g. spawaveApp.IMRpeakAmp(30,40,0.45,0.5,100) """ chi = spawaveform.computechi(m1, m2, spin1z, spin2z) imrfFinal = spawaveform.imrffinal(m1, m2, chi, 'fcut') fLower = 10.0 order = 7 dur = 2**numpy.ceil(numpy.log2(spawaveform.chirptime(m1,m2,order,fLower))) sr = 2**numpy.ceil(numpy.log2(imrfFinal*2)) deltaF = 1.0 / dur deltaT = 1.0 / sr s = numpy.empty(sr * dur, 'complex128') spawaveform.imrwaveform(m1, m2, deltaF, fLower, s, spin1z, spin2z) s = scipy.ifft(s) #s = numpy.abs(s) s = numpy.real(s) max = numpy.max(s)/d return max
def IMRpeakAmp(m1,m2,spin1z,spin2z,d): """ IMRpeakAmp finds the peak amplitude of the waveform for a given source parameters and the source distance. usage: IMRpeakAmp(m1,m2,spin1z,spin2z,distance) e.g. spawaveApp.IMRpeakAmp(30,40,0.45,0.5,100) """ chi = spawaveform.computechi(m1, m2, spin1z, spin2z) imrfFinal = spawaveform.imrffinal(m1, m2, chi, 'fcut') fLower = 10.0 order = 7 dur = 2**numpy.ceil(numpy.log2(spawaveform.chirptime(m1,m2,order,fLower))) sr = 2**numpy.ceil(numpy.log2(imrfFinal*2)) deltaF = 1.0 / dur deltaT = 1.0 / sr s = numpy.empty(sr * dur, 'complex128') spawaveform.imrwaveform(m1, m2, deltaF, fLower, s, spin1z, spin2z) s = scipy.ifft(s) #s = numpy.abs(s) s = numpy.real(s) max = numpy.max(s)/d return max
479,711
def add_noninjections(self, param_func, database, *args): # iterate over burst<-->burst coincs cursor = database.connection.cursor() for coinc_event_id, time_slide_id in database.connection.cursor().execute("""
def add_noninjections(self, param_func, database, *args): # iterate over burst<-->burst coincs cursor = database.connection.cursor() for coinc_event_id, time_slide_id in database.connection.cursor().execute("""
479,712
def get_username(): """ Try to retrieve the username from a variety of sources. First the environment variable LOGNAME is tried, if that is not set the environment variable USERNAME is tried, if that is not set the password database is consulted (only on Unix systems, if the import of the pwd module succedes), finally if that fails KeyError is raised. """ try: return os.environ["LOGNAME"] except KeyError: pass try: return os.environ["USERNAME"] except KeyError: pass try: import pwd return pwd.getpwuid(os.getuid())[0] except ImportError, KeyError: raise KeyError
def get_username(): """ Try to retrieve the username from a variety of sources. First the environment variable LOGNAME is tried, if that is not set the environment variable USERNAME is tried, if that is not set the password database is consulted (only on Unix systems, if the import of the pwd module succedes), finally if that fails KeyError is raised. """ try: return os.environ["LOGNAME"] except KeyError: pass try: return os.environ["USERNAME"] except KeyError: pass try: import pwd return pwd.getpwuid(os.getuid())[0] except (ImportError, KeyError): raise KeyError
479,713
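Row 479,713 above corrects a classic Python 2 pitfall: except ImportError, KeyError: catches only ImportError and binds the caught exception to the name KeyError, whereas the parenthesized tuple in the fixed version catches both types. A minimal Python 2 style sketch:

try:
    import no_such_module        # hypothetical module; raises ImportError
except ImportError, err:         # Python 2 syntax: binds the exception to err
    print(err)

# The bugged clause "except ImportError, KeyError:" therefore rebinds the
# name KeyError instead of catching it; a tuple catches both types:
try:
    raise KeyError("no username")
except (ImportError, KeyError):
    print("caught")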
def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found """ #Open the cache entry and search for those entrys fileListing=list() for entry in cacheListing: #Cache files listed themselves comment out following line fileListing.append(entry) fileListing.extend([x.rstrip("\n") for x in file(entry)]) #PATCH START to add in the z distribution files for fname in fileListing: if ".html" in fname: zFile=fname.replace(".html",".txt") fileListing.append(zFile) #PATCH END finalList=list() for thisFile in fileListing: #Search filesystem for file full path finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile)) #Look for potential matching thumbnails if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) if len(finalList) < 1: return list() else: return finalList
def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found. It uses the pathing information from the files passed via cacheListing to aid in our filesystem search. """ #Open the cache entry and search for those entrys fileListing=list() for entry in cacheListing: #Cache files listed themselves comment out following line fileListing.append(entry) fileListing.extend([x.rstrip("\n") for x in file(entry)]) #PATCH START to add in the z distribution files for fname in fileListing: if ".html" in fname: zFile=fname.replace(".html",".txt") fileListing.append(zFile) #PATCH END finalList=list() for thisFile in fileListing: #Search filesystem for file full path finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile)) #Look for potential matching thumbnails if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) if len(finalList) < 1: return list() else: return finalList
479,714
def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found """ #Open the cache entry and search for those entrys fileListing=list() for entry in cacheListing: #Cache files listed themselves comment out following line fileListing.append(entry) fileListing.extend([x.rstrip("\n") for x in file(entry)]) #PATCH START to add in the z distribution files for fname in fileListing: if ".html" in fname: zFile=fname.replace(".html",".txt") fileListing.append(zFile) #PATCH END finalList=list() for thisFile in fileListing: #Search filesystem for file full path finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile)) #Look for potential matching thumbnails if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) if len(finalList) < 1: return list() else: return finalList
def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found """ #Open the cache entry and search for those entrys finalList=list() for entry in cacheListing: #Cache files listed themselves comment out following line fileListing.append(entry) fileListing.extend([x.rstrip("\n") for x in file(entry)]) #PATCH START to add in the z distribution files for fname in fileListing: if ".html" in fname: zFile=fname.replace(".html",".txt") fileListing.append(zFile) #PATCH END finalList=list() for thisFile in fileListing: #Search filesystem for file full path finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile)) #Look for potential matching thumbnails if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) if len(finalList) < 1: return list() else: return finalList
479,715
def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found """ #Open the cache entry and search for those entrys fileListing=list() for entry in cacheListing: #Cache files listed themselves comment out following line fileListing.append(entry) fileListing.extend([x.rstrip("\n") for x in file(entry)]) #PATCH START to add in the z distribution files for fname in fileListing: if ".html" in fname: zFile=fname.replace(".html",".txt") fileListing.append(zFile) #PATCH END finalList=list() for thisFile in fileListing: #Search filesystem for file full path finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile)) #Look for potential matching thumbnails if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) if len(finalList) < 1: return list() else: return finalList
def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found """ #Open the cache entry and search for those entrys fileListing=list() for entry in cacheListing: #Cache files listed themselves comment out following line fileListing.append(entry) fileListing.extend([x.rstrip("\n") for x in file(entry)]) #PATCH START to add in the z distribution files for fname in fileListing: if ".html" in fname: zFile=fname.replace(".html",".txt") fileListing.append(zFile) #PATCH END finalList=list() for thisFile in fileListing: #Search filesystem for file full path finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile)) #Look for potential matching thumbnails if thisFile.endswith(".png"): finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png"))) if len(finalList) < 1: return list() else: return finalList
479,716
def get_analyzeQscan_SEIS(self): """ This seeks out the html and png files associated with SEIS result of an analyzeQscan job. """ cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*/%s-analyseQscan_%s_%s*_seis_rds*.cache"%(sngl.ifo,sngl.ifo,timeString) #Read the cache file or files cacheList.extend(fnmatch.filter(self.fsys,myCacheMask)) cacheFiles=self.__readCache__(cacheList) return cacheFiles
def get_analyzeQscan_SEIS(self): """ This seeks out the html and png files associated with SEIS result of an analyzeQscan job. """ cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*%s*/%s-analyseQscan_%s_%s*_seis_rds*.cache"%\ (self.coinc.type,sngl.ifo,sngl.ifo,timeString) #Read the cache file or files cacheList.extend(fnmatch.filter(self.fsys,myCacheMask)) cacheFiles=self.__readCache__(cacheList) return cacheFiles
479,717
def get_analyzeQscan_RDS(self): """ """ #analyseQscan.py_FG_RDS_full_data/H1-analyseQscan_H1_931176926_116_rds-unspecified-gpstime.cache cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*/%s-analyseQscan_%s_%s_rds*.cache"%(sngl.ifo,sngl.ifo,timeString) #Ignore the files with seis_rds in them for x in fnmatch.filter(self.fsys,myCacheMask): if not x.__contains__('seis_rds'): cacheList.append(x) #Read the cache file or files cacheFiles=self.__readCache__(cacheList) return cacheFiles
def get_analyzeQscan_RDS(self): """ """ #analyseQscan.py_FG_RDS_full_data/H1-analyseQscan_H1_931176926_116_rds-unspecified-gpstime.cache cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*%s*/%s-analyseQscan_%s_%s_rds*.cache"%\ (self.coinc.type,sngl.ifo,sngl.ifo,timeString) #Ignore the files with seis_rds in them for x in fnmatch.filter(self.fsys,myCacheMask): if not x.__contains__('seis_rds'): cacheList.append(x) #Read the cache file or files cacheFiles=self.__readCache__(cacheList) return cacheFiles
479,718
def get_analyzeQscan_HT(self): """ """ #analyseQscan.py_FG_HT_full_data/H1-analyseQscan_H1_931176926_116_ht-unspecified-gpstime.cache cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*/%s-analyseQscan_%s_%s*_ht*.cache"%(sngl.ifo,sngl.ifo,timeString) cacheList.extend(fnmatch.filter(self.fsys,myCacheMask)) #Read the cache file or files cacheFiles=self.__readCache__(cacheList) return cacheFiles
def get_analyzeQscan_HT(self): """ """ #analyseQscan.py_FG_HT_full_data/H1-analyseQscan_H1_931176926_116_ht-unspecified-gpstime.cache cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*%s*/%s-analyseQscan_%s_%s*_ht*.cache"\ %(self.coinc.type,sngl.ifo,sngl.ifo,timeString) cacheList.extend(fnmatch.filter(self.fsys,myCacheMask)) #Read the cache file or files cacheFiles=self.__readCache__(cacheList) return cacheFiles
479,719
def insertAnalyzeQscanTable(self, images=None, thumbs=None, indexes=None, ranksOmega=None, imagesAQ=None, thumbsAQ=None, indexesAQ=None, ranksAQ=None): """ Insert a multiple IFO table with 5 cols with the AQ underneath this depends on the numer of IFO keys in indexes dictionary. The option channelRanks is not required to change the plot order! Channel ranks is dict similar in shape to other args. Cells are shaded light grey if they are top N channels and that the trigger is greater in value that 0.5. Assuming the channelRanks dict is not empty. """ #Review the keys for Qscans and analyzeQscans. if not images.keys()==thumbs.keys()==indexes.keys(): sys.stderr.write("Error: Keys for Qscan tables creations inconsistent!\n") if not imagesAQ.keys()==thumbsAQ.keys()==indexesAQ.keys(): sys.stderr.write("Error: Keys for Qscan tables creations inconsistent!\n") keyList=indexes.keys() if len(keyList) < indexesAQ.keys(): keyList=indexesAQ.keys() for ifo in keyList: # If channel files exist read those # assuming that there are related images to plot channelNames=list() if ranksOmega[ifo] and images[ifo]: #Add only channels in Omega with a plot also. tmpChannels=[str(x[0]).strip() for x in ranksOmega[ifo]] for image in images[ifo]: for myChan in tmpChannels: if os.path.basename(image).__contains__(myChan): channelNames.append(myChan) if ranksAQ[ifo] and imagesAQ[ifo]: #Add only channels in Omega with a plot also. tmpChannels=[str(x[0]).strip() for x in ranksAQ[ifo]] for imageAQ in imagesAQ[ifo]: for myChan in tmpChannels: if os.path.basename(imageAQ).__contains__(myChan): channelNames.append(myChan) if (images[ifo]) and (not ranksOmega[ifo]): sys.stdout.write("Converting Omega filenames to channel names.\n") channelNames.extend(self.__filenameToChannelList__(images[ifo])) if (not ranksAQ[ifo]) and (imagesAQ[ifo]): sys.stdout.write("Converting AnalyzeQscan filenames to channel names.\n") channelNames.extend(self.__filenameToChannelList__(imagesAQ[ifo])) #From all available channel names make a UNIQUE listing! #Simplify channel names Kill L1:, L0: etc ... channelNames=[x.strip().split(":",1)[1] for x in channelNames] uniqChannelNames=list() lastName=None channelNames.sort() while channelNames: myName=channelNames.pop() if lastName != myName: lastName=myName uniqChannelNames.append(myName) #Check if uniqChannelNames list empty if len(uniqChannelNames) < 1: sys.stderr.write("Warning: [%s] No channels available to plot in table!\n"%ifo) uniqChannelNames.append("No_Channels_To_Display") ranksAQ[ifo]=list() ranksOmega[ifo]=list() #Extract only channel ranks which are available to plot! trimRanksOmega=list() trimRanksAQ=list() while ranksOmega[ifo]: nameRO=ranksOmega[ifo].pop() #If at least 1 match if str(nameRO[0]).strip().split(":",1)[1] in uniqChannelNames: trimRanksOmega.append(nameRO) while ranksAQ[ifo]: nameRAQ=ranksAQ[ifo].pop() #If at least 1 match if str(nameRAQ[0]).strip().split(":",1)[1] in uniqChannelNames: trimRanksAQ.append(nameRAQ) # Configure table columns colCount=3 # Create short list count shortListLength=3*colCount #Create a short list for analyzeQscan if available shortList=list() if trimRanksAQ: tmpList=[[x[2],x] for x in trimRanksAQ] tmpList.sort(reverse=True) shortList=[x[1] for x in tmpList][0:min(len(tmpList),shortListLength)] #Select channels to plot if shortlist gt zero else plot all! 
if shortList: shortListChannels=[a for a,b,c in shortList] else: shortListChannels=uniqChannelNames #Create table object fullRows,modRows=divmod(len(shortListChannels),colCount) if modRows > 0: rowCount=fullRows+1 else: rowCount=fullRows myTable=self.wikiTable(rowCount,colCount) myTable.setTableStyle("text-align:center") #Insert HTML links and IFO Label contentString="" contentString=contentString+" %s "%(ifo) #Add html links for table title for newLink in indexes[ifo]: contentString=contentString+" %s "%self.makeExternalLink(newLink,"Qscan") for newLink in indexesAQ[ifo]: contentString=contentString+" %s "%self.makeExternalLink(newLink,"analyzeQscan") myTable.setTableHeadline(contentString) #Start filling cells with Qscan and analyzeQscan scatter plot for cellNum,channel in enumerate(shortListChannels): #Grab plot info for this channel name #Search and replace ":" -> "_" Remeber for analyzeQscan filenames! #Qscan filenames use ":" and analyzeQscan filenames use "_"! myName=channel try: myOmegaIndex=[x.__contains__(myName) for x in images[ifo]].index(True) except ValueError: myOmegaIndex=None try: myOmegaIndexT=[x.__contains__(myName) for x in thumbs[ifo]].index(True) except ValueError: myOmegaIndexT=None try: myAQIndex=[x.__contains__(myName.replace(":","_")) \ for x in imagesAQ[ifo]].index(True) except ValueError: myAQIndex=None try: myAQIndexT=[x.__contains__(myName.replace(":","_")) \ for x in thumbsAQ[ifo]].index(True) except ValueError: myAQIndexT=None cellString="" #If there was a shortList add the Z value percentage to table! if myName and len(shortList) > 0: #Find rank myRank=0.0 for sName,sZ,sP in shortList: if sName.__contains__(myName): myRank=sP cellString=cellString+" %s Z-Percentage:%1.2f <<BR>> "%(myName,float(myRank)) elif myName: cellString=cellString+" %s <<BR>> "%myName else: cellString=cellString+" Unknown_Channel <<BR>> " if myOmegaIndex!=None: cellString=cellString+" %s "%self.linkedRemoteImage(thumbs[ifo][myOmegaIndexT], images[ifo][myOmegaIndex]) else: cellString=cellString+" Unavailable_Qscan <<BR>> " if myAQIndex!=None: cellString=cellString+" %s "%self.linkedRemoteImage(thumbsAQ[ifo][myAQIndexT], imagesAQ[ifo][myAQIndex]) else: cellString=cellString+" Unavailable_analyzeQScan <<BR>> " #Add string to cell myRow,myCol=divmod(cellNum,colCount) myTable.data[myRow][myCol]=" %s "%cellString self.insertTable(myTable)
def insertAnalyzeQscanTable(self, images=None, thumbs=None, indexes=None, ranksOmega=None, imagesAQ=None, thumbsAQ=None, indexesAQ=None, ranksAQ=None): """ Insert a multiple IFO table with 5 cols with the AQ underneath this depends on the numer of IFO keys in indexes dictionary. The option channelRanks is not required to change the plot order! Channel ranks is dict similar in shape to other args. Cells are shaded light grey if they are top N channels and that the trigger is greater in value that 0.5. Assuming the channelRanks dict is not empty. """ #Review the keys for Qscans and analyzeQscans. if not images.keys()==thumbs.keys()==indexes.keys(): sys.stderr.write("Error: Keys for Qscan tables creations inconsistent!\n") if not imagesAQ.keys()==thumbsAQ.keys()==indexesAQ.keys(): sys.stderr.write("Error: Keys for Qscan tables creations inconsistent!\n") keyList=indexes.keys() if len(keyList) < indexesAQ.keys(): keyList=indexesAQ.keys() for ifo in keyList: # If channel files exist read those # assuming that there are related images to plot channelNames=list() if ranksOmega[ifo] and images[ifo]: #Add only channels in Omega with a plot also. tmpChannels=[str(x[0]).strip() for x in ranksOmega[ifo]] for image in images[ifo]: for myChan in tmpChannels: if os.path.basename(image).__contains__(myChan): channelNames.append(myChan) if ranksAQ[ifo] and imagesAQ[ifo]: #Add only channels in Omega with a plot also. tmpChannels=[str(x[0]).strip() for x in ranksAQ[ifo]] for imageAQ in imagesAQ[ifo]: for myChan in tmpChannels: if os.path.basename(imageAQ).__contains__(myChan): channelNames.append(myChan) if (images[ifo]) and (not ranksOmega[ifo]): sys.stdout.write("Converting Omega filenames to channel names.\n") channelNames.extend(self.__filenameToChannelList__(images[ifo])) if (not ranksAQ[ifo]) and (imagesAQ[ifo]): sys.stdout.write("Converting AnalyzeQscan filenames to channel names.\n") channelNames.extend(self.__filenameToChannelList__(imagesAQ[ifo])) #From all available channel names make a UNIQUE listing! #Simplify channel names Kill L1:, L0: etc ... channelNames=[x.strip().split(":",1)[1] for x in channelNames] uniqChannelNames=list() lastName=None channelNames.sort() while channelNames: myName=channelNames.pop() if lastName != myName: lastName=myName uniqChannelNames.append(myName) #Check if uniqChannelNames list empty if len(uniqChannelNames) < 1: sys.stderr.write("Warning: [%s] No channels available to plot in table!\n"%ifo) uniqChannelNames.append("No_Channels_To_Display") ranksAQ[ifo]=list() ranksOmega[ifo]=list() #Extract only channel ranks which are available to plot! trimRanksOmega=list() trimRanksAQ=list() while ranksOmega[ifo]: nameRO=ranksOmega[ifo].pop() #If at least 1 match if str(nameRO[0]).strip().split(":",1)[1] in uniqChannelNames: trimRanksOmega.append(nameRO) while ranksAQ[ifo]: nameRAQ=ranksAQ[ifo].pop() #If at least 1 match if str(nameRAQ[0]).strip().split(":",1)[1] in uniqChannelNames: trimRanksAQ.append(nameRAQ) # Configure table columns colCount=3 # Create short list count shortListLength=3*colCount #Create a short list for analyzeQscan if available shortList=list() if trimRanksAQ: tmpList=[[x[2],x] for x in trimRanksAQ] tmpList.sort(reverse=True) shortList=[x[1] for x in tmpList][0:min(len(tmpList),shortListLength)] #Select channels to plot if shortlist gt zero else plot all! 
if shortList: shortListChannels=[a for a,b,c in shortList] else: shortListChannels=uniqChannelNames #Create table object fullRows,modRows=divmod(len(shortListChannels),colCount) if modRows > 0: rowCount=fullRows+1 else: rowCount=fullRows myTable=self.wikiTable(rowCount,colCount) myTable.setTableStyle("text-align:center") #Insert HTML links and IFO Label contentString="" contentString=contentString+" %s "%(ifo) #Add html links for table title for newLink in indexes[ifo]: contentString=contentString+" %s "%self.makeExternalLink(newLink,"Qscan") for newLink in indexesAQ[ifo]: contentString=contentString+" %s "%self.makeExternalLink(newLink,"analyzeQscan") myTable.setTableHeadline(contentString) #Start filling cells with Qscan and analyzeQscan scatter plot for cellNum,channel in enumerate(shortListChannels): #Grab plot info for this channel name #Search and replace ":" -> "_" Remeber for analyzeQscan filenames! #Qscan filenames use ":" and analyzeQscan filenames use "_"! myName=channel try: myOmegaIndex=[x.__contains__(myName) for x in images[ifo]].index(True) except ValueError: myOmegaIndex=None try: myOmegaIndexT=[x.__contains__(myName) for x in thumbs[ifo]].index(True) except ValueError: myOmegaIndexT=None try: myAQIndex=[x.__contains__(myName.replace(":","_")) \ for x in imagesAQ[ifo]].index(True) except ValueError: myAQIndex=None try: myAQIndexT=[x.__contains__(myName.replace(":","_")) \ for x in thumbsAQ[ifo]].index(True) except ValueError: myAQIndexT=None cellString="" #If there was a shortList add the Z value percentage to table! if myName and len(shortList) > 0: #Find rank myRank=0.0 for sName,sZ,sP in shortList: if sName.__contains__(myName): myRank=sP cellString=cellString+" %s Z-Percentage:%1.2f <<BR>> "%(myName,float(myRank)) elif myName: cellString=cellString+" %s <<BR>> "%myName else: cellString=cellString+" Unknown_Channel <<BR>> " if myOmegaIndex!=None: cellString=cellString+" %s "%self.linkedRemoteImage(thumbs[ifo][myOmegaIndexT], images[ifo][myOmegaIndex]) else: cellString=cellString+" Unavailable_Qscan <<BR>> " if myAQIndex!=None: cellString=cellString+" %s "%self.linkedRemoteImage(thumbsAQ[ifo][myAQIndexT], imagesAQ[ifo][myAQIndex]) else: cellString=cellString+" Unavailable_analyzeQScan <<BR>> " #Add string to cell myRow,myCol=divmod(cellNum,colCount) myTable.data[myRow][myCol]=" %s "%cellString self.insertTable(myTable)
479,720
def resetPicklePointer(self,filename=None): """ If you called the class definition with the wrong pickle path. You can reset it with this method. """ if filename==None: os.stdout.write("Path information to background pickle unchanged.\n") elif filename.__contains__("~"): self.__backgroundPickle__=os.path.expanduser(filename) else: self.__backgroundPickle__=filename
def resetPicklePointer(self,filename=None): """ If you called the class definition with the wrong pickle path. You can reset it with this method. """ if filename==None: sys.stdout.write("Path information to background pickle unchanged.\n") elif filename.__contains__("~"): self.__backgroundPickle__=os.path.expanduser(filename) else: self.__backgroundPickle__=filename
479,721
def fetchInformationDualWindow(self,triggerTime=None,frontWindow=300,\ backWindow=150,ifoList='DEFAULT'): """ This method is responsible for queries to the data server. The results of the query become an internal list that can be converted into an HTML table. The arguments allow you to query with trigger time of interest and to change the window with each call if desired. The version argument will fetch segments with that version or higher. """ if ifoList=="DEFAULT": ifoList=interferometers if (ifoList == None) or \ (len(ifoList) < 1): sys.stderr.write("Ifolist passed is malformed! : %s\n"%ifoList) return #Set the internal class variable self.ifos self.ifos=ifoList if sum([x.upper() in interferometers for x in ifoList]) < 1: sys.stderr.write("Valid ifos not specified for DQ lookups. %s\n"%ifoList) return triggerTime=float(triggerTime) if triggerTime==int(-1): os.stdout.write("Specify trigger time please.\n") return else: self.triggerTime = float(triggerTime) gpsEnd=int(triggerTime)+int(backWindow) gpsStart=int(triggerTime)-int(frontWindow) sqlString=self.dqvQueryLatestVersion%(gpsEnd,gpsStart) self.resultList=self.query(sqlString) if len(self.resultList) < 1: sys.stdout.write("Query Completed, Nothing Returned for time %s.\n"%(triggerTime)) #Coalesce the segments for each DQ flag #Reparse the information newDQSeg=list() if self.resultList.__len__() > 0: #Obtain list of all flags, ignore IFOs not specified uniqSegmentName=list() for ifo,name,version,comment,start,end in self.resultList: if (not uniqSegmentName.__contains__((ifo,name,version,comment))) and \ (ifo.strip().upper() in ifoList): uniqSegmentName.append((ifo,name,version,comment)) #Add the SCIENCE segment no matter which IFOs are specified! if ((name.lower().__contains__('science')) and \ not (ifo.strip().upper() in ifoList)): uniqSegmentName.append((ifo,name,version,comment)) #Save textKey for all uniq segments combos for uifo,uname,uversion,ucomment in uniqSegmentName: segmentIntervals=list() #Extra segments based on uniq textKey for ifo,name,version,comment,start,end in self.resultList: if (uifo,uname,uversion,ucomment)==(ifo,name,version,comment): segmentIntervals.append((start,end)) segmentIntervals.sort() #Coalesce those segments newSegmentIntervals=self.__merge__(segmentIntervals) #Write them to the object which we will return for newStart,newStop in newSegmentIntervals: newDQSeg.append([uifo,uname,uversion,ucomment,newStart,newStop]) newDQSeg.sort() del segmentIntervals #Reset the result list to the IFO restricted set self.resultList=newDQSeg return newDQSeg
def fetchInformationDualWindow(self,triggerTime=None,frontWindow=300,\ backWindow=150,ifoList='DEFAULT'): """ This method is responsible for queries to the data server. The results of the query become an internal list that can be converted into an HTML table. The arguments allow you to query with trigger time of interest and to change the window with each call if desired. The version argument will fetch segments with that version or higher. """ if ifoList=="DEFAULT": ifoList=interferometers if (ifoList == None) or \ (len(ifoList) < 1): sys.stderr.write("Ifolist passed is malformed! : %s\n"%ifoList) return #Set the internal class variable self.ifos self.ifos=ifoList if sum([x.upper() in interferometers for x in ifoList]) < 1: sys.stderr.write("Valid ifos not specified for DQ lookups. %s\n"%ifoList) return triggerTime=float(triggerTime) if triggerTime==int(-1): sys.stdout.write("Specify trigger time please.\n") return else: self.triggerTime = float(triggerTime) gpsEnd=int(triggerTime)+int(backWindow) gpsStart=int(triggerTime)-int(frontWindow) sqlString=self.dqvQueryLatestVersion%(gpsEnd,gpsStart) self.resultList=self.query(sqlString) if len(self.resultList) < 1: sys.stdout.write("Query Completed, Nothing Returned for time %s.\n"%(triggerTime)) #Coalesce the segments for each DQ flag #Reparse the information newDQSeg=list() if self.resultList.__len__() > 0: #Obtain list of all flags, ignore IFOs not specified uniqSegmentName=list() for ifo,name,version,comment,start,end in self.resultList: if (not uniqSegmentName.__contains__((ifo,name,version,comment))) and \ (ifo.strip().upper() in ifoList): uniqSegmentName.append((ifo,name,version,comment)) #Add the SCIENCE segment no matter which IFOs are specified! if ((name.lower().__contains__('science')) and \ not (ifo.strip().upper() in ifoList)): uniqSegmentName.append((ifo,name,version,comment)) #Save textKey for all uniq segments combos for uifo,uname,uversion,ucomment in uniqSegmentName: segmentIntervals=list() #Extra segments based on uniq textKey for ifo,name,version,comment,start,end in self.resultList: if (uifo,uname,uversion,ucomment)==(ifo,name,version,comment): segmentIntervals.append((start,end)) segmentIntervals.sort() #Coalesce those segments newSegmentIntervals=self.__merge__(segmentIntervals) #Write them to the object which we will return for newStart,newStop in newSegmentIntervals: newDQSeg.append([uifo,uname,uversion,ucomment,newStart,newStop]) newDQSeg.sort() del segmentIntervals #Reset the result list to the IFO restricted set self.resultList=newDQSeg return newDQSeg
479,722
def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True except: backgroundPickle=False sys.stderr.write("Error importing the pickle file! %s\n"\ %(pickleLocale)) return for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \
def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True except: backgroundPickle=False sys.stderr.write("Error importing the pickle file! %s\n"\ %(pickleLocale)) return for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \
479,723
def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True except: backgroundPickle=False sys.stderr.write("Error importing the pickle file! %s\n"\ %(pickleLocale)) return for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \
def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True except: backgroundPickle=False sys.stderr.write("Error importing the pickle file! %s\n"\ %(pickleLocale)) return for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \
479,724
def append_process(xmldoc, comment = None, force = None, ds_sq_threshold = None, save_small_coincs = None, vetoes_name = None, verbose = None): process = llwapp.append_process(xmldoc, program = process_program_name, version = __version__, cvs_repository = u"lscsoft", cvs_entry_time = __date__, comment = comment) params = [ (u"--ds-sq-threshold", u"real_8", ds_sq_threshold) ] if comment is not None: params += [(u"--comment", u"lstring", comment)] if force is not None: params += [(u"--force", None, None)] if save_small_coincs is not None: params += [(u"--save-small-coincs", None, None)] if vetoes_name is not None: params += [(u"--vetoes-name", u"lstring", vetoes_name)] if verbose is not None: params += [(u"--verbose", None, None)] ligolw_process.append_process_params(xmldoc, process, params) return process
def append_process(xmldoc, comment = None, force = None, ds_sq_threshold = None, save_small_coincs = None, vetoes_name = None, coinc_end_time_segment = None, verbose = None): process = llwapp.append_process(xmldoc, program = process_program_name, version = __version__, cvs_repository = u"lscsoft", cvs_entry_time = __date__, comment = comment) params = [ (u"--ds-sq-threshold", u"real_8", ds_sq_threshold) ] if comment is not None: params += [(u"--comment", u"lstring", comment)] if force is not None: params += [(u"--force", None, None)] if save_small_coincs is not None: params += [(u"--save-small-coincs", None, None)] if vetoes_name is not None: params += [(u"--vetoes-name", u"lstring", vetoes_name)] if verbose is not None: params += [(u"--verbose", None, None)] ligolw_process.append_process_params(xmldoc, process, params) return process
479,725
def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables #
def append_coinc(self, process_id, node, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables #
479,726
def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables #
def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): time_slide_id = node.time_slide_id # # populate the coinc_event and coinc_event_map tables #
479,727
def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables #
def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables #
479,728
def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables #
def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables #
479,729
def ligolw_rinca( xmldoc, process_id, EventListType, CoincTables, coinc_definer_row, event_comparefunc, thresholds, ntuple_comparefunc = lambda events, offset_vector: False, small_coincs = False, veto_segments = None, verbose = False
def ligolw_rinca( xmldoc, process_id, EventListType, CoincTables, coinc_definer_row, event_comparefunc, thresholds, ntuple_comparefunc = lambda events, offset_vector: False, small_coincs = False, veto_segments = None, verbose = False
479,730
def ligolw_rinca( xmldoc, process_id, EventListType, CoincTables, coinc_definer_row, event_comparefunc, thresholds, ntuple_comparefunc = lambda events, offset_vector: False, small_coincs = False, veto_segments = None, verbose = False
def ligolw_rinca( xmldoc, process_id, EventListType, CoincTables, coinc_definer_row, event_comparefunc, thresholds, ntuple_comparefunc = lambda events, offset_vector: False, small_coincs = False, veto_segments = None, verbose = False
479,731
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
479,732
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
479,733
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
479,734
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
479,735
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
479,736
def update_ids(connection, verbose = False): """ For internal use only. """ table_elems = dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100.0 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection)
def update_ids(connection, xmldoc=None, verbose = False): """ For internal use only. """ table_elems = dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100.0 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection)
479,737
def update_ids(connection, verbose = False): """ For internal use only. """ table_elems = dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100.0 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection)
def update_ids(connection, verbose = False): """ For internal use only. """ if xmldoc: table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) else: table_elems = dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100.0 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection)
479,738
def insert_from_url(connection, url, preserve_ids = False, verbose = False): """ Parse and insert the LIGO Light Weight document at the URL into the database the at the given connection. """ # # load document. this process inserts the document's contents into # the database. the document is unlinked to delete database cursor # objects it retains # utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")).unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, verbose)
def insert_from_url(connection, url, preserve_ids = False, verbose = False): """ Parse and insert the LIGO Light Weight document at the URL into the database the at the given connection. """ # # load document. this process inserts the document's contents into # the database. the document is unlinked to delete database cursor # objects it retains # xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # # update references to row IDs # if not preserve_ids: update_ids(connection, verbose)
479,739
def insert_from_url(connection, url, preserve_ids = False, verbose = False): """ Parse and insert the LIGO Light Weight document at the URL into the database the at the given connection. """ # # load document. this process inserts the document's contents into # the database. the document is unlinked to delete database cursor # objects it retains # utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")).unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, verbose)
def insert_from_url(connection, url, preserve_ids = False, verbose = False): """ Parse and insert the LIGO Light Weight document at the URL into the database the at the given connection. """ # # load document. this process inserts the document's contents into # the database. the document is unlinked to delete database cursor # objects it retains # utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")).unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, xmldoc, verbose) xmldoc.unlink()
479,740
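A sketch of how the fixed insert_from_url() above would typically be driven; the connection setup mirrors what the surrounding functions already assume, and the URL is illustrative:

import sqlite3
from glue.ligolw import dbtables

connection = sqlite3.connect("search.sqlite")         # hypothetical target database
dbtables.DBTable_set_connection(connection)
insert_from_url(connection, "H1L1-COINC.xml.gz", preserve_ids = False, verbose = True)
connection.commit()                                   # persist the inserted rows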
def insert_from_xmldoc(connection, xmldoc, preserve_ids = False, verbose = False): """ Insert the tables from an in-ram XML document into the database at the given connection. """ # # iterate over tables in the XML tree, reconstructing each inside # the database # for tbl in xmldoc.getElementsByTagName(ligolw.Table.tagName): # # instantiate the correct table class # name = dbtables.table.StripTableName(tbl.getAttribute("Name")) if name in dbtables.TableByName: dbtab = dbtables.TableByName[name](tbl.attributes, connection = connection) else: dbtab = dbtables.DBTable(tbl.attributes, connection = connection) # # copy table element child nodes from source XML tree # for elem in tbl.childNodes: if elem.tagName == dbtables.table.TableStream.tagName: dbtab._end_of_columns() dbtab.appendChild(type(elem)(elem.attributes)) # # copy table rows from source XML tree # for row in tbl: dbtab.append(row) dbtab._end_of_rows() # # unlink to delete cursor objects # dbtab.unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, verbose)
def insert_from_xmldoc(connection, xmldoc, preserve_ids = False, verbose = False): """ Insert the tables from an in-ram XML document into the database at the given connection. """ # # iterate over tables in the XML tree, reconstructing each inside # the database # for tbl in xmldoc.getElementsByTagName(ligolw.Table.tagName): # # instantiate the correct table class # name = dbtables.table.StripTableName(tbl.getAttribute("Name")) if name in dbtables.TableByName: dbtab = dbtables.TableByName[name](tbl.attributes, connection = connection) else: dbtab = dbtables.DBTable(tbl.attributes, connection = connection) # # copy table element child nodes from source XML tree # for elem in tbl.childNodes: if elem.tagName == dbtables.table.TableStream.tagName: dbtab._end_of_columns() dbtab.appendChild(type(elem)(elem.attributes)) # # copy table rows from source XML tree # for row in tbl: dbtab.append(row) dbtab._end_of_rows() # # unlink to delete cursor objects # dbtab.unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, verbose) connection.commit()
479,741
def insert_from_xmldoc(connection, xmldoc, preserve_ids = False, verbose = False): """ Insert the tables from an in-ram XML document into the database at the given connection. """ # # iterate over tables in the XML tree, reconstructing each inside # the database # for tbl in xmldoc.getElementsByTagName(ligolw.Table.tagName): # # instantiate the correct table class # name = dbtables.table.StripTableName(tbl.getAttribute("Name")) if name in dbtables.TableByName: dbtab = dbtables.TableByName[name](tbl.attributes, connection = connection) else: dbtab = dbtables.DBTable(tbl.attributes, connection = connection) # # copy table element child nodes from source XML tree # for elem in tbl.childNodes: if elem.tagName == dbtables.table.TableStream.tagName: dbtab._end_of_columns() dbtab.appendChild(type(elem)(elem.attributes)) # # copy table rows from source XML tree # for row in tbl: dbtab.append(row) dbtab._end_of_rows() # # unlink to delete cursor objects # dbtab.unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, verbose)
def insert_from_xmldoc(connection, xmldoc, preserve_ids = False, verbose = False): """ Insert the tables from an in-ram XML document into the database at the given connection. """ # # iterate over tables in the XML tree, reconstructing each inside # the database # for tbl in xmldoc.getElementsByTagName(ligolw.Table.tagName): # # instantiate the correct table class # name = dbtables.table.StripTableName(tbl.getAttribute("Name")) if name in dbtables.TableByName: dbtab = dbtables.TableByName[name](tbl.attributes, connection = connection) else: dbtab = dbtables.DBTable(tbl.attributes, connection = connection) # # copy table element child nodes from source XML tree # for elem in tbl.childNodes: if elem.tagName == dbtables.table.TableStream.tagName: dbtab._end_of_columns() dbtab.appendChild(type(elem)(elem.attributes)) # # copy table rows from source XML tree # for row in tbl: dbtab.append(row) dbtab._end_of_rows() # # unlink to delete cursor objects # dbtab.unlink() # # update references to row IDs # if not preserve_ids: update_ids(connection, None, verbose)
479,742
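The in-memory variant takes a document that has already been parsed. A sketch assuming glue.ligolw.utils.load_filename as the loader (the file name is illustrative, and the connection is set up as in the previous example):

from glue.ligolw import utils

xmldoc = utils.load_filename("H1L1-COINC.xml")
insert_from_xmldoc(connection, xmldoc, preserve_ids = False, verbose = True)
xmldoc.unlink()                                       # drop the in-memory tree afterwards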
def directional_horizon(ifos, RA, dec, gps_time, horizons=None): """ Return a dictionary of sensitivity numbers for each detector, based on a known sky location and an optional input dictionary of inspiral horizon distances for a reference source of the user's choice. If the horizons dictionary is specified, the returned values are interpreted as inspiral horizons in that direction. """ # Convert type if necessary if type(gps_time)==int: gps_time=float(gps_time)
def directional_horizon(ifos, RA, dec, gps_time, horizons=None): """ Return a dictionary of sensitivity numbers for each detector, based on a known sky location and an optional input dictionary of inspiral horizon distances for a reference source of the user's choice. If the horizons dictionary is specified, the returned values are interpreted as inspiral horizons in that direction. """ # Convert type if necessary if type(gps_time)==int: gps_time=float(gps_time)
479,743
def detector_thresholds(horizons,min_threshold,max_threshold=7.5): """ Return a set of detector thresholds adjusted for a particular set of inspiral horizon distances (calculated with directional_horizon). The min_threshold specified the minimum threshold which will be set for all detectors less sensitive than the best one. The most sensitive detector will have its threshold adjusted upward to a maximum of max_threshold. """ assert min_threshold < max_threshold threshs={} worst_horizon=min(horizons.values()) best_horizon=max(horizons.values()) # Assuming that lowest threshold is in worst detector, return thresholds for det in horizons.keys(): if horizons[det]<best_horizon: threshs[det]=min_threshold else: threshs[det]=min_threshold*(horizons[det]/worst_horizon) if threshs[det]>max_threshold: threshs[det]=max_threshold return threshs
def detector_thresholds(horizons,min_threshold,max_threshold=7.5): """ Return a set of detector thresholds adjusted for a particular set of inspiral horizon distances (calculated with directional_horizon). The min_threshold specified the minimum threshold which will be set for all detectors less sensitive than the best one. The most sensitive detector will have its threshold adjusted upward to a maximum of max_threshold. """ assert min_threshold < max_threshold threshs={} worst_horizon=min(horizons.values()) best_horizon=max(horizons.values()) # Assuming that lowest threshold is in worst detector, return thresholds for det in horizons.keys(): if horizons[det]<best_horizon: threshs[det]=min_threshold else: threshs[det]=min_threshold*(horizons[det]/worst_horizon) if threshs[det]>max_threshold: threshs[det]=max_threshold return threshs
479,744
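A worked example of the scaling in detector_thresholds(); the horizon distances are made-up numbers, not measured values:

horizons = {"H1": 180.0, "L1": 120.0, "V1": 60.0}     # Mpc, illustrative only
print detector_thresholds(horizons, min_threshold = 5.5)
# L1 and V1 are less sensitive than H1 and stay at 5.5; H1 would scale to
# 5.5 * (180.0 / 60.0) = 16.5 but is capped at the default max_threshold of 7.5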
def fetchInformation(self,triggerTime=None,window=300): """ Wrapper for fetchInformationDualWindow that mimics original behavior """ return self.fetchInformationDualWindow(triggerTime,window,window,ifoList='DEFAULT')
def fetchInformation(self,triggerTime=None,window=300): """ Wrapper for fetchInformationDualWindow that mimics original behavior """ return self.fetchInformationDualWindow(triggerTime,window,window,ifoList='DEFAULT')
479,745
def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \
def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \
479,746
def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \
def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True except: backgroundPickle=False sys.stderr.write("Error importing the pickle file! %s\n"\ %(pickleLocale)) return for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \
479,747
def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \
def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \
479,748
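The change in pair 479,747 makes an unreadable pickle fall back to a warning instead of propagating the exception. A sketch of the intended call, where flagTable stands for a hypothetical instance of the class these methods belong to, and the ifo/epoch pairs and path are illustrative:

flagTable.createDQbackground(ifoEpochList = [("H1", "S6"), ("L1", "S6")],
                             pickleLocale = "dq_background.pickle")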
def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating
def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in self.ifos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating
479,749
def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating
def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulation of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating
479,750
def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating
def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo in seenFlags.keys(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating
479,751
def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating
def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() if myIfo.strip() not in self.__backgroundDict__.keys(): if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for outsideFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][outsideFlag]=float(-0.0) else: for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating
479,752
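The binomial 'p' tabulated above is just an occurrence fraction over the sampled background timestamps. A toy illustration with made-up counts:

backgroundPoints = 1000.0     # number of generated background time stamps
flagHits = 250                # background times at which a given flag was active
p = flagHits / backgroundPoints
print p                       # 0.25: the flag is active at 25% of random times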
def generateHTMLTable(self,tableType="BOTH"): """ Return a HTML table already formatted using the module MARKUP to keep the HTML tags complient. This method does nothing but return the result of the last call to self.fetchInformation() The flag names associated with LIGO will have links to the channel wiki in them also. Types that will invoke a not everything behaviour are DQ and VETO """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="<table bgcolor=grey border=1px>" titleString="<tr>" tableEmptyString="<tr bgcolor=%s>"%myColor rowString="<tr bgcolor=%s> " for col in self.__columns__: titleString+="<th>%s</th>"%col rowString+="<td>%s</td>" tableEmptyString+="<td>None</td>" titleString+="</tr>\n" tableEmptyString+="</tr>\n" rowString+="</tr>\n" tableString+=titleString if len(self.resultList) == 0: tableString+=tableEmptyString for ifo,name,version,comment,start,stop in self.resultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" if tableType.upper().strip() == "DQ": if not name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() == "VETO": if name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() not in ["VETO","DQ"]: tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="</table>" return tableString
def generateHTMLTable(self,tableType="BOTH"): """ Return a HTML table already formatted using the module MARKUP to keep the HTML tags complient. This method does nothing but return the result of the last call to self.fetchInformation() The flag names associated with LIGO will have links to the channel wiki in them also. Types that will invoke a not everything behaviour are DQ and VETO """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="<table bgcolor=grey border=1px>" titleString="<tr>" tableEmptyString="<tr bgcolor=%s>"%myColor rowString="<tr bgcolor=%s> " for col in self.__columns__: titleString+="<th>%s</th>"%col rowString+="<td>%s</td>" tableEmptyString+="<td>None</td>" titleString+="</tr>\n" tableEmptyString+="</tr>\n" rowString+="</tr>\n" tableString+=titleString if len(self.resultList) == 0: tableString+=tableEmptyString for ifo,name,version,comment,start,stop in self.resultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" if tableType.upper().strip() == "DQ": if not name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,\ size,myBackgroundRank,myCategory) elif tableType.upper().strip() == "VETO": if name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,\ size,myBackgroundRank,myCategory) elif tableType.upper().strip() not in ["VETO","DQ"]: tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="</table>" return tableString
479,753
def generateHTMLTable(self,tableType="BOTH"): """ Return a HTML table already formatted using the module MARKUP to keep the HTML tags complient. This method does nothing but return the result of the last call to self.fetchInformation() The flag names associated with LIGO will have links to the channel wiki in them also. Types that will invoke a not everything behaviour are DQ and VETO """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="<table bgcolor=grey border=1px>" titleString="<tr>" tableEmptyString="<tr bgcolor=%s>"%myColor rowString="<tr bgcolor=%s> " for col in self.__columns__: titleString+="<th>%s</th>"%col rowString+="<td>%s</td>" tableEmptyString+="<td>None</td>" titleString+="</tr>\n" tableEmptyString+="</tr>\n" rowString+="</tr>\n" tableString+=titleString if len(self.resultList) == 0: tableString+=tableEmptyString for ifo,name,version,comment,start,stop in self.resultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" if tableType.upper().strip() == "DQ": if not name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() == "VETO": if name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() not in ["VETO","DQ"]: tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="</table>" return tableString
def generateHTMLTable(self,tableType="BOTH"): """ Return a HTML table already formatted using the module MARKUP to keep the HTML tags complient. This method does nothing but return the result of the last call to self.fetchInformation() The flag names associated with LIGO will have links to the channel wiki in them also. Types that will invoke a not everything behaviour are DQ and VETO """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="<table bgcolor=grey border=1px>" titleString="<tr>" tableEmptyString="<tr bgcolor=%s>"%myColor rowString="<tr bgcolor=%s> " for col in self.__columns__: titleString+="<th>%s</th>"%col rowString+="<td>%s</td>" tableEmptyString+="<td>None</td>" titleString+="</tr>\n" tableEmptyString+="</tr>\n" rowString+="</tr>\n" tableString+=titleString if len(self.resultList) == 0: tableString+=tableEmptyString for ifo,name,version,comment,start,stop in self.resultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" if tableType.upper().strip() == "DQ": if not name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,\ size,myBackgroundRank,myCategory) elif tableType.upper().strip() == "VETO": if name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,\ size,myBackgroundRank,myCategory) elif tableType.upper().strip() not in ["VETO","DQ"]: tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="</table>" return tableString
479,754
def generateMOINMOINTable(self,tableType="BOTH"): """ Return a MOINMOIN table. """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="" titleString="" emptyRowString="" rowString="" for i,col in enumerate(self.__columns__): if i == 0: titleString+="""||<rowbgcolor="%s"> %s """%(myColor,col) rowString+="""||<rowbgcolor="%s"> %s """ emptyRowString+="""||<rowbgcolor="%s"> None """%myColor else: titleString+="""|| %s """%col rowString+="""|| %s """ emptyRowString+="""|| None """ titleString+="""||\n""" rowString+="""||\n""" emptyRowString+="""||\n""" tableString+=titleString #Extract only DQ row or only VETO rows tmpResultList=list() for myRow in self.resultList: ifo,name,version,comment,start,stop=myRow #Select base on table type if ((tableType.upper() == "DQ") and \ (not name.strip().upper().startswith("UPV"))): tmpResultList.append(myRow) elif ((tableType.upper() == "VETO") and \ (name.strip().upper().startswith("UPV"))): tmpResultList.append(myRow) elif tableType.upper().strip() not in ["VETO","DQ"]: tmpResultList.append(myRow) if len(tmpResultList) == 0: tableString+=emptyRowString%myColor for ifo,name,version,comment,start,stop in tmpResultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" tableString+=rowString%(myColor,str(ifo).strip(),name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="\n" return tableString
def generateMOINMOINTable(self,tableType="BOTH"): """ Return a MOINMOIN table. """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="" titleString="" emptyRowString="" rowString="" for i,col in enumerate(self.__columns__): if i == 0: titleString+="""||<rowbgcolor="%s"> %s """%(myColor,col) rowString+="""||<rowbgcolor="%s"> %s """ emptyRowString+="""||<rowbgcolor="%s"> None """%myColor else: titleString+="""|| %s """%col rowString+="""|| %s """ emptyRowString+="""|| None """ titleString+="""||\n""" rowString+="""||\n""" emptyRowString+="""||\n""" tableString+=titleString #Extract only DQ row or only VETO rows tmpResultList=list() for myRow in self.resultList: ifo,name,version,comment,start,stop=myRow #Select base on table type if ((tableType.upper() == "DQ") and \ (not name.strip().upper().startswith("UPV"))): tmpResultList.append(myRow) elif ((tableType.upper() == "VETO") and \ (name.strip().upper().startswith("UPV"))): tmpResultList.append(myRow) elif tableType.upper().strip() not in ["VETO","DQ"]: tmpResultList.append(myRow) if len(tmpResultList) == 0: tableString+=emptyRowString for ifo,name,version,comment,start,stop in tmpResultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" tableString+=rowString%(myColor,str(ifo).strip(),name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="\n" return tableString
479,755
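Both table generators above color a row by the flag's offsets from the trigger time. A sketch of the three cases with hypothetical GPS times:

triggerTime = 1000000000
for start, stop in [(1000000002, 1000000008), (999999990, 999999998), (999999995, 1000000005)]:
    offset1, offset2 = start - triggerTime, stop - triggerTime
    if (offset1 >= 0) and (offset2 >= 0): color = "green"    # flag entirely after the trigger
    if (offset1 <= 0) and (offset2 <= 0): color = "yellow"   # flag entirely before the trigger
    if (offset1 <= 0) and (offset2 >= 0): color = "red"      # flag straddles the trigger
    print start, stop, color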
def run(self): # remove the automatically generated user env scripts for script in ["pylal-user-env.sh", "pylal-user-env.csh"]: log.info("removing " + script ) try: os.unlink(os.path.join("etc", script)) except: pass
def run(self): # remove the automatically generated user env scripts for script in ["pylal-user-env.sh", "pylal-user-env.csh"]: log.info("removing " + script ) try: os.unlink(os.path.join("etc", script)) except: pass
479,756
def setup(target, check_same_thread=True): connection = sqlite3.connect(target, check_same_thread=check_same_thread) dbtables.DBTable_set_connection(connection) for tbl in dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName): tbl.sync_next_id() return connection
def setup(target, check_same_thread=True): connection = sqlite3.connect(target, check_same_thread=check_same_thread) dbtables.DBTable_set_connection(connection) dbtables.idmap_sync(connection) return connection
479,757
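A one-line sketch of opening a database through the fixed setup() above (the file name is illustrative); idmap_sync replaces the per-table sync_next_id loop of the bugged version:

connection = setup("thinca.sqlite", check_same_thread = False)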
def update_ids(xmldoc, connection, verbose = False): """ For internal use only. """ table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection)
def update_ids(connection, verbose = False): """ For internal use only. """ table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection)
479,758
def update_ids(xmldoc, connection, verbose = False): """ For internal use only. """ table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection)
def update_ids(xmldoc, connection, verbose = False): """ For internal use only. """ table_elems = dbtables.get_xml(connection).getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection)
479,759
def update_ids(xmldoc, connection, verbose = False): """ For internal use only. """ table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection)
def update_ids(xmldoc, connection, verbose = False): """ For internal use only. """ table_elems = xmldoc.getElementsByTagName(ligolw.Table.tagName) for i, tbl in enumerate(table_elems): if verbose: print >>sys.stderr, "updating IDs: %d%%\r" % (100.0 * i / len(table_elems)), tbl.applyKeyMapping() if verbose: print >>sys.stderr, "updating IDs: 100%" # reset ID mapping for next document dbtables.idmap_reset(connection)
479,760
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs, calling insert_from_url() on each, then build the indexes indicated by the metadata in lsctables.py. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
479,761
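The batch form iterates the same load/remap/unlink cycle over many documents and builds indexes once at the end. A sketch with illustrative file names, reusing the connection from the earlier examples:

urls = ["H1L1-COINC-1.xml.gz", "H1L1-COINC-2.xml.gz"]
insert_from_urls(connection, urls, preserve_ids = False, verbose = True)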
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
479,762
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
479,763
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
479,764
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
479,765
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False): """ Iterate over a sequence of URLs and parse and insert each one into the database the dbtables.DBTable class is currently connected to. """ orig_DBTable_append = dbtables.DBTable.append if not preserve_ids: # enable ID remapping dbtables.idmap_create(connection) dbtables.DBTable.append = dbtables.DBTable._remapping_append else: # disable ID remapping dbtables.DBTable.append = dbtables.DBTable._append for n, url in enumerate(urls): # load document (if enabled, row IDs are reassigned on # input) if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz")) # update references to row IDs if not preserve_ids: update_ids(xmldoc, connection, verbose) # delete cursors xmldoc.unlink() connection.commit() dbtables.build_indexes(connection, verbose) dbtables.DBTable.append = orig_DBTable_append
def insert_from_urls(connection, urls, preserve_ids = False, verbose = False):
    """
    Iterate over a sequence of URLs and parse and insert each one into
    the database the dbtables.DBTable class is currently connected to.
    """
    orig_DBTable_append = dbtables.DBTable.append
    if not preserve_ids:
        # enable ID remapping
        dbtables.idmap_create(connection)
        dbtables.DBTable.append = dbtables.DBTable._remapping_append
    else:
        # disable ID remapping
        dbtables.DBTable.append = dbtables.DBTable._append
    for n, url in enumerate(urls):
        # load document (if enabled, row IDs are reassigned on
        # input)
        if verbose:
            print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)),
        xmldoc = utils.load_url(url, verbose = verbose, gz = (url or "stdin").endswith(".gz"))
        # update references to row IDs
        if not preserve_ids:
            update_ids(xmldoc, connection, verbose)
        # delete cursors
        xmldoc.unlink()
    connection.commit()
    dbtables.build_indexes(connection, verbose)
    dbtables.DBTable.append = orig_DBTable_append
479,766
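Each insert_from_urls variant above relies on the same idiom: stash DBTable.append, swap in either the remapping or the plain implementation, and restore the original when done. A generic sketch of that idiom follows; the try/finally is a hardening not present in the dataset's code, which restores the method even if a document fails to load:

def with_swapped_method(cls, name, replacement, work):
    # save the original implementation before patching the class
    original = getattr(cls, name)
    setattr(cls, name, replacement)
    try:
        return work()  # e.g. the per-URL load loop
    finally:
        # always put the original implementation back
        setattr(cls, name, original)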
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):
    if eventnum is not None and injfile is None:
        print "You specified an event number but no injection file. Ignoring!"
    if data is None:
        print 'You must specify an input data file'
        exit(1)
    #
    if outdir is None:
        print "You must specify an output directory."
        exit(1)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #
    summary_fo=open(os.path.join(outdir,'summary.ini'),'w')
    summary_file=ConfigParser()
    summary_file.add_section('metadata')
    summary_file.set('metadata','group_id','X')
    if eventnum:
        summary_file.set('metadata','event_id',str(eventnum))
    summary_file.add_section('Confidence levels')
    summary_file.set('Confidence levels','confidence levels',str(confidence_levels))

    # Load in the main data
    paramnames, pos=loadDataFile(data[0])

    #Generate any required derived parameters
    if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames:
        (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')])
        pos=np.column_stack((pos,m1,m2))
        paramnames.append("m1")
        paramnames.append("m2")
    #
    Nd=len(paramnames)
    print "Number of posterior samples: " + str(size(pos,0))

    # Calculate means
    means = mean(pos,axis=0)
    meanStr=map(str,means)
    out=reduce(lambda a,b:a+'||'+b,meanStr)
    print 'Means:'
    print '||'+out+'||'

    RAdim=paramnames.index('RA')
    decdim=paramnames.index('dec')

    injection=None
    # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file
    if injfile:
        import itertools
        injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile])
        if(eventnum is not None):
            if(len(injections)<eventnum):
                print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections))
                sys.exit(1)
            else:
                injection=injections[eventnum]
        else:
            if(len(injections)<1):
                print 'Warning: Cannot find injection with end time %f' %(means[2])
            else:
                injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next()

    #If injection parameter passed load object representation of injection
    #table entries.
    if injection:
        injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames)))
        injvals=map(str,injpoint)
        out=reduce(lambda a,b:a+'||'+b,injvals)
        print 'Injected values:'
        print out
        #Add injection values to output file
        summary_file.add_section('Injection values')
        for parnum in range(len(paramnames)):
            summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum))
    #
    #If sky resolution parameter has been specified try and create sky map.
    skyreses=None
    if skyres is not None:
        RAvec=array(pos)[:,paramnames.index('RA')]
        decvec=array(pos)[:,paramnames.index('dec')]
        skypos=column_stack([RAvec,decvec])
        injvalues=None
        if injection:
            injvalues=(injpoint[RAdim],injpoint[decdim])
        skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir)

    #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs
    #using a greedy algorithm . The ranked pixels (toppoints) are used
    #to plot 2D histograms and evaluate Bayesian confidence intervals.
    summary_file.add_section('2D greedy cl')
    summary_file.add_section('2D greedy cl inj')

    ncon=len(confidence_levels)
    pos_array=np.array(pos)
    twoDGreedyCL={}
    twoDGreedyInj={}
    for par1_name,par2_name in twoDGreedyMenu:
        print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name)
        #Bin sizes
        try:
            par1_bin=GreedyRes[par1_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_bin=GreedyRes[par2_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        #Get posterior samples
        try:
            par1_index=paramnames.index(par1_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_index=paramnames.index(par2_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue

        pars_name=("%s,%s"%(par1_name,par2_name)).lower()
        par1pos=pos_array[:,par1_index]
        par2pos=pos_array[:,par2_index]
        injection_=None
        if injection:
            par1_injvalue=np.array(injpoint)[par1_index]
            par2_injvalue=np.array(injpoint)[par2_index]
            injection_=(par1_injvalue,par2_injvalue)
        posterior_array=column_stack([par1pos,par2pos])
        toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_)

        #Plot 2D histograms of greedily binned points
        if injection is not None and par1_injvalue is not None and par2_injvalue is not None:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue])
        else:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name))
        #
        summaryString='['
        for frac,area in twoDGreedyCL[pars_name].items():
            summaryString+=str(area)
        summary_file.set('2D greedy cl',pars_name,summaryString+']')
        summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence))

    if not os.path.exists(os.path.join(outdir,'pickle')):
        os.makedirs(os.path.join(outdir,'pickle'))
    pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle'))
    pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle'))
    for par in twoDGreedyCL.keys():
        oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat'))

    #1D binning
    summary_file.add_section('1D mean')
    summary_file.add_section('1D median')
    summary_file.add_section('1D mode')
    summary_file.add_section('1D contigious cl')
    summary_file.add_section('1D greedy cl')
    summary_file.add_section('1D stacc')

    oneDStats={}
    oneDGreedyCL={}
    oneDContCL={}
    oneDGreedyInj={}
    oneDContInj={}

    max_pos,max_i=posMode(pos_array)

    #Loop over each parameter and determine the contigious and greedy
    #confidence levels and some statistics.
    for par_name in oneDMenu:
        print "Binning %s to determine confidence levels ..."%par_name
        try:
            par_index=paramnames.index(par_name)
        except ValueError:
            print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print "Bin size is not set for %s, skipping binning."%par_name
            continue

        oneDGreedyCL[par_name]={}
        oneDStats[par_name]={}
        oneDContCL[par_name]={}
        oneDGreedyInj[par_name]={}
        oneDContInj[par_name]={}

        par_samps=pos_array[:,par_index]
        summary_file.set('1D mode',par_name,str(par_samps[max_i]))
        summary_file.set("1D mean",par_name,str(np.mean(par_samps)))
        summary_file.set("1D median",par_name,str(np.median(par_samps)))
        oneDStats[par_name]['mode']=par_samps[max_i]
        oneDStats[par_name]['mean']=np.mean(par_samps)
        oneDStats[par_name]['median']=np.median(par_samps)

        par_injvalue_=None
        if injection:
            par_injvalue_=np.array(injpoint)[par_index]
        oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)

        #Ilya's standard accuracy statistic
        if injection:
            injvalue=np.array(injpoint)[par_index]
            if injvalue:
                stacc=bppu.stacc_stat(par_samps,injvalue)
                summary_file.set('1D stacc',par_name,str(stacc))
                oneDStats[par_name]['stacc']=stacc

    pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle'))
    pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle'))
    pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle'))
    pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle'))
    pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle'))
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat'))
    #
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat'))
    #
    #####Generate 2D kde plots and webpage########
    margdir=os.path.join(outdir,'2D')
    if not os.path.isdir(margdir):
        os.makedirs(margdir)
    twoDKdePaths=[]
    for par1,par2 in twoDplots:
        try:
            i=paramnames.index(par1)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2)
            continue
        try:
            j=paramnames.index(par2)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2)
            continue
        print 'Generating %s-%s plot'%(paramnames[i],paramnames[j])
        if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2):
            continue
        par_injvalues_=None
        if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) :
            if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None:
                par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) )
        myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_)
        twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png')
        twoDKdePaths.append(twoDKdePath)
        myfig.savefig(twoDKdePath)

    htmlfile=open(os.path.join(outdir,'posplots.html'),'w')
    htmlfile.write('<HTML><HEAD><TITLE>Posterior PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>')
    if(skyres is not None):
        htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>')
        for (frac,skysize) in skyreses:
            htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize))
        htmlfile.write('</table>')
    htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>')
    htmlfile.write('Samples read from %s<br>'%(data[0]))
    htmlfile.write('<h4>Mean parameter estimates</h4>')
    htmlfile.write('<table border=1><tr>')
    paramline=reduce(lambda a,b:a+'<td>'+b,paramnames)
    htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>')
    meanline=reduce(lambda a,b:a+'<td>'+b,meanStr)
    htmlfile.write('<td>'+meanline+'</tr>')
    if injection:
        htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>')
        injline=reduce(lambda a,b:a+'<td>'+b,injvals)
        htmlfile.write('<td>'+injline+'<td></tr>')
    htmlfile.write('</table>')
    if injection:
        if skyinjectionconfidence:
            htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence))
        else:
            htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>')
    htmlfile.write('<h5>2D Marginal PDFs</h5><br>')
    htmlfile.write('<table border=1><tr>')
    #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>')
    #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>')
    if skyres is not None:
        htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>')
    else:
        htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>')
    #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>')
    row_switch=1
    for par1,par2 in twoDplots:
        if row_switch==3:
            row_switch=0
        plot_path=None
        if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')):
            plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png'
        elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')):
            plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png'
        if plot_path:
            if row_switch==0:
                htmlfile.write('<tr>')
            htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>')
            if row_switch==2:
                htmlfile.write('</tr>')
            row_switch+=1
    #
    if row_switch==2:
        htmlfile.write('<td></td></tr>')
    elif row_switch==1:
        htmlfile.write('<td></td><td></td></tr>')
    htmlfile.write('</table>')
    htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>')

    summary_file.add_section('1D ranking kde')
    summary_file.add_section('1D ranking bins')
    oneDplotPaths=[]
    for param in oneDMenu:
        try:
            par_index=paramnames.index(param)
            i=par_index
        except ValueError:
            print "No input chain for %s, skipping 1D plot."%param
            continue
        pos_samps=pos[:,i]
        injpar_=None
        if injection:
            injpar_=getinjpar(paramnames,injection,i)
        print "Generating 1D plot for %s."%param
        rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_)
        oneDplotPath=os.path.join(outdir,param+'.png')
        plotFig.savefig(os.path.join(outdir,param+'.png'))
        if rbins:
            print "r of injected value of %s (bins) = %f"%(param, rbins)
        ##Produce plot of raw samples
        myfig=plt.figure(figsize=(4,3.5),dpi=80)
        plt.plot(pos_samps,'.',figure=myfig)
        if injpar_:
            if min(pos_samps)<injpar_ and max(pos_samps)>injpar_:
                plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.')
        myfig.savefig(os.path.join(outdir,param+'_samps.png'))
        #summary_file.set('1D ranking kde',param,rkde)
        summary_file.set('1D ranking bins',param,rbins)
        oneDplotPaths.append(oneDplotPath)

    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')
    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()

    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')
    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):
    if eventnum is not None and injfile is None:
        print "You specified an event number but no injection file. Ignoring!"
    if data is None:
        print 'You must specify an input data file'
        exit(1)
    #
    if outdir is None:
        print "You must specify an output directory."
        exit(1)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #
    summary_fo=open(os.path.join(outdir,'summary.ini'),'w')
    summary_file=ConfigParser()
    summary_file.add_section('metadata')
    summary_file.set('metadata','group_id','X')
    if eventnum:
        summary_file.set('metadata','event_id',str(eventnum))
    summary_file.add_section('Confidence levels')
    summary_file.set('Confidence levels','confidence levels',str(confidence_levels))

    # Load in the main data
    paramnames, pos=loadDataFile(data[0])

    #Generate any required derived parameters
    if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames:
        (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')])
        pos=np.column_stack((pos,m1,m2))
        paramnames.append("m1")
        paramnames.append("m2")
    #
    Nd=len(paramnames)
    print "Number of posterior samples: " + str(size(pos,0))

    # Calculate means
    means = mean(pos,axis=0)
    meanStr=map(str,means)
    out=reduce(lambda a,b:a+'||'+b,meanStr)
    print 'Means:'
    print '||'+out+'||'

    RAdim=paramnames.index('RA')
    decdim=paramnames.index('dec')

    injection=None
    # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file
    if injfile:
        import itertools
        injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile])
        if(eventnum is not None):
            if(len(injections)<eventnum):
                print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections))
                sys.exit(1)
            else:
                injection=injections[eventnum]
        else:
            if(len(injections)<1):
                print 'Warning: Cannot find injection with end time %f' %(means[2])
            else:
                injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next()

    #If injection parameter passed load object representation of injection
    #table entries.
    if injection:
        injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames)))
        injvals=map(str,injpoint)
        out=reduce(lambda a,b:a+'||'+b,injvals)
        print 'Injected values:'
        print out
        #Add injection values to output file
        summary_file.add_section('Injection values')
        for parnum in range(len(paramnames)):
            summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum))
    #
    #If sky resolution parameter has been specified try and create sky map.
    skyreses=None
    if skyres is not None:
        RAvec=array(pos)[:,paramnames.index('RA')]
        decvec=array(pos)[:,paramnames.index('dec')]
        skypos=column_stack([RAvec,decvec])
        injvalues=None
        if injection:
            injvalues=(injpoint[RAdim],injpoint[decdim])
        skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir)

    #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs
    #using a greedy algorithm . The ranked pixels (toppoints) are used
    #to plot 2D histograms and evaluate Bayesian confidence intervals.
    summary_file.add_section('2D greedy cl')
    summary_file.add_section('2D greedy cl inj')

    ncon=len(confidence_levels)
    pos_array=np.array(pos)
    twoDGreedyCL={}
    twoDGreedyInj={}
    for par1_name,par2_name in twoDGreedyMenu:
        print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name)
        #Bin sizes
        try:
            par1_bin=GreedyRes[par1_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_bin=GreedyRes[par2_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        #Get posterior samples
        try:
            par1_index=paramnames.index(par1_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_index=paramnames.index(par2_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue

        pars_name=("%s,%s"%(par1_name,par2_name)).lower()
        par1pos=pos_array[:,par1_index]
        par2pos=pos_array[:,par2_index]
        injection_=None
        if injection:
            par1_injvalue=np.array(injpoint)[par1_index]
            par2_injvalue=np.array(injpoint)[par2_index]
            injection_=(par1_injvalue,par2_injvalue)
        posterior_array=column_stack([par1pos,par2pos])
        toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_)

        #Plot 2D histograms of greedily binned points
        if injection is not None and par1_injvalue is not None and par2_injvalue is not None:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue])
        else:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name))
        #
        summaryString='['
        for frac,area in twoDGreedyCL[pars_name].items():
            summaryString+=str(area)
        summary_file.set('2D greedy cl',pars_name,summaryString+']')
        summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence))

    if not os.path.exists(os.path.join(outdir,'pickle')):
        os.makedirs(os.path.join(outdir,'pickle'))
    pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle'))
    pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle'))
    for par in twoDGreedyCL.keys():
        oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat'))

    #1D binning
    summary_file.add_section('1D mean')
    summary_file.add_section('1D median')
    summary_file.add_section('1D mode')
    summary_file.add_section('1D contigious cl')
    summary_file.add_section('1D greedy cl')
    summary_file.add_section('1D stacc')

    oneDStats={}
    oneDGreedyCL={}
    oneDContCL={}
    oneDGreedyInj={}
    oneDContInj={}

    max_pos,max_i=posMode(pos_array)

    #Loop over each parameter and determine the contigious and greedy
    #confidence levels and some statistics.
    for par_name in oneDMenu:
        print "Binning %s to determine confidence levels ..."%par_name
        try:
            par_index=paramnames.index(par_name)
        except ValueError:
            print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print "Bin size is not set for %s, skipping binning."%par_name
            continue

        oneDGreedyCL[par_name]={}
        oneDStats[par_name]={}
        oneDContCL[par_name]={}
        oneDGreedyInj[par_name]={}
        oneDContInj[par_name]={}

        par_samps=pos_array[:,par_index]
        summary_file.set('1D mode',par_name,str(par_samps[max_i]))
        summary_file.set("1D mean",par_name,str(np.mean(par_samps)))
        summary_file.set("1D median",par_name,str(np.median(par_samps)))
        oneDStats[par_name]['mode']=par_samps[max_i]
        oneDStats[par_name]['mean']=np.mean(par_samps)
        oneDStats[par_name]['median']=np.median(par_samps)

        par_injvalue_=None
        if injection:
            par_injvalue_=np.array(injpoint)[par_index]
        oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)

        #Ilya's standard accuracy statistic
        if injection:
            injvalue=np.array(injpoint)[par_index]
            if injvalue:
                stacc=bppu.stacc_stat(par_samps,injvalue)
                summary_file.set('1D stacc',par_name,str(stacc))
                oneDStats[par_name]['stacc']=stacc

    pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle'))
    pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle'))
    pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle'))
    pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle'))
    pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle'))
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat'))
    #
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat'))
    #
    #####Generate 2D kde plots and webpage########
    margdir=os.path.join(outdir,'2D')
    if not os.path.isdir(margdir):
        os.makedirs(margdir)
    twoDKdePaths=[]
    for par1,par2 in twoDplots:
        try:
            i=paramnames.index(par1)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2)
            continue
        try:
            j=paramnames.index(par2)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2)
            continue
        print 'Generating %s-%s plot'%(paramnames[i],paramnames[j])
        if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2):
            continue
        par_injvalues_=None
        if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) :
            if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None:
                par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) )
        myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_)
        twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png')
        twoDKdePaths.append(twoDKdePath)
        myfig.savefig(twoDKdePath)

    htmlfile=open(os.path.join(outdir,'posplots.html'),'w')
    htmlfile.write('<HTML><HEAD><TITLE>Posterior PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>')
    if(skyres is not None):
        htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>')
        for (frac,skysize) in skyreses:
            htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize))
        htmlfile.write('</table>')
    htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>')
    htmlfile.write('Samples read from %s<br>'%(data[0]))
    htmlfile.write('<h4>Mean parameter estimates</h4>')
    htmlfile.write('<table border=1 width=100%><tr>')
    paramline=reduce(lambda a,b:a+'<td>'+b,paramnames)
    htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>')
    meanline=reduce(lambda a,b:a+'<td>'+b,meanStr)
    htmlfile.write('<td>'+meanline+'</tr>')
    if injection:
        htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>')
        injline=reduce(lambda a,b:a+'<td>'+b,injvals)
        htmlfile.write('<td>'+injline+'<td></tr>')
    htmlfile.write('</table>')
    if injection:
        if skyinjectionconfidence:
            htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence))
        else:
            htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>')
    htmlfile.write('<h5>2D Marginal PDFs</h5><br>')
    htmlfile.write('<table border=1 width=100%><tr>')
    #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>')
    #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>')
    if skyres is not None:
        htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>')
    else:
        htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>')
    #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>')
    row_switch=1
    for par1,par2 in twoDplots:
        if row_switch==3:
            row_switch=0
        plot_path=None
        if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')):
            plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png'
        elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')):
            plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png'
        if plot_path:
            if row_switch==0:
                htmlfile.write('<tr>')
            htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>')
            if row_switch==2:
                htmlfile.write('</tr>')
            row_switch+=1
    #
    if row_switch==2:
        htmlfile.write('<td></td></tr>')
    elif row_switch==1:
        htmlfile.write('<td></td><td></td></tr>')
    htmlfile.write('</table>')
    htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>')

    summary_file.add_section('1D ranking kde')
    summary_file.add_section('1D ranking bins')
    oneDplotPaths=[]
    for param in oneDMenu:
        try:
            par_index=paramnames.index(param)
            i=par_index
        except ValueError:
            print "No input chain for %s, skipping 1D plot."%param
            continue
        pos_samps=pos[:,i]
        injpar_=None
        if injection:
            injpar_=getinjpar(paramnames,injection,i)
        print "Generating 1D plot for %s."%param
        rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_)
        oneDplotPath=os.path.join(outdir,param+'.png')
        plotFig.savefig(os.path.join(outdir,param+'.png'))
        if rbins:
            print "r of injected value of %s (bins) = %f"%(param, rbins)
        ##Produce plot of raw samples
        myfig=plt.figure(figsize=(4,3.5),dpi=80)
        plt.plot(pos_samps,'.',figure=myfig)
        if injpar_:
            if min(pos_samps)<injpar_ and max(pos_samps)>injpar_:
                plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.')
        myfig.savefig(os.path.join(outdir,param+'_samps.png'))
        #summary_file.set('1D ranking kde',param,rkde)
        summary_file.set('1D ranking bins',param,rbins)
        oneDplotPaths.append(oneDplotPath)

    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')
    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()

    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')
    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
479,767
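Both cells of this row implement the same post-processing entry point, whose arguments are easiest to see in a call. A sketch of a driver, assuming the module-level helpers (loadDataFile, bppu, and the rest) are importable; every file name, menu entry, and bin size below is an illustrative assumption, not part of the dataset:

oneDMenu = ['mchirp', 'eta', 'RA', 'dec']                  # 1D marginals to bin
twoDGreedyMenu = [('RA', 'dec'), ('mchirp', 'eta')]        # pairs for greedy 2D binning
twoDplots = [('RA', 'dec'), ('m1', 'm2')]                  # pairs for the kde plots
GreedyRes = {'mchirp': 0.01, 'eta': 0.005, 'RA': 0.05, 'dec': 0.05}  # bin size per parameter

cbcBayesSkyRes('results', ['posterior_samples.dat'],
               oneDMenu, twoDGreedyMenu, GreedyRes,
               [0.67, 0.9, 0.95],      # confidence_levels
               twoDplots,
               injfile='injections.xml', eventnum=0, skyres=1.0)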
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):
    if eventnum is not None and injfile is None:
        print "You specified an event number but no injection file. Ignoring!"
    if data is None:
        print 'You must specify an input data file'
        exit(1)
    #
    if outdir is None:
        print "You must specify an output directory."
        exit(1)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #
    summary_fo=open(os.path.join(outdir,'summary.ini'),'w')
    summary_file=ConfigParser()
    summary_file.add_section('metadata')
    summary_file.set('metadata','group_id','X')
    if eventnum:
        summary_file.set('metadata','event_id',str(eventnum))
    summary_file.add_section('Confidence levels')
    summary_file.set('Confidence levels','confidence levels',str(confidence_levels))

    # Load in the main data
    paramnames, pos=loadDataFile(data[0])

    #Generate any required derived parameters
    if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames:
        (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')])
        pos=np.column_stack((pos,m1,m2))
        paramnames.append("m1")
        paramnames.append("m2")
    #
    Nd=len(paramnames)
    print "Number of posterior samples: " + str(size(pos,0))

    # Calculate means
    means = mean(pos,axis=0)
    meanStr=map(str,means)
    out=reduce(lambda a,b:a+'||'+b,meanStr)
    print 'Means:'
    print '||'+out+'||'

    RAdim=paramnames.index('RA')
    decdim=paramnames.index('dec')

    injection=None
    # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file
    if injfile:
        import itertools
        injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile])
        if(eventnum is not None):
            if(len(injections)<eventnum):
                print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections))
                sys.exit(1)
            else:
                injection=injections[eventnum]
        else:
            if(len(injections)<1):
                print 'Warning: Cannot find injection with end time %f' %(means[2])
            else:
                injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next()

    #If injection parameter passed load object representation of injection
    #table entries.
    if injection:
        injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames)))
        injvals=map(str,injpoint)
        out=reduce(lambda a,b:a+'||'+b,injvals)
        print 'Injected values:'
        print out
        #Add injection values to output file
        summary_file.add_section('Injection values')
        for parnum in range(len(paramnames)):
            summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum))
    #
    #If sky resolution parameter has been specified try and create sky map.
    skyreses=None
    if skyres is not None:
        RAvec=array(pos)[:,paramnames.index('RA')]
        decvec=array(pos)[:,paramnames.index('dec')]
        skypos=column_stack([RAvec,decvec])
        injvalues=None
        if injection:
            injvalues=(injpoint[RAdim],injpoint[decdim])
        skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir)

    #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs
    #using a greedy algorithm . The ranked pixels (toppoints) are used
    #to plot 2D histograms and evaluate Bayesian confidence intervals.
    summary_file.add_section('2D greedy cl')
    summary_file.add_section('2D greedy cl inj')

    ncon=len(confidence_levels)
    pos_array=np.array(pos)
    twoDGreedyCL={}
    twoDGreedyInj={}
    for par1_name,par2_name in twoDGreedyMenu:
        print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name)
        #Bin sizes
        try:
            par1_bin=GreedyRes[par1_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_bin=GreedyRes[par2_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        #Get posterior samples
        try:
            par1_index=paramnames.index(par1_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_index=paramnames.index(par2_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue

        pars_name=("%s,%s"%(par1_name,par2_name)).lower()
        par1pos=pos_array[:,par1_index]
        par2pos=pos_array[:,par2_index]
        injection_=None
        if injection:
            par1_injvalue=np.array(injpoint)[par1_index]
            par2_injvalue=np.array(injpoint)[par2_index]
            injection_=(par1_injvalue,par2_injvalue)
        posterior_array=column_stack([par1pos,par2pos])
        toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_)

        #Plot 2D histograms of greedily binned points
        if injection is not None and par1_injvalue is not None and par2_injvalue is not None:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue])
        else:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name))
        #
        summaryString='['
        for frac,area in twoDGreedyCL[pars_name].items():
            summaryString+=str(area)
        summary_file.set('2D greedy cl',pars_name,summaryString+']')
        summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence))

    if not os.path.exists(os.path.join(outdir,'pickle')):
        os.makedirs(os.path.join(outdir,'pickle'))
    pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle'))
    pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle'))
    for par in twoDGreedyCL.keys():
        oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat'))

    #1D binning
    summary_file.add_section('1D mean')
    summary_file.add_section('1D median')
    summary_file.add_section('1D mode')
    summary_file.add_section('1D contigious cl')
    summary_file.add_section('1D greedy cl')
    summary_file.add_section('1D stacc')

    oneDStats={}
    oneDGreedyCL={}
    oneDContCL={}
    oneDGreedyInj={}
    oneDContInj={}

    max_pos,max_i=posMode(pos_array)

    #Loop over each parameter and determine the contigious and greedy
    #confidence levels and some statistics.
    for par_name in oneDMenu:
        print "Binning %s to determine confidence levels ..."%par_name
        try:
            par_index=paramnames.index(par_name)
        except ValueError:
            print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print "Bin size is not set for %s, skipping binning."%par_name
            continue

        oneDGreedyCL[par_name]={}
        oneDStats[par_name]={}
        oneDContCL[par_name]={}
        oneDGreedyInj[par_name]={}
        oneDContInj[par_name]={}

        par_samps=pos_array[:,par_index]
        summary_file.set('1D mode',par_name,str(par_samps[max_i]))
        summary_file.set("1D mean",par_name,str(np.mean(par_samps)))
        summary_file.set("1D median",par_name,str(np.median(par_samps)))
        oneDStats[par_name]['mode']=par_samps[max_i]
        oneDStats[par_name]['mean']=np.mean(par_samps)
        oneDStats[par_name]['median']=np.median(par_samps)

        par_injvalue_=None
        if injection:
            par_injvalue_=np.array(injpoint)[par_index]
        oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)

        #Ilya's standard accuracy statistic
        if injection:
            injvalue=np.array(injpoint)[par_index]
            if injvalue:
                stacc=bppu.stacc_stat(par_samps,injvalue)
                summary_file.set('1D stacc',par_name,str(stacc))
                oneDStats[par_name]['stacc']=stacc

    pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle'))
    pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle'))
    pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle'))
    pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle'))
    pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle'))
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat'))
    #
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat'))
    #
    #####Generate 2D kde plots and webpage########
    margdir=os.path.join(outdir,'2D')
    if not os.path.isdir(margdir):
        os.makedirs(margdir)
    twoDKdePaths=[]
    for par1,par2 in twoDplots:
        try:
            i=paramnames.index(par1)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2)
            continue
        try:
            j=paramnames.index(par2)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2)
            continue
        print 'Generating %s-%s plot'%(paramnames[i],paramnames[j])
        if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2):
            continue
        par_injvalues_=None
        if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) :
            if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None:
                par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) )
        myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_)
        twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png')
        twoDKdePaths.append(twoDKdePath)
        myfig.savefig(twoDKdePath)

    htmlfile=open(os.path.join(outdir,'posplots.html'),'w')
    htmlfile.write('<HTML><HEAD><TITLE>Posterior PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>')
    if(skyres is not None):
        htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>')
        for (frac,skysize) in skyreses:
            htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize))
        htmlfile.write('</table>')
    htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>')
    htmlfile.write('Samples read from %s<br>'%(data[0]))
    htmlfile.write('<h4>Mean parameter estimates</h4>')
    htmlfile.write('<table border=1><tr>')
    paramline=reduce(lambda a,b:a+'<td>'+b,paramnames)
    htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>')
    meanline=reduce(lambda a,b:a+'<td>'+b,meanStr)
    htmlfile.write('<td>'+meanline+'</tr>')
    if injection:
        htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>')
        injline=reduce(lambda a,b:a+'<td>'+b,injvals)
        htmlfile.write('<td>'+injline+'<td></tr>')
    htmlfile.write('</table>')
    if injection:
        if skyinjectionconfidence:
            htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence))
        else:
            htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>')
    htmlfile.write('<h5>2D Marginal PDFs</h5><br>')
    htmlfile.write('<table border=1><tr>')
    #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>')
    #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>')
    if skyres is not None:
        htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>')
    else:
        htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>')
    #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>')
    row_switch=1
    for par1,par2 in twoDplots:
        if row_switch==3:
            row_switch=0
        plot_path=None
        if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')):
            plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png'
        elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')):
            plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png'
        if plot_path:
            if row_switch==0:
                htmlfile.write('<tr>')
            htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>')
            if row_switch==2:
                htmlfile.write('</tr>')
            row_switch+=1
    #
    if row_switch==2:
        htmlfile.write('<td></td></tr>')
    elif row_switch==1:
        htmlfile.write('<td></td><td></td></tr>')
    htmlfile.write('</table>')
    htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>')

    summary_file.add_section('1D ranking kde')
    summary_file.add_section('1D ranking bins')
    oneDplotPaths=[]
    for param in oneDMenu:
        try:
            par_index=paramnames.index(param)
            i=par_index
        except ValueError:
            print "No input chain for %s, skipping 1D plot."%param
            continue
        pos_samps=pos[:,i]
        injpar_=None
        if injection:
            injpar_=getinjpar(paramnames,injection,i)
        print "Generating 1D plot for %s."%param
        rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_)
        oneDplotPath=os.path.join(outdir,param+'.png')
        plotFig.savefig(os.path.join(outdir,param+'.png'))
        if rbins:
            print "r of injected value of %s (bins) = %f"%(param, rbins)
        ##Produce plot of raw samples
        myfig=plt.figure(figsize=(4,3.5),dpi=80)
        plt.plot(pos_samps,'.',figure=myfig)
        if injpar_:
            if min(pos_samps)<injpar_ and max(pos_samps)>injpar_:
                plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.')
        myfig.savefig(os.path.join(outdir,param+'_samps.png'))
        #summary_file.set('1D ranking kde',param,rkde)
        summary_file.set('1D ranking bins',param,rbins)
        oneDplotPaths.append(oneDplotPath)

    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')
    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()

    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')
    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):
    if eventnum is not None and injfile is None:
        print "You specified an event number but no injection file. Ignoring!"
    if data is None:
        print 'You must specify an input data file'
        exit(1)
    #
    if outdir is None:
        print "You must specify an output directory."
        exit(1)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #
    summary_fo=open(os.path.join(outdir,'summary.ini'),'w')
    summary_file=ConfigParser()
    summary_file.add_section('metadata')
    summary_file.set('metadata','group_id','X')
    if eventnum:
        summary_file.set('metadata','event_id',str(eventnum))
    summary_file.add_section('Confidence levels')
    summary_file.set('Confidence levels','confidence levels',str(confidence_levels))

    # Load in the main data
    paramnames, pos=loadDataFile(data[0])

    #Generate any required derived parameters
    if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames:
        (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')])
        pos=np.column_stack((pos,m1,m2))
        paramnames.append("m1")
        paramnames.append("m2")
    #
    Nd=len(paramnames)
    print "Number of posterior samples: " + str(size(pos,0))

    # Calculate means
    means = mean(pos,axis=0)
    meanStr=map(str,means)
    out=reduce(lambda a,b:a+'||'+b,meanStr)
    print 'Means:'
    print '||'+out+'||'

    RAdim=paramnames.index('RA')
    decdim=paramnames.index('dec')

    injection=None
    # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file
    if injfile:
        import itertools
        injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile])
        if(eventnum is not None):
            if(len(injections)<eventnum):
                print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections))
                sys.exit(1)
            else:
                injection=injections[eventnum]
        else:
            if(len(injections)<1):
                print 'Warning: Cannot find injection with end time %f' %(means[2])
            else:
                injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next()

    #If injection parameter passed load object representation of injection
    #table entries.
    if injection:
        injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames)))
        injvals=map(str,injpoint)
        out=reduce(lambda a,b:a+'||'+b,injvals)
        print 'Injected values:'
        print out
        #Add injection values to output file
        summary_file.add_section('Injection values')
        for parnum in range(len(paramnames)):
            summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum))
    #
    #If sky resolution parameter has been specified try and create sky map.
    skyreses=None
    if skyres is not None:
        RAvec=array(pos)[:,paramnames.index('RA')]
        decvec=array(pos)[:,paramnames.index('dec')]
        skypos=column_stack([RAvec,decvec])
        injvalues=None
        if injection:
            injvalues=(injpoint[RAdim],injpoint[decdim])
        skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir)

    #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs
    #using a greedy algorithm . The ranked pixels (toppoints) are used
    #to plot 2D histograms and evaluate Bayesian confidence intervals.
    summary_file.add_section('2D greedy cl')
    summary_file.add_section('2D greedy cl inj')

    ncon=len(confidence_levels)
    pos_array=np.array(pos)
    twoDGreedyCL={}
    twoDGreedyInj={}
    for par1_name,par2_name in twoDGreedyMenu:
        print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name)
        #Bin sizes
        try:
            par1_bin=GreedyRes[par1_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_bin=GreedyRes[par2_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        #Get posterior samples
        try:
            par1_index=paramnames.index(par1_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_index=paramnames.index(par2_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue

        pars_name=("%s,%s"%(par1_name,par2_name)).lower()
        par1pos=pos_array[:,par1_index]
        par2pos=pos_array[:,par2_index]
        injection_=None
        if injection:
            par1_injvalue=np.array(injpoint)[par1_index]
            par2_injvalue=np.array(injpoint)[par2_index]
            injection_=(par1_injvalue,par2_injvalue)
        posterior_array=column_stack([par1pos,par2pos])
        toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_)

        #Plot 2D histograms of greedily binned points
        if injection is not None and par1_injvalue is not None and par2_injvalue is not None:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue])
        else:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name))
        #
        summaryString='['
        for frac,area in twoDGreedyCL[pars_name].items():
            summaryString+=str(area)
        summary_file.set('2D greedy cl',pars_name,summaryString+']')
        summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence))

    if not os.path.exists(os.path.join(outdir,'pickle')):
        os.makedirs(os.path.join(outdir,'pickle'))
    pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle'))
    pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle'))
    for par in twoDGreedyCL.keys():
        oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat'))

    #1D binning
    summary_file.add_section('1D mean')
    summary_file.add_section('1D median')
    summary_file.add_section('1D mode')
    summary_file.add_section('1D contigious cl')
    summary_file.add_section('1D greedy cl')
    summary_file.add_section('1D stacc')

    oneDStats={}
    oneDGreedyCL={}
    oneDContCL={}
    oneDGreedyInj={}
    oneDContInj={}

    max_pos,max_i=posMode(pos_array)

    #Loop over each parameter and determine the contigious and greedy
    #confidence levels and some statistics.
    for par_name in oneDMenu:
        print "Binning %s to determine confidence levels ..."%par_name
        try:
            par_index=paramnames.index(par_name)
        except ValueError:
            print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print "Bin size is not set for %s, skipping binning."%par_name
            continue

        oneDGreedyCL[par_name]={}
        oneDStats[par_name]={}
        oneDContCL[par_name]={}
        oneDGreedyInj[par_name]={}
        oneDContInj[par_name]={}

        par_samps=pos_array[:,par_index]
        summary_file.set('1D mode',par_name,str(par_samps[max_i]))
        summary_file.set("1D mean",par_name,str(np.mean(par_samps)))
        summary_file.set("1D median",par_name,str(np.median(par_samps)))
        oneDStats[par_name]['mode']=par_samps[max_i]
        oneDStats[par_name]['mean']=np.mean(par_samps)
        oneDStats[par_name]['median']=np.median(par_samps)

        par_injvalue_=None
        if injection:
            par_injvalue_=np.array(injpoint)[par_index]
        oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)

        #Ilya's standard accuracy statistic
        if injection:
            injvalue=np.array(injpoint)[par_index]
            if injvalue:
                stacc=bppu.stacc_stat(par_samps,injvalue)
                summary_file.set('1D stacc',par_name,str(stacc))
                oneDStats[par_name]['stacc']=stacc

    pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle'))
    pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle'))
    pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle'))
    pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle'))
    pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle'))
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat'))
    #
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat'))
    #
    #####Generate 2D kde plots and webpage########
    margdir=os.path.join(outdir,'2D')
    if not os.path.isdir(margdir):
        os.makedirs(margdir)
    twoDKdePaths=[]
    for par1,par2 in twoDplots:
        try:
            i=paramnames.index(par1)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2)
            continue
        try:
            j=paramnames.index(par2)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2)
            continue
        print 'Generating %s-%s plot'%(paramnames[i],paramnames[j])
        if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2):
            continue
        par_injvalues_=None
        if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) :
            if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None:
                par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) )
        myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_)
        twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png')
        twoDKdePaths.append(twoDKdePath)
        myfig.savefig(twoDKdePath)

    htmlfile=open(os.path.join(outdir,'posplots.html'),'w')
    htmlfile.write('<HTML><HEAD><TITLE>Posterior PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>')
    if(skyres is not None):
        htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>')
        for (frac,skysize) in skyreses:
            htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize))
        htmlfile.write('</table>')
    htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>')
    htmlfile.write('Samples read from %s<br>'%(data[0]))
    htmlfile.write('<h4>Mean parameter estimates</h4>')
    htmlfile.write('<table border=1><tr>')
    paramline=reduce(lambda a,b:a+'<td>'+b,paramnames)
    htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>')
    meanline=reduce(lambda a,b:a+'<td>'+b,meanStr)
    htmlfile.write('<td>'+meanline+'</tr>')
    if injection:
        htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>')
        injline=reduce(lambda a,b:a+'<td>'+b,injvals)
        htmlfile.write('<td>'+injline+'<td></tr>')
    htmlfile.write('</table>')
    if injection:
        if skyinjectionconfidence:
            htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence))
        else:
            htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>')
    htmlfile.write('<h5>2D Marginal PDFs</h5><br>')
    htmlfile.write('<table border=1><tr>')
    #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>')
    #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>')
    if skyres is not None:
        htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>')
    else:
        htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>')
    #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>')
    row_switch=1
    for par1,par2 in twoDplots:
        if row_switch==3:
            row_switch=0
        plot_path=None
        if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')):
            plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png'
        elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')):
            plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png'
        if plot_path:
            if row_switch==0:
                htmlfile.write('<tr>')
            htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>')
            if row_switch==2:
                htmlfile.write('</tr>')
            row_switch+=1
    #
    if row_switch==2:
        htmlfile.write('<td></td></tr>')
    elif row_switch==1:
        htmlfile.write('<td></td><td></td></tr>')
    htmlfile.write('</table>')
    htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>')

    summary_file.add_section('1D ranking kde')
    summary_file.add_section('1D ranking bins')
    oneDplotPaths=[]
    for param in oneDMenu:
        try:
            par_index=paramnames.index(param)
            i=par_index
        except ValueError:
            print "No input chain for %s, skipping 1D plot."%param
            continue
        pos_samps=pos[:,i]
        injpar_=None
        if injection:
            injpar_=getinjpar(paramnames,injection,i)
        print "Generating 1D plot for %s."%param
        rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_)
        figname=param+'.png'
        oneDplotPath=os.path.join(outdir,figname)
        plotFig.savefig(os.path.join(outdir,param+'.png'))
        if rbins:
            print "r of injected value of %s (bins) = %f"%(param, rbins)
        ##Produce plot of raw samples
        myfig=plt.figure(figsize=(4,3.5),dpi=80)
        plt.plot(pos_samps,'.',figure=myfig)
        if injpar_:
            if min(pos_samps)<injpar_ and max(pos_samps)>injpar_:
                plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.')
        myfig.savefig(os.path.join(outdir,param+'_samps.png'))
        #summary_file.set('1D ranking kde',param,rkde)
        summary_file.set('1D ranking bins',param,rbins)
        oneDplotPaths.append(oneDplotPath)

    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')
    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()

    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')
    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
479,768
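The fixed cell above introduces figname for the 1D plot path but still rebuilds the same path inside the savefig call. A fully factored version of that step would compute the path once and reuse it everywhere; this is a sketch of that refinement, not the dataset's code:

import os

def save_marginal_plot(fig, outdir, param):
    # build the output path once and reuse it for saving and for the html index
    path = os.path.join(outdir, param + '.png')
    fig.savefig(path)
    return path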
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):
    if eventnum is not None and injfile is None:
        print "You specified an event number but no injection file. Ignoring!"
    if data is None:
        print 'You must specify an input data file'
        exit(1)
    #
    if outdir is None:
        print "You must specify an output directory."
        exit(1)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #
    summary_fo=open(os.path.join(outdir,'summary.ini'),'w')
    summary_file=ConfigParser()
    summary_file.add_section('metadata')
    summary_file.set('metadata','group_id','X')
    if eventnum:
        summary_file.set('metadata','event_id',str(eventnum))
    summary_file.add_section('Confidence levels')
    summary_file.set('Confidence levels','confidence levels',str(confidence_levels))

    # Load in the main data
    paramnames, pos=loadDataFile(data[0])

    #Generate any required derived parameters
    if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames:
        (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')])
        pos=np.column_stack((pos,m1,m2))
        paramnames.append("m1")
        paramnames.append("m2")
    #
    Nd=len(paramnames)
    print "Number of posterior samples: " + str(size(pos,0))

    # Calculate means
    means = mean(pos,axis=0)
    meanStr=map(str,means)
    out=reduce(lambda a,b:a+'||'+b,meanStr)
    print 'Means:'
    print '||'+out+'||'

    RAdim=paramnames.index('RA')
    decdim=paramnames.index('dec')

    injection=None
    # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file
    if injfile:
        import itertools
        injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile])
        if(eventnum is not None):
            if(len(injections)<eventnum):
                print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections))
                sys.exit(1)
            else:
                injection=injections[eventnum]
        else:
            if(len(injections)<1):
                print 'Warning: Cannot find injection with end time %f' %(means[2])
            else:
                injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next()

    #If injection parameter passed load object representation of injection
    #table entries.
    if injection:
        injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames)))
        injvals=map(str,injpoint)
        out=reduce(lambda a,b:a+'||'+b,injvals)
        print 'Injected values:'
        print out
        #Add injection values to output file
        summary_file.add_section('Injection values')
        for parnum in range(len(paramnames)):
            summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum))
    #
    #If sky resolution parameter has been specified try and create sky map.
    skyreses=None
    if skyres is not None:
        RAvec=array(pos)[:,paramnames.index('RA')]
        decvec=array(pos)[:,paramnames.index('dec')]
        skypos=column_stack([RAvec,decvec])
        injvalues=None
        if injection:
            injvalues=(injpoint[RAdim],injpoint[decdim])
        skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir)

    #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs
    #using a greedy algorithm . The ranked pixels (toppoints) are used
    #to plot 2D histograms and evaluate Bayesian confidence intervals.
    summary_file.add_section('2D greedy cl')
    summary_file.add_section('2D greedy cl inj')

    ncon=len(confidence_levels)
    pos_array=np.array(pos)
    twoDGreedyCL={}
    twoDGreedyInj={}
    for par1_name,par2_name in twoDGreedyMenu:
        print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name)
        #Bin sizes
        try:
            par1_bin=GreedyRes[par1_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_bin=GreedyRes[par2_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        #Get posterior samples
        try:
            par1_index=paramnames.index(par1_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_index=paramnames.index(par2_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue

        pars_name=("%s,%s"%(par1_name,par2_name)).lower()
        par1pos=pos_array[:,par1_index]
        par2pos=pos_array[:,par2_index]
        injection_=None
        if injection:
            par1_injvalue=np.array(injpoint)[par1_index]
            par2_injvalue=np.array(injpoint)[par2_index]
            injection_=(par1_injvalue,par2_injvalue)
        posterior_array=column_stack([par1pos,par2pos])
        toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_)

        #Plot 2D histograms of greedily binned points
        if injection is not None and par1_injvalue is not None and par2_injvalue is not None:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue])
        else:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name))
        #
        summaryString='['
        for frac,area in twoDGreedyCL[pars_name].items():
            summaryString+=str(area)
        summary_file.set('2D greedy cl',pars_name,summaryString+']')
        summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence))

    if not os.path.exists(os.path.join(outdir,'pickle')):
        os.makedirs(os.path.join(outdir,'pickle'))
    pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle'))
    pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle'))
    for par in twoDGreedyCL.keys():
        oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat'))

    #1D binning
    summary_file.add_section('1D mean')
    summary_file.add_section('1D median')
    summary_file.add_section('1D mode')
    summary_file.add_section('1D contigious cl')
    summary_file.add_section('1D greedy cl')
    summary_file.add_section('1D stacc')

    oneDStats={}
    oneDGreedyCL={}
    oneDContCL={}
    oneDGreedyInj={}
    oneDContInj={}

    max_pos,max_i=posMode(pos_array)

    #Loop over each parameter and determine the contigious and greedy
    #confidence levels and some statistics.
    for par_name in oneDMenu:
        print "Binning %s to determine confidence levels ..."%par_name
        try:
            par_index=paramnames.index(par_name)
        except ValueError:
            print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print "Bin size is not set for %s, skipping binning."%par_name
            continue

        oneDGreedyCL[par_name]={}
        oneDStats[par_name]={}
        oneDContCL[par_name]={}
        oneDGreedyInj[par_name]={}
        oneDContInj[par_name]={}

        par_samps=pos_array[:,par_index]
        summary_file.set('1D mode',par_name,str(par_samps[max_i]))
        summary_file.set("1D mean",par_name,str(np.mean(par_samps)))
        summary_file.set("1D median",par_name,str(np.median(par_samps)))
        oneDStats[par_name]['mode']=par_samps[max_i]
        oneDStats[par_name]['mean']=np.mean(par_samps)
        oneDStats[par_name]['median']=np.median(par_samps)

        par_injvalue_=None
        if injection:
            par_injvalue_=np.array(injpoint)[par_index]
        oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)

        #Ilya's standard accuracy statistic
        if injection:
            injvalue=np.array(injpoint)[par_index]
            if injvalue:
                stacc=bppu.stacc_stat(par_samps,injvalue)
                summary_file.set('1D stacc',par_name,str(stacc))
                oneDStats[par_name]['stacc']=stacc

    pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle'))
    pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle'))
    pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle'))
    pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle'))
    pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle'))
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat'))
    #
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat'))
    #
    #####Generate 2D kde plots and webpage########
    margdir=os.path.join(outdir,'2D')
    if not os.path.isdir(margdir):
        os.makedirs(margdir)
    twoDKdePaths=[]
    for par1,par2 in twoDplots:
        try:
            i=paramnames.index(par1)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2)
            continue
        try:
            j=paramnames.index(par2)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2)
            continue
        print 'Generating %s-%s plot'%(paramnames[i],paramnames[j])
        if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2):
            continue
        par_injvalues_=None
        if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) :
            if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None:
                par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) )
        myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_)
        twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png')
        twoDKdePaths.append(twoDKdePath)
        myfig.savefig(twoDKdePath)

    htmlfile=open(os.path.join(outdir,'posplots.html'),'w')
    htmlfile.write('<HTML><HEAD><TITLE>Posterior
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins)
        oneDplotPaths.append(oneDplotPath)

    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')

    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()

    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')

    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,figname.replace('.png','_samps.png'))) #summary_file.set('1D ranking kde',param,rkde) 
    summary_file.set('1D ranking bins',param,rbins)
        oneDplotPaths.append(oneDplotPath)

    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')

    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()

    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')

    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
479,769
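Both strings in record 479,769 above are the same cbcBayesSkyRes function; they differ only in how the filename for the raw-sample plot is built inside the 1D plotting loop: param + '_samps.png' in the first string versus figname.replace('.png','_samps.png') in the second, where figname is referenced without its definition being visible in the string. A minimal runnable sketch of that delta, assuming figname was meant to hold the bare 1D plot filename param + '.png':

import os

# Hypothetical values, for illustration only.
outdir = 'webdir'
param = 'mchirp'
figname = param + '.png'   # assumed definition; not shown in the record itself

# Path built in the first string of the record:
path_a = os.path.join(outdir, param + '_samps.png')
# Path built in the second string of the record:
path_b = os.path.join(outdir, figname.replace('.png', '_samps.png'))

# Under this assumption the two constructions agree.
assert path_a == path_b == os.path.join('webdir', 'mchirp_samps.png')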
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins)
        oneDplotPaths.append(oneDplotPath)

    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')

    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()

    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')

    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins)
        oneDplotPaths.append(figname)

    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')

    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()

    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')

    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
479,770
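Record 479,770 above differs only in what is appended to oneDplotPaths, the list that posplots.html later embeds as <img src=...> targets: the first string appends the full oneDplotPath (outdir-prefixed), the second appends figname. A short sketch of why that matters, under the same assumed figname definition as above (since posplots.html is itself written inside outdir, only a bare filename gives a link that resolves relative to the page):

import os

outdir = 'webdir'
param = 'mchirp'
figname = param + '.png'                      # assumed bare filename, as above
oneDplotPath = os.path.join(outdir, figname)  # 'webdir/mchirp.png'

# First string of the record: the src is outdir-prefixed, so a browser viewing
# webdir/posplots.html would look for webdir/webdir/mchirp.png.
img_src_a = oneDplotPath
# Second string of the record: a bare filename resolves next to the page.
img_src_b = figname

print(img_src_a)  # webdir/mchirp.png
print(img_src_b)  # mchirp.png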
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):
    if eventnum is not None and injfile is None:
        print "You specified an event number but no injection file. Ignoring!"

    if data is None:
        print 'You must specify an input data file'
        exit(1)
    #
    if outdir is None:
        print "You must specify an output directory."
        exit(1)

    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #
    summary_fo=open(os.path.join(outdir,'summary.ini'),'w')
    summary_file=ConfigParser()
    summary_file.add_section('metadata')
    summary_file.set('metadata','group_id','X')
    if eventnum:
        summary_file.set('metadata','event_id',str(eventnum))
    summary_file.add_section('Confidence levels')
    summary_file.set('Confidence levels','confidence levels',str(confidence_levels))

    # Load in the main data
    paramnames, pos=loadDataFile(data[0])

    #Generate any required derived parameters
    if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames:
        (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')])
        pos=np.column_stack((pos,m1,m2))
        paramnames.append("m1")
        paramnames.append("m2")
    #
    Nd=len(paramnames)
    print "Number of posterior samples: " + str(size(pos,0))

    # Calculate means
    means = mean(pos,axis=0)
    meanStr=map(str,means)
    out=reduce(lambda a,b:a+'||'+b,meanStr)
    print 'Means:'
    print '||'+out+'||'

    RAdim=paramnames.index('RA')
    decdim=paramnames.index('dec')

    injection=None

    # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file
    if injfile:
        import itertools
        injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile])
        if(eventnum is not None):
            if(len(injections)<eventnum):
                print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections))
                sys.exit(1)
            else:
                injection=injections[eventnum]
        else:
            if(len(injections)<1):
                print 'Warning: Cannot find injection with end time %f' %(means[2])
            else:
                injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next()

    #If injection parameter passed load object representation of injection
    #table entries.
    if injection:
        injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames)))
        injvals=map(str,injpoint)
        out=reduce(lambda a,b:a+'||'+b,injvals)
        print 'Injected values:'
        print out

        #Add injection values to output file
        summary_file.add_section('Injection values')
        for parnum in range(len(paramnames)):
            summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum))
    #
    #If sky resolution parameter has been specified try and create sky map.
    skyreses=None
    if skyres is not None:
        RAvec=array(pos)[:,paramnames.index('RA')]
        decvec=array(pos)[:,paramnames.index('dec')]
        skypos=column_stack([RAvec,decvec])
        injvalues=None
        if injection:
            injvalues=(injpoint[RAdim],injpoint[decdim])
        skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir)

    #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs
    #using a greedy algorithm . The ranked pixels (toppoints) are used
    #to plot 2D histograms and evaluate Bayesian confidence intervals.
    summary_file.add_section('2D greedy cl')
    summary_file.add_section('2D greedy cl inj')

    ncon=len(confidence_levels)
    pos_array=np.array(pos)
    twoDGreedyCL={}
    twoDGreedyInj={}

    for par1_name,par2_name in twoDGreedyMenu:
        print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name)
        #Bin sizes
        try:
            par1_bin=GreedyRes[par1_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_bin=GreedyRes[par2_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        #Get posterior samples
        try:
            par1_index=paramnames.index(par1_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_index=paramnames.index(par2_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue

        pars_name=("%s,%s"%(par1_name,par2_name)).lower()
        par1pos=pos_array[:,par1_index]
        par2pos=pos_array[:,par2_index]

        injection_=None
        if injection:
            par1_injvalue=np.array(injpoint)[par1_index]
            par2_injvalue=np.array(injpoint)[par2_index]
            injection_=(par1_injvalue,par2_injvalue)

        posterior_array=column_stack([par1pos,par2pos])
        toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_)

        #Plot 2D histograms of greedily binned points
        if injection is not None and par1_injvalue is not None and par2_injvalue is not None:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue])
        else:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name))
        #
        summaryString='['
        for frac,area in twoDGreedyCL[pars_name].items():
            summaryString+=str(area)
        summary_file.set('2D greedy cl',pars_name,summaryString+']')
        summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence))

    if not os.path.exists(os.path.join(outdir,'pickle')):
        os.makedirs(os.path.join(outdir,'pickle'))
    pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle'))
    pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle'))
    for par in twoDGreedyCL.keys():
        oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat'))

    #1D binning
    summary_file.add_section('1D mean')
    summary_file.add_section('1D median')
    summary_file.add_section('1D mode')
    summary_file.add_section('1D contigious cl')
    summary_file.add_section('1D greedy cl')
    summary_file.add_section('1D stacc')

    oneDStats={}
    oneDGreedyCL={}
    oneDContCL={}
    oneDGreedyInj={}
    oneDContInj={}

    max_pos,max_i=posMode(pos_array)

    #Loop over each parameter and determine the contigious and greedy
    #confidence levels and some statistics.
    for par_name in oneDMenu:
        print "Binning %s to determine confidence levels ..."%par_name
        try:
            par_index=paramnames.index(par_name)
        except ValueError:
            print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print "Bin size is not set for %s, skipping binning."%par_name
            continue

        oneDGreedyCL[par_name]={}
        oneDStats[par_name]={}
        oneDContCL[par_name]={}
        oneDGreedyInj[par_name]={}
        oneDContInj[par_name]={}

        par_samps=pos_array[:,par_index]

        summary_file.set('1D mode',par_name,str(par_samps[max_i]))
        summary_file.set("1D mean",par_name,str(np.mean(par_samps)))
        summary_file.set("1D median",par_name,str(np.median(par_samps)))

        oneDStats[par_name]['mode']=par_samps[max_i]
        oneDStats[par_name]['mean']=np.mean(par_samps)
        oneDStats[par_name]['median']=np.median(par_samps)

        par_injvalue_=None
        if injection:
            par_injvalue_=np.array(injpoint)[par_index]

        oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)

        #Ilya's standard accuracy statistic
        if injection:
            injvalue=np.array(injpoint)[par_index]
            if injvalue:
                stacc=bppu.stacc_stat(par_samps,injvalue)
                summary_file.set('1D stacc',par_name,str(stacc))
                oneDStats[par_name]['stacc']=stacc

    pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle'))
    pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle'))
    pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle'))
    pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle'))
    pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle'))

    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat'))
    #
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat'))
    #
    #####Generate 2D kde plots and webpage########
    margdir=os.path.join(outdir,'2D')
    if not os.path.isdir(margdir):
        os.makedirs(margdir)

    twoDKdePaths=[]
    for par1,par2 in twoDplots:
        try:
            i=paramnames.index(par1)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2)
            continue
        try:
            j=paramnames.index(par2)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2)
            continue
        print 'Generating %s-%s plot'%(paramnames[i],paramnames[j])

        if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2):
            continue

        par_injvalues_=None
        if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) :
            if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None:
                par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) )

        myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_)
        twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png')
        twoDKdePaths.append(twoDKdePath)
        myfig.savefig(twoDKdePath)

    htmlfile=open(os.path.join(outdir,'posplots.html'),'w')
    htmlfile.write('<HTML><HEAD><TITLE>Posterior PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>')
    if(skyres is not None):
        htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>')
        for (frac,skysize) in skyreses:
            htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize))
        htmlfile.write('</table>')
    htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>')
    htmlfile.write('Samples read from %s<br>'%(data[0]))
    htmlfile.write('<h4>Mean parameter estimates</h4>')
    htmlfile.write('<table border=1><tr>')
    paramline=reduce(lambda a,b:a+'<td>'+b,paramnames)
    htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>')
    meanline=reduce(lambda a,b:a+'<td>'+b,meanStr)
    htmlfile.write('<td>'+meanline+'</tr>')
    if injection:
        htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>')
        injline=reduce(lambda a,b:a+'<td>'+b,injvals)
        htmlfile.write('<td>'+injline+'<td></tr>')
    htmlfile.write('</table>')
    if injection:
        if skyinjectionconfidence:
            htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence))
        else:
            htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>')
    htmlfile.write('<h5>2D Marginal PDFs</h5><br>')
    htmlfile.write('<table border=1><tr>')
    #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>')
    #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>')
    if skyres is not None:
        htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>')
    else:
        htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>')
    #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>')
    row_switch=1
    for par1,par2 in twoDplots:
        if row_switch==3:
            row_switch=0
        plot_path=None
        if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')):
            plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png'
        elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')):
            plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png'
        if plot_path:
            if row_switch==0:
                htmlfile.write('<tr>')
            htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>')
            if row_switch==2:
                htmlfile.write('</tr>')
            row_switch+=1
    #
    if row_switch==2:
        htmlfile.write('<td></td></tr>')
    elif row_switch==1:
        htmlfile.write('<td></td><td></td></tr>')
    htmlfile.write('</table>')
    htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>')

    summary_file.add_section('1D ranking kde')
    summary_file.add_section('1D ranking bins')

    oneDplotPaths=[]
    for param in oneDMenu:
        try:
            par_index=paramnames.index(param)
            i=par_index
        except ValueError:
            print "No input chain for %s, skipping 1D plot."%param
            continue

        pos_samps=pos[:,i]
        injpar_=None
        if injection:
            injpar_=getinjpar(paramnames,injection,i)

        print "Generating 1D plot for %s."%param
        rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_)
        oneDplotPath=os.path.join(outdir,param+'.png')
        plotFig.savefig(os.path.join(outdir,param+'.png'))
        if rbins:
            print "r of injected value of %s (bins) = %f"%(param, rbins)

        ##Produce plot of raw samples
        myfig=plt.figure(figsize=(4,3.5),dpi=80)
        plt.plot(pos_samps,'.',figure=myfig)
        if injpar_:
            if min(pos_samps)<injpar_ and max(pos_samps)>injpar_:
                plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.')
        myfig.savefig(os.path.join(outdir,param+'_samps.png'))

        #summary_file.set('1D ranking kde',param,rkde)
        summary_file.set('1D ranking bins',param,rbins)
        oneDplotPaths.append(oneDplotPath)

    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')

    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()

    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')
    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):
    if eventnum is not None and injfile is None:
        print "You specified an event number but no injection file. Ignoring!"

    if data is None:
        print 'You must specify an input data file'
        exit(1)
    #
    if outdir is None:
        print "You must specify an output directory."
        exit(1)

    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #
    summary_fo=open(os.path.join(outdir,'summary.ini'),'w')
    summary_file=ConfigParser()
    summary_file.add_section('metadata')
    summary_file.set('metadata','group_id','X')
    if eventnum:
        summary_file.set('metadata','event_id',str(eventnum))
    summary_file.add_section('Confidence levels')
    summary_file.set('Confidence levels','confidence levels',str(confidence_levels))

    # Load in the main data
    paramnames, pos=loadDataFile(data[0])

    #Generate any required derived parameters
    if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames:
        (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')])
        pos=np.column_stack((pos,m1,m2))
        paramnames.append("m1")
        paramnames.append("m2")
    #
    Nd=len(paramnames)
    print "Number of posterior samples: " + str(size(pos,0))

    # Calculate means
    means = mean(pos,axis=0)
    meanStr=map(str,means)
    out=reduce(lambda a,b:a+'||'+b,meanStr)
    print 'Means:'
    print '||'+out+'||'

    RAdim=paramnames.index('RA')
    decdim=paramnames.index('dec')

    injection=None

    # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file
    if injfile:
        import itertools
        injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile])
        if(eventnum is not None):
            if(len(injections)<eventnum):
                print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections))
                sys.exit(1)
            else:
                injection=injections[eventnum]
        else:
            if(len(injections)<1):
                print 'Warning: Cannot find injection with end time %f' %(means[2])
            else:
                injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next()

    #If injection parameter passed load object representation of injection
    #table entries.
    if injection:
        injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames)))
        injvals=map(str,injpoint)
        out=reduce(lambda a,b:a+'||'+b,injvals)
        print 'Injected values:'
        print out

        #Add injection values to output file
        summary_file.add_section('Injection values')
        for parnum in range(len(paramnames)):
            summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum))
    #
    #If sky resolution parameter has been specified try and create sky map.
    skyreses=None
    if skyres is not None:
        RAvec=array(pos)[:,paramnames.index('RA')]
        decvec=array(pos)[:,paramnames.index('dec')]
        skypos=column_stack([RAvec,decvec])
        injvalues=None
        if injection:
            injvalues=(injpoint[RAdim],injpoint[decdim])
        skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir)

    #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs
    #using a greedy algorithm . The ranked pixels (toppoints) are used
    #to plot 2D histograms and evaluate Bayesian confidence intervals.
    summary_file.add_section('2D greedy cl')
    summary_file.add_section('2D greedy cl inj')

    ncon=len(confidence_levels)
    pos_array=np.array(pos)
    twoDGreedyCL={}
    twoDGreedyInj={}

    for par1_name,par2_name in twoDGreedyMenu:
        print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name)
        #Bin sizes
        try:
            par1_bin=GreedyRes[par1_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_bin=GreedyRes[par2_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        #Get posterior samples
        try:
            par1_index=paramnames.index(par1_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_index=paramnames.index(par2_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue

        pars_name=("%s,%s"%(par1_name,par2_name)).lower()
        par1pos=pos_array[:,par1_index]
        par2pos=pos_array[:,par2_index]

        injection_=None
        if injection:
            par1_injvalue=np.array(injpoint)[par1_index]
            par2_injvalue=np.array(injpoint)[par2_index]
            injection_=(par1_injvalue,par2_injvalue)

        posterior_array=column_stack([par1pos,par2pos])
        toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_)

        #Plot 2D histograms of greedily binned points
        if injection is not None and par1_injvalue is not None and par2_injvalue is not None:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue])
        else:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name))
        #
        summaryString='['
        for frac,area in twoDGreedyCL[pars_name].items():
            summaryString+=str(area)
        summary_file.set('2D greedy cl',pars_name,summaryString+']')
        summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence))

    if not os.path.exists(os.path.join(outdir,'pickle')):
        os.makedirs(os.path.join(outdir,'pickle'))
    pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle'))
    pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle'))
    for par in twoDGreedyCL.keys():
        oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat'))

    #1D binning
    summary_file.add_section('1D mean')
    summary_file.add_section('1D median')
    summary_file.add_section('1D mode')
    summary_file.add_section('1D contigious cl')
    summary_file.add_section('1D greedy cl')
    summary_file.add_section('1D stacc')

    oneDStats={}
    oneDGreedyCL={}
    oneDContCL={}
    oneDGreedyInj={}
    oneDContInj={}

    max_pos,max_i=posMode(pos_array)

    #Loop over each parameter and determine the contigious and greedy
    #confidence levels and some statistics.
    for par_name in oneDMenu:
        print "Binning %s to determine confidence levels ..."%par_name
        try:
            par_index=paramnames.index(par_name)
        except ValueError:
            print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print "Bin size is not set for %s, skipping binning."%par_name
            continue

        oneDGreedyCL[par_name]={}
        oneDStats[par_name]={}
        oneDContCL[par_name]={}
        oneDGreedyInj[par_name]={}
        oneDContInj[par_name]={}

        par_samps=pos_array[:,par_index]

        summary_file.set('1D mode',par_name,str(par_samps[max_i]))
        summary_file.set("1D mean",par_name,str(np.mean(par_samps)))
        summary_file.set("1D median",par_name,str(np.median(par_samps)))

        oneDStats[par_name]['mode']=par_samps[max_i]
        oneDStats[par_name]['mean']=np.mean(par_samps)
        oneDStats[par_name]['median']=np.median(par_samps)

        par_injvalue_=None
        if injection:
            par_injvalue_=np.array(injpoint)[par_index]

        oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)

        #Ilya's standard accuracy statistic
        if injection:
            injvalue=np.array(injpoint)[par_index]
            if injvalue:
                stacc=bppu.stacc_stat(par_samps,injvalue)
                summary_file.set('1D stacc',par_name,str(stacc))
                oneDStats[par_name]['stacc']=stacc

    pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle'))
    pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle'))
    pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle'))
    pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle'))
    pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle'))

    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat'))
    #
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat'))
    #
    #####Generate 2D kde plots and webpage########
    margdir=os.path.join(outdir,'2D')
    if not os.path.isdir(margdir):
        os.makedirs(margdir)

    twoDKdePaths=[]
    for par1,par2 in twoDplots:
        try:
            i=paramnames.index(par1)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2)
            continue
        try:
            j=paramnames.index(par2)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2)
            continue
        print 'Generating %s-%s plot'%(paramnames[i],paramnames[j])

        if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2):
            continue

        par_injvalues_=None
        if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) :
            if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None:
                par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) )

        myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_)
        twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png')
        twoDKdePaths.append(twoDKdePath)
        myfig.savefig(twoDKdePath)

    htmlfile=open(os.path.join(outdir,'posplots.html'),'w')
    htmlfile.write('<HTML><HEAD><TITLE>Posterior PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>')
    if(skyres is not None):
        htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>')
        for (frac,skysize) in skyreses:
            htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize))
        htmlfile.write('</table>')
    htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>')
    htmlfile.write('Samples read from %s<br>'%(data[0]))
    htmlfile.write('<h4>Mean parameter estimates</h4>')
    htmlfile.write('<table border=1><tr>')
    paramline=reduce(lambda a,b:a+'<td>'+b,paramnames)
    htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>')
    meanline=reduce(lambda a,b:a+'<td>'+b,meanStr)
    htmlfile.write('<td>'+meanline+'</tr>')
    if injection:
        htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>')
        injline=reduce(lambda a,b:a+'<td>'+b,injvals)
        htmlfile.write('<td>'+injline+'<td></tr>')
    htmlfile.write('</table>')
    if injection:
        if skyinjectionconfidence:
            htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence))
        else:
            htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>')
    htmlfile.write('<h5>2D Marginal PDFs</h5><br>')
    htmlfile.write('<table border=1><tr>')
    #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>')
    #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>')
    if skyres is not None:
        htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>')
    else:
        htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>')
    #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>')
    row_switch=1
    for par1,par2 in twoDplots:
        if row_switch==3:
            row_switch=0
        plot_path=None
        if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')):
            plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png'
        elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')):
            plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png'
        if plot_path:
            if row_switch==0:
                htmlfile.write('<tr>')
            htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>')
            if row_switch==2:
                htmlfile.write('</tr>')
            row_switch+=1
    #
    if row_switch==2:
        htmlfile.write('<td></td></tr>')
    elif row_switch==1:
        htmlfile.write('<td></td><td></td></tr>')
    htmlfile.write('</table>')
    htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>')

    summary_file.add_section('1D ranking kde')
    summary_file.add_section('1D ranking bins')

    oneDplotPaths=[]
    for param in oneDMenu:
        try:
            par_index=paramnames.index(param)
            i=par_index
        except ValueError:
            print "No input chain for %s, skipping 1D plot."%param
            continue

        pos_samps=pos[:,i]
        injpar_=None
        if injection:
            injpar_=getinjpar(paramnames,injection,i)

        print "Generating 1D plot for %s."%param
        rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_)
        oneDplotPath=os.path.join(outdir,param+'.png')
        plotFig.savefig(os.path.join(outdir,param+'.png'))
        if rbins:
            print "r of injected value of %s (bins) = %f"%(param, rbins)

        ##Produce plot of raw samples
        myfig=plt.figure(figsize=(4,3.5),dpi=80)
        plt.plot(pos_samps,'.',figure=myfig)
        if injpar_:
            if min(pos_samps)<injpar_ and max(pos_samps)>injpar_:
                plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.')
        myfig.savefig(os.path.join(outdir,param+'_samps.png'))

        #summary_file.set('1D ranking kde',param,rkde)
        summary_file.set('1D ranking bins',param,rbins)
        oneDplotPaths.append(oneDplotPath)

    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')

    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()

    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')
    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
479,771
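As captured in this dump, the bugged and fixed cells of this row read identically, so the recorded diff is not visible here. Independent of that, the '||'-delimited summary lines the function builds with reduce() have a simpler equivalent in str.join(); a minimal sketch with hypothetical values:

    meanStr = ['1.23', '0.25', '874000000.0']  # hypothetical stringified means
    print '||' + reduce(lambda a, b: a + '||' + b, meanStr) + '||'  # as in the function
    print '||' + '||'.join(meanStr) + '||'                          # equivalent join()

Both lines print ||1.23||0.25||874000000.0||.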
def write_abstract_dag(self):
    """
    Write all the nodes in the workflow to the DAX file.
    """
    if not self.__dax_file_path:
        # this workflow is not dax-compatible, so don't write a dax
        return
    try:
        dagfile = open( self.__dax_file_path, 'w' )
    except:
        raise CondorDAGError, "Cannot open file " + self.__dag_file_path
def write_abstract_dag(self):
    """
    Write all the nodes in the workflow to the DAX file.
    """
    if not self.__dax_file_path:
        # this workflow is not dax-compatible, so don't write a dax
        return
    try:
        dagfile = open( self.__dax_file_path, 'w' )
    except:
        raise CondorDAGError, "Cannot open file " + self.__dag_file_path
479,773
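In both cells of this row the except branch reports self.__dag_file_path even though the open() call targets self.__dax_file_path. A hedged sketch of what the message was presumably meant to say (an assumption, not part of the recorded fix):

    try:
        dagfile = open(self.__dax_file_path, 'w')
    except:
        # report the DAX path that actually failed to open
        raise CondorDAGError, "Cannot open file " + self.__dax_file_path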
def populate_SkyLocInjTable(skylocinjtable,coinc,dt_area,rank_area, \
                            dtrss_inj,dDrss_inj):
    """
    given an instance of skypoints populate and return skylocinjtable
    """
    row = skylocinjtable.RowType()
    row.end_time = coinc.time
    rhosquared = 0.0
    for ifo in coinc.ifo_list:
        rhosquared += coinc.snr[ifo]*coinc.snr[ifo]
    row.comb_snr = sqrt(rhosquared)
    try:
        row.h1_snr = coinc.snr['H1']
    except:
        row.h1_snr = None
    try:
        row.l1_snr = coinc.snr['L1']
    except:
        row.l1_snr = None
    try:
        row.v1_snr = coinc.snr['V1']
    except:
        row.v1_snr = None
    row.ra = coinc.longitude_inj
    row.dec = coinc.latitude_inj
    row.dt_area = dt_area
    row.rank_area = rank_area
    row.delta_t_rss = dtrss_inj
    row.delta_D_rss = dDrss_inj
    try:
        row.h1_eff_distance = coinc.eff_distances_inj['H1']
    except:
        row.h1_eff_distance = None
    try:
        row.l1_eff_distance = coinc.eff_distances_inj['L1']
    except:
        row.l1_eff_distance = None
    try:
        row.v1_eff_distance = coinc.eff_distances_inj['V1']
    except:
        row.v1_eff_distance = None
    row.mass1 = coinc.mass1_inj
    row.mass2 = coinc.mass2_inj
    skylocinjtable.append(row)
def populate_SkyLocInjTable(skylocinjtable,coinc,dt_area,rank_area, \
                            dtrss_inj,dDrss_inj):
    """
    record injection data in a skylocinjtable
    """
    row = skylocinjtable.RowType()
    row.end_time = coinc.time
    rhosquared = 0.0
    for ifo in coinc.ifo_list:
        rhosquared += coinc.snr[ifo]*coinc.snr[ifo]
    row.comb_snr = sqrt(rhosquared)
    try:
        row.h1_snr = coinc.snr['H1']
    except:
        row.h1_snr = None
    try:
        row.l1_snr = coinc.snr['L1']
    except:
        row.l1_snr = None
    try:
        row.v1_snr = coinc.snr['V1']
    except:
        row.v1_snr = None
    row.ra = coinc.longitude_inj
    row.dec = coinc.latitude_inj
    row.dt_area = dt_area
    row.rank_area = rank_area
    row.delta_t_rss = dtrss_inj
    row.delta_D_rss = dDrss_inj
    try:
        row.h1_eff_distance = coinc.eff_distances_inj['H1']
    except:
        row.h1_eff_distance = None
    try:
        row.l1_eff_distance = coinc.eff_distances_inj['L1']
    except:
        row.l1_eff_distance = None
    try:
        row.v1_eff_distance = coinc.eff_distances_inj['V1']
    except:
        row.v1_eff_distance = None
    row.mass1 = coinc.mass1_inj
    row.mass2 = coinc.mass2_inj
    skylocinjtable.append(row)
479,774
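The per-IFO try/except pattern in both cells silently maps a missing interferometer to None. If coinc.snr and coinc.eff_distances_inj are plain dicts (an assumption), dict.get() expresses the same intent without swallowing unrelated exceptions; a sketch:

    # hypothetical rewrite of the per-IFO lookups above; assumes coinc.snr
    # and coinc.eff_distances_inj are plain dicts
    for attr, ifo in (('h1_snr', 'H1'), ('l1_snr', 'L1'), ('v1_snr', 'V1')):
        setattr(row, attr, coinc.snr.get(ifo))
    for attr, ifo in (('h1_eff_distance', 'H1'), ('l1_eff_distance', 'L1'),
                      ('v1_eff_distance', 'V1')):
        setattr(row, attr, coinc.eff_distances_inj.get(ifo))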
def skyhist_cart(skycarts,samples):
    """
    Histogram the list of samples into bins defined by Cartesian vectors in skycarts
    """
    dot=numpy.dot
    N=len(skycarts)
    print 'operating on %d sky points'%(N)
    bins=zeros(N)
    for sample in samples:
        sampcart=pol2cart(sample[RAdim],sample[decdim])
        maxdx=max(xrange(0,N),key=lambda i:dot(sampcart,skycarts[i]))
        bins[maxdx]+=1
    return (skycarts,bins)
def skyhist_cart(skycarts,samples):
    """
    Histogram the list of samples into bins defined by Cartesian vectors in skycarts
    """
    dot=numpy.dot
    N=len(skycarts)
    print 'operating on %d sky points'%(N)
    bins=zeros(N)
    for sample in samples:
        sampcart=pol2cart(sample[RAdim],sample[decdim])
        maxdot=0
        for i in range(0,N):
            thisdot=dot(sampcart,skycarts[i])
            if thisdot>maxdot:
                maxdot=thisdot
                maxdx=i
        bins[maxdx]+=1
    return (skycarts,bins)
479,775
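Both variants compute an argmax of dot products against the sky grid; note the explicit-loop version initializes maxdot to 0, so maxdx is never assigned if every dot product is non-positive. A minimal vectorized sketch of the same binning, assuming skycarts can be viewed as an (N, 3) numpy array and that pol2cart, RAdim and decdim are available at module level as above:

    import numpy

    def skyhist_cart_vec(skycarts, samples):
        # histogram samples into the sky bin whose unit vector has the
        # largest dot product with the sample direction
        skycarts = numpy.asarray(skycarts)
        bins = numpy.zeros(len(skycarts))
        for sample in samples:
            sampcart = pol2cart(sample[RAdim], sample[decdim])
            # argmax over all dot products at once; well-defined even when
            # every dot product is non-positive
            bins[numpy.argmax(numpy.dot(skycarts, sampcart))] += 1
        return (skycarts, bins)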
def __init__(self,dag,job,cp,opts):
    pipeline.CondorDAGNode.__init__(self,job)
    #Specify pipe location
    self.add_var_opt('followup-directory',cp.get("makeCheckListWiki", "location").strip())
    #Specify pipe ini file
    self.add_var_opt('ini-file',cp.get("makeCheckListWiki", "ini-file").strip())
    if not opts.disable_dag_categories:
        self.set_category(job.name.lower())
    #Add this as child of all known jobs
    for parentNode in dag.get_nodes():
        self.add_parent(parentNode)
    dag.add_node(self)
def __init__(self,dag,job,cp,opts):
    pipeline.CondorDAGNode.__init__(self,job)
    #Specify pipe location
    self.add_var_opt('followup-directory',cp.get("makeCheckListWiki", "location").strip())
    #Specify pipe ini file
    self.add_var_opt('ini-file',cp.get("makeCheckListWiki", "ini-file").strip())
    if not opts.disable_dag_categories:
        self.set_category(job.name.lower())
    #Add this as child of all known jobs
    for parentNode in dag.get_nodes():
        self.add_parent(parentNode)
    dag.add_node(self)
479,776
def setup_distrib_script(self,tag_base):
    distrib_script = open('distribRemoteScan_'+tag_base+'.sh','w')
    distrib_script.write("""#!/bin/bash
def setup_distrib_script(self,tag_base):
    distrib_script = open('distribRemoteScan_'+tag_base+'.sh','w')
    distrib_script.write("""#!/bin/bash
479,777
def setup_distrib_script(self,tag_base):
    distrib_script = open('distribRemoteScan_'+tag_base+'.sh','w')
    distrib_script.write("""#!/bin/bash
def setup_distrib_script(self,tag_base):
    distrib_script = open('distribRemoteScan_'+tag_base+'.sh','w')
    distrib_script.write("""#!/bin/bash
479,778
def __init__(self,job,coinc,cp,opts,dag,ifo,ifonames,p_nodes):
    pipeline.CondorDAGNode.__init__(self,job)
def __init__(self,job,coinc,cp,opts,dag,ifo,ifonames,p_nodes):
    pipeline.CondorDAGNode.__init__(self,job)
479,779
def __init__(self, configfile=None):
    cp = ConfigParser.ConfigParser()
    self.cp = cp
    self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]])
    self.ini_file=self.time_now + ".ini"
    home_base = home_dirs()

    # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY
    cp.add_section("condor")
    cp.set("condor","datafind",self.which("ligo_data_find"))
    cp.set("condor","inspiral",self.which("lalapps_inspiral"))
    cp.set("condor","chia", self.which("lalapps_coherent_inspiral"))
    cp.set("condor","universe","standard")

    # SECTIONS TO SHUT UP WARNINGS
    cp.add_section("inspiral")
    cp.add_section("data")

    # DATAFIND SECTION
    cp.add_section("datafind")
def __init__(self, configfile=None):
    cp = ConfigParser.ConfigParser()
    self.cp = cp
    self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]])
    self.ini_file=self.time_now + ".ini"
    home_base = home_dirs()

    # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY
    cp.add_section("condor")
    cp.set("condor","datafind",self.which("ligo_data_find"))
    cp.set("condor","inspiral",self.which("lalapps_inspiral"))
    cp.set("condor","chia", self.which("lalapps_coherent_inspiral"))
    cp.set("condor","universe","standard")

    # SECTIONS TO SHUT UP WARNINGS
    cp.add_section("inspiral")
    cp.add_section("data")

    # DATAFIND SECTION
    cp.add_section("datafind")
479,780
def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None):
    """
    Temporary patch function, to adjust specified frame type used in
    searching the filesystem for files to display in followup.
    """
    if frametype == None:
        raise Exception, "input to __patchFrameTypeDef__ included a \
frametype argument specified as None\n"
        return None
    if gpstime == None:
        raise Warning, "input to __patchFrameTypeDef__ included a \
gps time argument specified as None\n"
        return frametype
    if ifo == None:
        raise Warning, "input to __patchFrameTypeDef__ included an \
ifo argument specified as None\n"
        return frametype
    endOfS5=int(875232014)
    if int(gpstime)<=endOfS5:
        if not frametype.lower().startswith(ifo.lower()):
            return ifo+"_"+frametype
    return frametype
def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None):
    """
    Temporary patch function, to adjust specified frame type used in
    searching the filesystem for files to display in followup.
    """
    if frametype == None:
        raise Exception, "input to __patchFrameTypeDef__ included a \
frametype argument specified as None\n"
        return None
    if gpstime == None:
        raise Warning, "input to __patchFrameTypeDef__ included a \
gps time argument specified as None\n"
        return frametype
    if ifo == None:
        raise Warning, "input to __patchFrameTypeDef__ included an \
ifo argument specified as None\n"
        return frametype
    endOfS5=int(875232014)
    if int(gpstime)<=endOfS5 or ifo=="V1":
        if not frametype.lower().startswith(ifo.lower()):
            return ifo+"_"+frametype
    return frametype
479,781
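The entire diff in this row is the guard on the prefixing branch, int(gpstime)<=endOfS5 or ifo=="V1": after the fix, Virgo frame types are prefixed with the IFO name in every epoch, not only for times up to the end of S5. A minimal Python 3 re-expression of just that predicate (hypothetical helper name; the row itself is Python 2):

END_OF_S5 = 875232014   # GPS second, as in the row above

def needs_ifo_prefix(frametype, ifo, gpstime):
    # Fixed behaviour: prefix for any time up to the end of S5, and
    # always for Virgo ("V1"), unless the type already carries the prefix.
    return ((int(gpstime) <= END_OF_S5 or ifo == "V1")
            and not frametype.lower().startswith(ifo.lower()))

print(needs_ifo_prefix("HrecOnline", "V1", 900000000))   # True only after the fix
print(needs_ifo_prefix("RDS_R_L1", "H1", 900000000))     # False: post-S5, not Virgo
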
bugged:
def getinjpar(inj,parnum):
    if parnum==0: return inj.mchirp
    if parnum==1: return inj.eta
    if parnum==2: return inj.get_end()
    if parnum==3: return inj.phi0
    if parnum==4: return inj.distance
    if parnum==5: return inj.longitude
    if parnum==6: return inj.latitude
    if parnum==7: return inj.polarization
    if parnum==8: return inj.inclination
    return None

fixed:
def getinjpar(inj,parnum):
    if parnum==0: return inj.mchirp
    if parnum==1: return inj.eta
    if parnum==2: return inj.get_end()
    if parnum==3: return inj.phi0
    if parnum==4: return inj.distance
    if parnum==5: return inj.longitude
    if parnum==6: return inj.latitude
    if parnum==7: return inj.polarization
    if parnum==8: return inj.inclination
    return None

__index_level_0__: 479,782

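This pair is shown unchanged. For reference, the if-chain is a plain index-to-attribute dispatch; a table-driven sketch of the same mapping (illustrative only, not the pipeline's API) reads:

from types import SimpleNamespace

# Attribute names come from the row above; the helper itself is illustrative.
INJ_PARAMS = ('mchirp', 'eta', 'get_end', 'phi0', 'distance',
              'longitude', 'latitude', 'polarization', 'inclination')

def getinjpar_table(inj, parnum):
    if not 0 <= parnum < len(INJ_PARAMS):
        return None
    value = getattr(inj, INJ_PARAMS[parnum])
    return value() if callable(value) else value   # get_end is a method

inj = SimpleNamespace(mchirp=1.2, eta=0.25, get_end=lambda: 900000000)
print(getinjpar_table(inj, 0))    # 1.2
print(getinjpar_table(inj, 2))    # 900000000
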
bugged:
def script_dict():
    script = {}
    tog = create_toggle()
    script[tog] = 'javascript'
    script['http://ajax.googleapis.com/ajax/libs/jquery/1.2.6/jquery.min.js'] = 'javascript'
    return (script, [tog])

fixed:
def script_dict(fname):
    script = {}
    tog = create_toggle()
    script[tog] = 'javascript'
    script['http://ajax.googleapis.com/ajax/libs/jquery/1.2.6/jquery.min.js'] = 'javascript'
    return (script, [tog])

__index_level_0__: 479,783

bugged:
def script_dict():
    script = {}
    tog = create_toggle()
    script[tog] = 'javascript'
    script['http://ajax.googleapis.com/ajax/libs/jquery/1.2.6/jquery.min.js'] = 'javascript'
    return (script, [tog])

fixed:
def script_dict():
    script = {}
    tog = os.path.split(create_toggle(fname))[1]
    script[tog] = 'javascript'
    script['http://ajax.googleapis.com/ajax/libs/jquery/1.2.6/jquery.min.js'] = 'javascript'
    return (script, [tog])

__index_level_0__: 479,784

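Rows 479,783 and 479,784 patch script_dict in two hunks: the first adds the fname parameter to the signature, the second threads it into create_toggle and keeps only the basename of the returned path. Splicing the two fixed columns together gives the following reconstruction (create_toggle is stubbed here so the sketch runs standalone):

import os

def create_toggle(fname="toggle.js"):
    # Stand-in for the document's create_toggle(): assumed to write the
    # toggle script to fname and return the path it wrote.
    with open(fname, "w") as f:
        f.write("// toggle stub\n")
    return fname

def script_dict(fname):
    script = {}
    tog = os.path.split(create_toggle(fname))[1]   # basename only
    script[tog] = 'javascript'
    script['http://ajax.googleapis.com/ajax/libs/jquery/1.2.6/jquery.min.js'] = 'javascript'
    return (script, [tog])

print(script_dict("toggle.js"))
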
bugged:
def copy_ihope_style(stylefile="cbcwebpage.css", base_dir="."):
    # FIXME this is a stupid way to find the path... changes to build scripts, set env var?
    path = which('ligo_data_find')
    if path:
        path = os.path.split(path)[0]
    else:
        print >>sys.stderr, "COULD NOT FIND STYLE FILES %s IN %s, ABORTING" % (stylefile, path)
        raise ValueError
        sys.exit(1)
    out = path.replace('bin','etc') + '/' + stylefile
    if not os.path.isfile(out):
        print >>sys.stderr, "COULD NOT FIND STYLE FILES %s IN %s, ABORTING" % (stylefile, path)
        raise ValueError
        sys.exit(1)
    shutil.copy(out, base_dir)
    return base_dir + '/' + os.path.split(out.rstrip('/'))[1]

fixed:
def copy_ihope_style(stylefile="cbcwebpage.css", base_dir="."):
    # FIXME this is a stupid way to find the path... changes to build scripts, set env var?
    path = which('ligo_data_find')
    if path:
        path = os.path.split(path)[0]
    else:
        print >>sys.stderr, "COULD NOT FIND STYLE FILES %s IN %s, ABORTING" % (stylefile, path)
        raise ValueError
        sys.exit(1)
    out = path.replace('bin','etc') + '/' + stylefile
    if not os.path.isfile(out):
        print >>sys.stderr, "COULD NOT FIND STYLE FILES %s IN %s, ABORTING" % (stylefile, path)
        raise ValueError
        sys.exit(1)
    shutil.copy(out, base_dir)
    return os.path.split(out.rstrip('/'))[1]

__index_level_0__: 479,785

bugged:
def __init__(self, title="cbc web page", path='./', css=None, script=None, pagenum=1, verbose=False):
    """
    """
    if not css:
        css = copy_ihope_style()
    scdict = script_dict()
    if not script:
        script = scdict[0]
    self.front = ""
    scriptfiles = scdict[1]
    self.verbose = verbose
    self._style = css
    self._title = title
    self._script = script
    self.path = path
    self.pagenum = pagenum

fixed:
def __init__(self, title="cbc web page", path='./', css=None, script=None, pagenum=1, verbose=False):
    """
    """
    if not css:
        css = copy_ihope_style(base_dir=path)
    scdict = script_dict(fname='%s/%s' % (path,"toggle.js"))
    if not script:
        script = scdict[0]
    self.front = ""
    scriptfiles = scdict[1]
    self.verbose = verbose
    self._style = css
    self._title = title
    self._script = script
    self.path = path
    self.pagenum = pagenum

__index_level_0__: 479,786

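Rows 479,785 and 479,786 are two halves of one path fix: copy_ihope_style now returns only the stylesheet's basename, and the page constructor compensates by copying into base_dir=path and pointing script_dict at path/toggle.js, so both assets sit next to the generated page and are referenced relatively. The resulting names, with hypothetical paths:

import os

page_dir = "./my_page"                        # hypothetical page directory
style_src = "/opt/lal/etc/cbcwebpage.css"     # hypothetical installed style file

# New copy_ihope_style return value: basename only, relative to the page.
print(os.path.split(style_src.rstrip('/'))[1])    # cbcwebpage.css
# fname handed to script_dict by the fixed constructor:
print('%s/%s' % (page_dir, "toggle.js"))          # ./my_page/toggle.js
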
bugged:
def add_subpage(self, tag, title, link_text=None):
    """
    """

fixed:
def add_subpage(self, tag, title, link_text=None):
    """
    """

__index_level_0__: 479,787

bugged:
def get_signal_vetoes(trigger,bankq=0,bankn=0,autoq=0,auton=0,chiq=0,chin=0,sigmaVals = None,fResp = None):
    sbvs = {}
    q = bankq
    nhigh = bankn
    q2 = autoq
    nhigh2 = auton
    if trigger.chisq == 0:
        sbvs['BestNR1'] = 0
    else:
        if trigger.chisq < 60:
            sbvs['BestNR1'] = trigger.snr
        else:
            sbvs['BestNR1'] = trigger.snr/((1 + (trigger.chisq/60.)**(chiq/chin))/2.)**(1./chiq)

fixed:
def get_signal_vetoes(trigger,bankq=0,bankn=0,autoq=0,auton=0,chiq=0,chin=0,sigmaVals = None,fResp = None):
    sbvs = {}
    q = bankq
    nhigh = bankn
    q2 = autoq
    nhigh2 = auton
    if trigger.chisq == 0:
        sbvs['BestNR1'] = 0
    else:
        if trigger.chisq < 60:
            sbvs['BestNR1'] = trigger.snr
        else:
            sbvs['BestNR1'] = trigger.snr/((1 + (trigger.chisq/60.)**(chiq/chin))/2.)**(1./chiq)

__index_level_0__: 479,788

bugged:
def plotSkyMap(skypos,skyres,sky_injpoint):
    from pylal import skylocutils
    from mpl_toolkits.basemap import Basemap

    skypoints=array(skylocutils.gridsky(float(skyres)))
    skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints)
    skyinjectionconfidence=None

    shist=bayespputils.skyhist_cart(array(skycarts),skypos)
    #shist=skyhist_cart(skycarts,list(pos))
    bins=skycarts

    # Find the bin of the injection if available
    injbin=None
    if sky_injpoint:
        injhist=skyhist_cart(skycarts,array([sky_injpoint]))
        injbin=injhist.tolist().index(1)
        print 'Found injection in bin %d with co-ordinates %f,%f .'%(injbin,skypoints[injbin,0],skypoints[injbin,1])

    (skyinjectionconfidence,toppoints,skyreses)=bayespputils.calculateConfidenceLevels(shist,skypoints,injbin,float(opts.skyres),confidence_levels,len(pos))

    if injbin and skyinjectionconfidence:
        i=list(nonzero(np.asarray(toppoints)[:,2]==injbin))[0]
        min_sky_area_containing_injection=float(opts.skyres)*float(opts.skyres)*i
        print 'Minimum sky area containing injection point = %f square degrees'%min_sky_area_containing_injection

    myfig=plt.figure()
    plt.clf()
    m=Basemap(projection='moll',lon_0=180.0,lat_0=0.0)
    plx,ply=m(np.asarray(toppoints)[::-1,1]*57.296,np.asarray(toppoints)[::-1,0]*57.296)
    cnlevel=[1-tp for tp in np.asarray(toppoints)[::-1,3]]
    plt.scatter(plx,ply,s=5,c=cnlevel,faceted=False,cmap=matplotlib.cm.jet)
    m.drawmapboundary()
    m.drawparallels(np.arange(-90.,120.,45.),labels=[1,0,0,0],labelstyle='+/-') # draw parallels
    m.drawmeridians(np.arange(0.,360.,90.),labels=[0,0,0,1],labelstyle='+/-') # draw meridians
    plt.title("Skymap") # add a title
    plt.colorbar()
    myfig.savefig(os.path.join(outdir,'skymap.png'))
    plt.clf()

    #Save skypoints
    np.savetxt('ranked_sky_pixels',column_stack([np.asarray(toppoints)[:,0:1],np.asarray(toppoints)[:,1],np.asarray(toppoints)[:,3]]))

    return skyreses,skyinjectionconfidence

fixed:
def plotSkyMap(skypos,skyres,sky_injpoint):
    from pylal import skylocutils
    from mpl_toolkits.basemap import Basemap

    skypoints=array(skylocutils.gridsky(float(skyres)))
    skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints)
    skyinjectionconfidence=None

    shist=bayespputils.skyhist_cart(array(skycarts),skypos)
    #shist=skyhist_cart(skycarts,list(pos))
    bins=skycarts

    # Find the bin of the injection if available
    injbin=None
    if sky_injpoint:
        injhist=skyhist_cart(skycarts,array([sky_injpoint]))
        injbin=injhist.tolist().index(1)
        print 'Found injection in bin %d with co-ordinates %f,%f .'%(injbin,skypoints[injbin,0],skypoints[injbin,1])

    (skyinjectionconfidence,toppoints,skyreses)=bayespputils.calculateConfidenceLevels(shist,skypoints,injbin,float(opts.skyres),confidence_levels,len(pos))

    if injbin and skyinjectionconfidence:
        i=list(np.nonzero(np.asarray(toppoints)[:,2]==injbin))[0]
        min_sky_area_containing_injection=float(opts.skyres)*float(opts.skyres)*i
        print 'Minimum sky area containing injection point = %f square degrees'%min_sky_area_containing_injection

    myfig=plt.figure()
    plt.clf()
    m=Basemap(projection='moll',lon_0=180.0,lat_0=0.0)
    plx,ply=m(np.asarray(toppoints)[::-1,1]*57.296,np.asarray(toppoints)[::-1,0]*57.296)
    cnlevel=[1-tp for tp in np.asarray(toppoints)[::-1,3]]
    plt.scatter(plx,ply,s=5,c=cnlevel,faceted=False,cmap=matplotlib.cm.jet)
    m.drawmapboundary()
    m.drawparallels(np.arange(-90.,120.,45.),labels=[1,0,0,0],labelstyle='+/-') # draw parallels
    m.drawmeridians(np.arange(0.,360.,90.),labels=[0,0,0,1],labelstyle='+/-') # draw meridians
    plt.title("Skymap") # add a title
    plt.colorbar()
    myfig.savefig(os.path.join(outdir,'skymap.png'))
    plt.clf()

    #Save skypoints
    np.savetxt('ranked_sky_pixels',column_stack([np.asarray(toppoints)[:,0:1],np.asarray(toppoints)[:,1],np.asarray(toppoints)[:,3]]))

    return skyreses,skyinjectionconfidence

__index_level_0__: 479,789

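The only change in this row is nonzero becoming np.nonzero: the bare name is undefined unless the module does from numpy import * somewhere, so the bugged version would raise NameError whenever an injection bin is found. A quick demonstration of the qualified call:

import numpy as np

# np.nonzero returns a tuple of index arrays, one per dimension.
toppoints_col = np.asarray([5, 7, 7, 2])
print(np.nonzero(toppoints_col == 7))            # (array([1, 2]),)
print(list(np.nonzero(toppoints_col == 7))[0])   # array([1, 2]), as the row uses it
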
bugged:
def get_ilwdchar_class(tbl_name, col_name):
    """
    Searches the cache of pre-defined ilwdchar subclasses for a class
    whose table_name and column_name attributes match those provided.
    If a matching subclass is found it is returned; otherwise a new
    class is defined, added to the cache, and returned.

    Example:

    >>> process_id = get_ilwdchar_class("process", "process_id")
    >>> x = process_id(10)
    >>> x
    <glue.ligolw.ilwd.cached_ilwdchar_class object at 0x2b8de0a186a8>
    >>> str(x)
    'process:process_id:10'

    Retrieving and storing the class provides a convenient mechanism
    for quickly constructing new ID objects.  Example:

    >>> for i in range(10):
    ...     print str(process_id(i))
    ...
    process:process_id:0
    process:process_id:1
    process:process_id:2
    process:process_id:3
    process:process_id:4
    process:process_id:5
    process:process_id:6
    process:process_id:7
    process:process_id:8
    process:process_id:9
    """
    #
    # if the class already exists, retrieve it
    #
    key = (str(tbl_name), str(col_name))
    try:
        return ilwdchar_class_cache[key]
    except KeyError:
        #
        # define a new class, and add it to the cache
        #
        class cached_ilwdchar_class(ilwdchar):
            __slots__ = ()
            table_name, column_name = key
            index_offset = len("%s:%s:" % key)

            def __conform__(self, protocol):
                # The presence of this method allows
                # ilwdchar sub-classes to be inserted
                # directly into SQLite databases as
                # strings.  See
                #
                # http://www.python.org/dev/peps/pep-0246
                #
                # for more information.
                #
                # NOTE:  GvR has rejected that PEP, so this
                # mechanism is obsolete.  Be prepared to
                # fix this, replacing it with whatever
                # replaces it.
                #
                # NOTE:  The return should be inside an "if
                # protocol is sqlite3.PrepareProtocol:"
                # conditional, but that would require
                # importing sqlite3 which would break this
                # module on FC4 boxes, and I'm not going to
                # spend time fixing something that's
                # obsolete anyway.
                return unicode(self)

        ilwdchar_class_cache[key] = cached_ilwdchar_class
        return cached_ilwdchar_class

fixed:
def get_ilwdchar_class(tbl_name, col_name):
    """
    Searches the cache of pre-defined ilwdchar subclasses for a class
    whose table_name and column_name attributes match those provided.
    If a matching subclass is found it is returned; otherwise a new
    class is defined, added to the cache, and returned.

    Example:

    >>> process_id = get_ilwdchar_class("process", "process_id")
    >>> x = process_id(10)
    >>> x
    <glue.ligolw.ilwd.cached_ilwdchar_class object at 0x2b8de0a186a8>
    >>> str(x)
    'process:process_id:10'

    Retrieving and storing the class provides a convenient mechanism
    for quickly constructing new ID objects.  Example:

    >>> for i in range(10):
    ...     print str(process_id(i))
    ...
    process:process_id:0
    process:process_id:1
    process:process_id:2
    process:process_id:3
    process:process_id:4
    process:process_id:5
    process:process_id:6
    process:process_id:7
    process:process_id:8
    process:process_id:9
    """
    #
    # if the class already exists, retrieve it
    #
    key = (str(tbl_name), str(col_name))
    try:
        return ilwdchar_class_cache[key]
    except KeyError:
        #
        # define a new class, and add it to the cache
        #
        class cached_ilwdchar_class(ilwdchar):
            __slots__ = ()
            table_name, column_name = key
            index_offset = len("%s:%s:" % key)

            def __conform__(self, protocol):
                # The presence of this method allows
                # ilwdchar sub-classes to be inserted
                # directly into SQLite databases as
                # strings.  See
                #
                # http://www.python.org/dev/peps/pep-0246
                #
                # for more information.
                #
                # NOTE:  GvR has rejected that PEP, so this
                # mechanism is obsolete.  Be prepared to
                # fix this, replacing it with whatever
                # replaces it.
                #
                # NOTE:  The return should be inside an "if
                # protocol is sqlite3.PrepareProtocol:"
                # conditional, but that would require
                # importing sqlite3 which would break this
                # module on FC4 boxes, and I'm not going to
                # spend time fixing something that's
                # obsolete anyway.
                return unicode(self)

        ilwdchar_class_cache[key] = cached_ilwdchar_class
        return cached_ilwdchar_class

__index_level_0__: 479,790

bugged:
def get_ilwdchar_class(tbl_name, col_name):
    """
    Searches the cache of pre-defined ilwdchar subclasses for a class
    whose table_name and column_name attributes match those provided.
    If a matching subclass is found it is returned; otherwise a new
    class is defined, added to the cache, and returned.

    Example:

    >>> process_id = get_ilwdchar_class("process", "process_id")
    >>> x = process_id(10)
    >>> x
    <glue.ligolw.ilwd.cached_ilwdchar_class object at 0x2b8de0a186a8>
    >>> str(x)
    'process:process_id:10'

    Retrieving and storing the class provides a convenient mechanism
    for quickly constructing new ID objects.  Example:

    >>> for i in range(10):
    ...     print str(process_id(i))
    ...
    process:process_id:0
    process:process_id:1
    process:process_id:2
    process:process_id:3
    process:process_id:4
    process:process_id:5
    process:process_id:6
    process:process_id:7
    process:process_id:8
    process:process_id:9
    """
    #
    # if the class already exists, retrieve it
    #
    key = (str(tbl_name), str(col_name))
    try:
        return ilwdchar_class_cache[key]
    except KeyError:
        #
        # define a new class, and add it to the cache
        #
        class cached_ilwdchar_class(ilwdchar):
            __slots__ = ()
            table_name, column_name = key
            index_offset = len("%s:%s:" % key)

            def __conform__(self, protocol):
                # The presence of this method allows
                # ilwdchar sub-classes to be inserted
                # directly into SQLite databases as
                # strings.  See
                #
                # http://www.python.org/dev/peps/pep-0246
                #
                # for more information.
                #
                # NOTE:  GvR has rejected that PEP, so this
                # mechanism is obsolete.  Be prepared to
                # fix this, replacing it with whatever
                # replaces it.
                #
                # NOTE:  The return should be inside an "if
                # protocol is sqlite3.PrepareProtocol:"
                # conditional, but that would require
                # importing sqlite3 which would break this
                # module on FC4 boxes, and I'm not going to
                # spend time fixing something that's
                # obsolete anyway.
                return unicode(self)

        ilwdchar_class_cache[key] = cached_ilwdchar_class
        return cached_ilwdchar_class

fixed:
def get_ilwdchar_class(tbl_name, col_name):
    """
    Searches the cache of pre-defined ilwdchar subclasses for a class
    whose table_name and column_name attributes match those provided.
    If a matching subclass is found it is returned; otherwise a new
    class is defined, added to the cache, and returned.

    Example:

    >>> process_id = get_ilwdchar_class("process", "process_id")
    >>> x = process_id(10)
    >>> x
    <glue.ligolw.ilwd.cached_ilwdchar_class object at 0x2b8de0a186a8>
    >>> str(x)
    'process:process_id:10'

    Retrieving and storing the class provides a convenient mechanism
    for quickly constructing new ID objects.  Example:

    >>> for i in range(10):
    ...     print str(process_id(i))
    ...
    process:process_id:0
    process:process_id:1
    process:process_id:2
    process:process_id:3
    process:process_id:4
    process:process_id:5
    process:process_id:6
    process:process_id:7
    process:process_id:8
    process:process_id:9
    """
    #
    # if the class already exists, retrieve it
    #
    key = (str(tbl_name), str(col_name))
    try:
        return ilwdchar_class_cache[key]
    except KeyError:
        #
        # define a new class, and add it to the cache
        #
        class cached_ilwdchar_class(ilwdchar):
            __slots__ = ()
            table_name, column_name = key
            index_offset = len("%s:%s:" % key)

            def __conform__(self, protocol):
                # The presence of this method allows
                # ilwdchar sub-classes to be inserted
                # directly into SQLite databases as
                # strings.  See
                #
                # http://www.python.org/dev/peps/pep-0246
                #
                # for more information.
                #
                # NOTE:  GvR has rejected that PEP, so this
                # mechanism is obsolete.  Be prepared to
                # fix this, replacing it with whatever
                # replaces it.
                #
                # NOTE:  The return should be inside an "if
                # protocol is sqlite3.PrepareProtocol:"
                # conditional, but that would require
                # importing sqlite3 which would break this
                # module on FC4 boxes, and I'm not going to
                # spend time fixing something that's
                # obsolete anyway.
                return unicode(self)

        ilwdchar_class_cache[key] = cached_ilwdchar_class
        return cached_ilwdchar_class

__index_level_0__: 479,791

bugged:
def __init__(self, configfile=None):
    cp = ConfigParser.ConfigParser()
    self.cp = cp
    self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]])
    self.ini_file=self.time_now + ".ini"
    home_base = home_dirs()
    # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY
    cp.add_section("condor")
    cp.set("condor","datafind",self.which("ligo_data_find"))
    cp.set("condor","inspiral",self.which("lalapps_inspiral"))
    cp.set("condor","chia", self.which("lalapps_coherent_inspiral"))
    cp.set("condor","universe","standard")
    # SECTIONS TO SHUT UP WARNINGS
    cp.add_section("inspiral")
    cp.add_section("data")
    # DATAFIND SECTION
    cp.add_section("datafind")

fixed:
def __init__(self, configfile=None):
    cp = ConfigParser.ConfigParser()
    self.cp = cp
    self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]])
    self.ini_file=self.time_now + ".ini"
    home_base = home_dirs()
    # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY
    cp.add_section("condor")
    cp.set("condor","datafind",self.which("ligo_data_find"))
    cp.set("condor","inspiral",self.which("lalapps_inspiral"))
    cp.set("condor","chia", self.which("lalapps_coherent_inspiral"))
    cp.set("condor","universe","standard")
    # SECTIONS TO SHUT UP WARNINGS
    cp.add_section("inspiral")
    cp.add_section("data")
    # DATAFIND SECTION
    cp.add_section("datafind")

__index_level_0__: 479,792

bugged:
def protract(self, x):
    """
    Move both the start and the end of the segment a distance x
    away from the other.
    """
    return self.__class__(self[0] - x, self[1] + x)

fixed:
def protract(self, x):
    """
    Return a new segment whose bounds are given by subtracting x
    from the segment's lower bound and adding x to the segment's
    upper bound.
    """
    return self.__class__(self[0] - x, self[1] + x)

__index_level_0__: 479,793

bugged:
def contract(self, x):
    """
    Move both the start and the end of the segment a distance x
    towards the the other.
    """
    return self.__class__(self[0] + x, self[1] - x)

fixed:
def contract(self, x):
    """
    Return a new segment whose bounds are given by adding x to the
    segment's lower bound and subtracting x from the segment's
    upper bound.
    """
    return self.__class__(self[0] + x, self[1] - x)

__index_level_0__: 479,794

bugged:
def shift(self, x):
    """
    Return a new segment by adding x to the upper and lower bounds
    of this segment.
    """
    return tuple.__new__(self.__class__, (self[0] + x, self[1] + x))

fixed:
def shift(self, x):
    """
    Return a new segment whose bounds are given by adding x to the
    segment's upper and lower bounds.
    """
    return tuple.__new__(self.__class__, (self[0] + x, self[1] + x))

__index_level_0__: 479,795

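Rows 479,793 through 479,795 only rewrite docstrings: the old wording ("move ... away from the other") is ambiguous once a segment's bounds can be in either order, while the new wording pins down the exact bounds arithmetic. A self-contained sketch of that arithmetic (a simplified stand-in, not the library's segment class):

class segment(tuple):
    # Simplified stand-in for the document's segment type: an immutable
    # (lower, upper) pair with the three bound-arithmetic methods above.
    def __new__(cls, a, b):
        return tuple.__new__(cls, (a, b))
    def protract(self, x):
        return self.__class__(self[0] - x, self[1] + x)
    def contract(self, x):
        return self.__class__(self[0] + x, self[1] - x)
    def shift(self, x):
        return tuple.__new__(self.__class__, (self[0] + x, self[1] + x))

s = segment(5, 10)
print(s.protract(2))   # (3, 12): 2 below the lower bound, 2 above the upper
print(s.contract(2))   # (7, 8):  bounds pulled inward by 2
print(s.shift(2))      # (7, 12): both bounds translated by 2
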
bugged:
def protract(self, x):
    """
    For each segment in the list, move both the start and the end a
    distance x away from the other.  Coalesce the result.
    Segmentlist is modified in place.
    """
    for i in xrange(len(self)):
        self[i] = self[i].protract(x)
    return self.coalesce()

fixed:
def protract(self, x):
    """
    Execute the .protract() method on each segment in the list and
    coalesce the result.  Segmentlist is modified in place.
    """
    for i in xrange(len(self)):
        self[i] = self[i].protract(x)
    return self.coalesce()

__index_level_0__: 479,796

bugged:
def contract(self, x):
    """
    For each segment in the list, move both the start and the end a
    distance x towards the other.  Coalesce the result.
    Segmentlist is modified in place.
    """
    for i in xrange(len(self)):
        self[i] = self[i].contract(x)
    return self.coalesce()

fixed:
def contract(self, x):
    """
    Execute the .contract() method on each segment in the list and
    coalesce the result.  Segmentlist is modified in place.
    """
    for i in xrange(len(self)):
        self[i] = self[i].contract(x)
    return self.coalesce()

__index_level_0__: 479,797

bugged:
def shift(self, x):
    """
    Shift the segmentlist by adding x to the upper and lower bounds
    of all segments.  The algorithm is O(n) and does not require
    the list to be coalesced.  Segmentlist is modified in place.
    """
    for i in xrange(len(self)):
        self[i] = self[i].shift(x)
    return self

fixed:
def shift(self, x):
    """
    Execute the .shift() method on each segment in the list.  The
    algorithm is O(n) and does not require the list to be coalesced
    nor does it coalesce the list.  Segmentlist is modified in place.
    """
    for i in xrange(len(self)):
        self[i] = self[i].shift(x)
    return self

__index_level_0__: 479,798

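The segmentlist docstring rewrites make the division of labour explicit: protract and contract can create or destroy overlaps between neighbouring segments, so they coalesce, while shift translates every segment by the same amount, preserving order and disjointness, so no coalesce is needed. A toy illustration with plain tuples (coalesce here is a simplified stand-in for the segmentlist method):

def coalesce(segs):
    # Merge overlapping (lower, upper) pairs; a simplified stand-in for
    # the segmentlist.coalesce() the rows above rely on.
    out = []
    for lo, hi in sorted(segs):
        if out and lo <= out[-1][1]:
            out[-1] = (out[-1][0], max(out[-1][1], hi))
        else:
            out.append((lo, hi))
    return out

segs = [(0, 4), (5, 9)]
protracted = [(lo - 1, hi + 1) for lo, hi in segs]   # [(-1, 5), (4, 10)]: now overlap
print(coalesce(protracted))                          # [(-1, 10)]
shifted = [(lo + 1, hi + 1) for lo, hi in segs]      # order and gaps preserved
print(shifted)                                       # [(1, 5), (6, 10)]
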
bugged:
def popitem(*args):
    raise NotImplementedError

fixed:
def popitem(*args):
    raise NotImplementedError

__index_level_0__: 479,799

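This last row leaves popitem unchanged; raising NotImplementedError from a mutating method is the usual way such modules expose a read-only mapping. A minimal sketch of the general pattern (hypothetical class, not taken from the source):

class readonlydict(dict):
    # Hypothetical example: route every mutating method to one stub that
    # raises, exactly as the popitem row does.
    def _readonly(self, *args, **kwargs):
        raise NotImplementedError("mapping is read-only")
    __setitem__ = __delitem__ = pop = popitem = clear = update = _readonly

d = readonlydict(a=1)
try:
    d.popitem()
except NotImplementedError as exc:
    print(exc)   # mapping is read-only
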