rem (stringlengths 1–322k) | add (stringlengths 0–2.05M) | context (stringlengths 4–228k) | meta (stringlengths 156–215) |
---|---|---|---|
plot(injection.mass1,injection.distance,'go') | plot(injection.mass1,injection.distance,'go',scalex=False,scaley=False) | def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp() | 1c7e777c94d676d98831f95002598d58404c4fe8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/1c7e777c94d676d98831f95002598d58404c4fe8/OddsPostProc.py |
plot(getinjpar(injection,4),getinjpar(injection,8),'go') | plot(getinjpar(injection,4),getinjpar(injection,8),'go',scalex=False,scaley=False) | (context: plot2Dkernel, identical to the row above) | 1c7e777c94d676d98831f95002598d58404c4fe8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/1c7e777c94d676d98831f95002598d58404c4fe8/OddsPostProc.py |
plot(getinjpar(injection,i),getinjpar(injection,j),'go') | plot(getinjpar(injection,i),getinjpar(injection,j),'go',scalex=False,scaley=False) | (context: plot2Dkernel, identical to the row above) | 1c7e777c94d676d98831f95002598d58404c4fe8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/1c7e777c94d676d98831f95002598d58404c4fe8/OddsPostProc.py |
plot([getinjpar(injection,i),getinjpar(injection,i)],[0,max(kdepdf)],'r-.') | plot([getinjpar(injection,i),getinjpar(injection,i)],[0,max(kdepdf)],'r-.',scalex=False,scaley=False) | (context: plot2Dkernel, identical to the row above) | 1c7e777c94d676d98831f95002598d58404c4fe8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/1c7e777c94d676d98831f95002598d58404c4fe8/OddsPostProc.py |
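The four rows above are one recurring fix: passing scalex=False, scaley=False to matplotlib's plot() so that overplotting an injection marker does not re-autoscale axes already set by the underlying posterior plot. A minimal runnable sketch of the idiom (data and filename are illustrative):

```python
import matplotlib
matplotlib.use("Agg")                    # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
import numpy as np

x, y = np.random.randn(1000), np.random.randn(1000)
plt.hist2d(x, y, bins=50)                # the main plot fixes the axis limits
plt.plot([8.0], [8.0], 'go',             # marker far outside the current view...
         scalex=False, scaley=False)     # ...but autoscaling stays untouched
plt.savefig("overlay.png")
```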
htmlfile.write('<img src="'+paramnames[i]+'.png">') | myfig=figure(figsize=(4,3.5),dpi=80) plot(pos[:,i],'.') if injection and min(pos[:,i])<getinjpar(injection,i) and max(pos[:,i])>getinjpar(injection,i): plot([0,len(pos)],[getinjpar(injection,i),getinjpar(injection,i)],'r-.') myfig.savefig(outdir+'/'+paramnames[i]+'_samps.png') htmlfile.write('<img src="'+paramnames[i]+'.png"><img src="'+paramnames[i]+'_samps.png><br>') | (context: plot2Dkernel, identical to the row above) | 1c7e777c94d676d98831f95002598d58404c4fe8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/1c7e777c94d676d98831f95002598d58404c4fe8/OddsPostProc.py |
["src/xlal/date.c"], | ["src/xlal/date.c", "src/xlal/misc.c"], | def run(self): # remove the automatically generated user env scripts for script in ["pylal-user-env.sh", "pylal-user-env.csh"]: log.info("removing " + script ) try: os.unlink(os.path.join("etc", script)) except: pass | be80e194ad3e1beb2ce4d17921d438d75f3e5bfd /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/be80e194ad3e1beb2ce4d17921d438d75f3e5bfd/setup.py |
injected_cols.extend(['injected_end_time', 'injected_end_time_ns', 'injected_end_time_utc__Px_click_for_daily_ihope_xP_']) | injected_cols.extend(['injected_decisive_distance','injected_end_time', 'injected_end_time_ns', 'injected_end_time_utc__Px_click_for_daily_ihope_xP_']) | def convert_duration( duration ): return sqlutils.convert_duration( duration, convert_durations ) | 66c62ec79b0aba296ba768048392060c9834f293 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/66c62ec79b0aba296ba768048392060c9834f293/printutils.py |
from glue.lal import LIGOTimeGPS |  | def get_pyvalue(self): return generic_get_pyvalue(self) | 66c62ec79b0aba296ba768048392060c9834f293 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/66c62ec79b0aba296ba768048392060c9834f293/printutils.py |
AND rank(""", decisive_distance, """) <= """, str(limit), """ | %s""" % (limit is not None and ''.join(['AND rank(', decisive_distance, ') <= ', str(limit)]) or ''), """ | def get_decisive_distance( *args ): return sorted(args)[1] | 66c62ec79b0aba296ba768048392060c9834f293 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/66c62ec79b0aba296ba768048392060c9834f293/printutils.py |
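The row above makes the rank cutoff optional: when no limit is given, the whole `AND rank(...) <= ...` fragment is dropped from the query, via the pre-Python-2.5 `cond and A or B` idiom. The same logic with a plain helper function (names and query text here are illustrative, not the original printutils code):

```python
def rank_clause(decisive_distance, limit):
    # emit the cutoff only when a limit is actually given
    if limit is not None:
        return "AND rank(%s) <= %s" % (decisive_distance, limit)
    return ""

print("SELECT ... FROM coinc_inspiral WHERE injected %s" % rank_clause("decisive_distance", 10))
print("SELECT ... FROM coinc_inspiral WHERE injected %s" % rank_clause("decisive_distance", None))
```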
y.segment_def_cdb = x.creator_db) AND \ NOT (segment.start_time > %s OR %s > segment.end_time) \ | y.segment_def_cdb = x.creator_db) \ | def getSciSegs(ifo=None, gpsStart=None, gpsStop=None, cut=bool(False), serverURL=None, segName="DMT-SCIENCE", seglenmin=None, segpading=0 | 66b3b6bdd1f5fd4c09d95bfa909886e3a8002dc7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/66b3b6bdd1f5fd4c09d95bfa909886e3a8002dc7/fu_utils.py |
sqlQuery=query01%(segName,ifo,gpsStop,gpsStart,gpsStop,gpsStart) | sqlQuery=query01%(segName,ifo,gpsStop,gpsStart) | def getSciSegs(ifo=None, gpsStart=None, gpsStop=None, cut=bool(False), serverURL=None, segName="DMT-SCIENCE", seglenmin=None, segpading=0 | 66b3b6bdd1f5fd4c09d95bfa909886e3a8002dc7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/66b3b6bdd1f5fd4c09d95bfa909886e3a8002dc7/fu_utils.py |
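These two rows travel together: removing the `NOT (segment.start_time > %s OR %s > segment.end_time)` filter from query01 deletes two placeholders, so the argument tuple must shrink from six values to four. A toy illustration of keeping %-placeholders and arguments in sync (the query text below is hypothetical, not the original fu_utils query):

```python
query01 = ("SELECT segment.start_time, segment.end_time FROM segment "
           "WHERE name = '%s' AND ifo = '%s' "
           "AND segment.start_time <= %s AND %s <= segment.end_time")
# four placeholders -> exactly four arguments; a stale six-tuple would raise
# "TypeError: not all arguments converted during string formatting"
sqlQuery = query01 % ("DMT-SCIENCE", "L1", 968654558, 968000000)
print(sqlQuery)
```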
AND y.segment_def_cdb = x.creator_db \ | AND y.segment_def_cdb = x.creator_db \ | def __init__(self,LDBDServerURL=None,quiet=bool(False),pickle=None,blinded=False): """ This class setups of for connecting to a LDBD server specified at command line to do segment queries as part of the follow up pipeline. If the user does not specify the LDBD server to use the method will use the environment variable S6_SEGMENT_SERVER to determine who to query. The LDBD URL should be in the following form ldbd://myserver.domain.name:808080. You can specify the path to a background DQ pickle if the path is valid the class opens it otherwise it queries the segment DB and builds the pickle. Warning! recreating the DQ background is VERY slow. """ self.__connection__= None self.__engine__= None self.__installPath__=home_dir()+"/ctorres/followupbackgrounds/dq/" self.__blinded__=blinded self.__blindFlags__=[\ "DMT-INJECTION_INSPIRAL", "DMT-INJECTION"\ ] if pickle==None: self.__backgroundPickle__=None else: self.__backgroundPickle=pickle self.__backgroundPickle__=os.path.expanduser(self.__backgroundPickle__) self.__haveBackgroundDict__=bool(False) self.__havecategories__=bool(False) #A dict for a dict of GPStimes and list all flags seen self.__backgroundDict__=dict() self.__category__=dict() #Access a dict of dicts for a flag names with % stored self.__backgroundResults__=dict() self.__backgroundPoints__=int(1000) self.__columns__=["Ifo","Flag","Ver","Start","Offset",\ "Stop","Offset","Size","DQ Rank","Cat(s)"] #Dict should be a dict of lists self.__backgroundTimesDict__=dict() self.ifos=interferometers self.ifos.sort() self.triggerTime=int(-1) self.serverURL=defaultsegmentserver if LDBDServerURL==None: envServer=None envServer=os.getenv('S6_SEGMENT_SERVER') if envServer!=None: self.serverURL=envServer sys.stderr.write("Warning no LDBD Server URL specified \ | 66b3b6bdd1f5fd4c09d95bfa909886e3a8002dc7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/66b3b6bdd1f5fd4c09d95bfa909886e3a8002dc7/fu_utils.py |
shiftString=shift | shiftString=shiftLabel[shift] | def getFOMLinks(gpsTime=int(0),ifo=("default")): """ Simple method returns a list of links to FOMs ordered by FOM # The list is 2D ie: [['ifo,shift',LINKtoImage,LinktoThumb],['ifo,shift',LinktoImage,LinkToThumb]...] images marked [Eve,Owl,Day] via [p3,p2,p1] in filenames this methd only for S6 and later IFO naming start dates: There were three naming conventions mixed, then p1,p2,p3 and lastly Day,Eve,Owl LHO: 20090724 :: 932428815 LLO: 20090708 :: 931046415 """ urls={ "DEFAULT":"http://www.ligo.caltech.edu/~pshawhan/scilinks.html", "V1":"http://wwwcascina.virgo.infn.it/DetectorOperations/index.htm", "L1":"https://llocds.ligo-la.caltech.edu/scirun/S6/robofom/%s/%s%s_FOM%i%s.gif", "H1":"http://lhocds.ligo-wa.caltech.edu/scirun/S6/robofom/%s/%s%s_FOM%i%s.gif", "H2":"http://lhocds.ligo-wa.caltech.edu/scirun/S6/robofom/%s/%s%s_FOM%i%s.gif" } ifoTag=ifo.upper() shiftDuration=8; #Give the IFO and shift start hour as integer shiftStandardTime={'L1':{'day':14,'eve':22,'owl':6}, 'H1':{'day':16,'eve':0,'owl':8}, 'H2':{'day':16,'eve':0,'owl':8}, 'V1':{'day':6,'eve':14,'owl':22}} shiftOrder=['day','eve','owl'] shiftLabel={'day':'p1','eve':'p3','owl':'p2'} outputURLs=list() if ((ifo==None) or (gpsTime==None)): sys.stdout.write("getFOMLinks called incorrectly \ | 6cd44b04b081138f0ba71fef8fcfaf1fc7a38e27 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/6cd44b04b081138f0ba71fef8fcfaf1fc7a38e27/stfu_pipe.py |
zero_lag_time_slides = {} background_time_slides = {} for id, instrument, offset, is_background in connection.cursor().execute(""" | offset_vectors = {} for id, instrument, offset in connection.cursor().execute(""" | def get_time_slides(connection): """ Query the database for the IDs and offsets of all time slides, and return two dictionaries one containing the all-zero time slides and the other containing the not-all-zero time slides. """ zero_lag_time_slides = {} background_time_slides = {} for id, instrument, offset, is_background in connection.cursor().execute(""" | 4b4b5780a156863532be0b9f80623ca9e7f9af33 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/4b4b5780a156863532be0b9f80623ca9e7f9af33/SnglBurstUtils.py |
offset, EXISTS ( SELECT * FROM time_slide AS a WHERE a.time_slide_id == time_slide.time_slide_id AND a.offset != 0 ) | offset | def get_time_slides(connection): """ Query the database for the IDs and offsets of all time slides, and return two dictionaries one containing the all-zero time slides and the other containing the not-all-zero time slides. """ zero_lag_time_slides = {} background_time_slides = {} for id, instrument, offset, is_background in connection.cursor().execute(""" | 4b4b5780a156863532be0b9f80623ca9e7f9af33 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/4b4b5780a156863532be0b9f80623ca9e7f9af33/SnglBurstUtils.py |
if is_background: if id not in background_time_slides: background_time_slides[id] = {} background_time_slides[id][instrument] = offset else: if id not in zero_lag_time_slides: zero_lag_time_slides[id] = {} zero_lag_time_slides[id][instrument] = offset | if id not in offset_vectors: offset_vectors[id] = {} offset_vectors[id][instrument] = offset zero_lag_time_slides = dict((id, offset_vector) for id, offset_vector in offset_vectors.items() if not any(offset_vector.values())) background_time_slides = dict((id, offset_vector) for id, offset_vector in offset_vectors.items() if any(offset_vector.values())) | def get_time_slides(connection): """ Query the database for the IDs and offsets of all time slides, and return two dictionaries one containing the all-zero time slides and the other containing the not-all-zero time slides. """ zero_lag_time_slides = {} background_time_slides = {} for id, instrument, offset, is_background in connection.cursor().execute(""" | 4b4b5780a156863532be0b9f80623ca9e7f9af33 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/4b4b5780a156863532be0b9f80623ca9e7f9af33/SnglBurstUtils.py |
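The three rows above are one refactor: get_time_slides() now selects plain (id, instrument, offset) rows and classifies each offset vector in Python, instead of computing an is_background flag with a correlated subquery per row. A runnable sketch of the classification step (the row data is illustrative):

```python
rows = [
    ("time_slide:0", "H1", 0.0), ("time_slide:0", "L1", 0.0),   # all-zero -> zero lag
    ("time_slide:1", "H1", 0.0), ("time_slide:1", "L1", 5.0),   # any non-zero -> background
]

offset_vectors = {}
for id, instrument, offset in rows:
    offset_vectors.setdefault(id, {})[instrument] = offset

zero_lag_time_slides = dict((id, vec) for id, vec in offset_vectors.items() if not any(vec.values()))
background_time_slides = dict((id, vec) for id, vec in offset_vectors.items() if any(vec.values()))
assert list(zero_lag_time_slides) == ["time_slide:0"]
assert list(background_time_slides) == ["time_slide:1"]
```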
return r"%s \\times 10^{%d}" % (m, int(e)) | return r"%s \times 10^{%d}" % (m, int(e)) | def latexnumber(s): """ Convert a string of the form "d.dddde-dd" to "d.dddd \times 10^{-dd}" """ m, e = floatpattern.match(s).groups() return r"%s \\times 10^{%d}" % (m, int(e)) | 4b4b5780a156863532be0b9f80623ca9e7f9af33 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/4b4b5780a156863532be0b9f80623ca9e7f9af33/SnglBurstUtils.py |
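The row above is a one-character fix worth spelling out: inside a raw string a backslash is already literal, so r"\\times" puts two backslashes into the output and breaks the LaTeX. A quick check:

```python
assert r"\times" == "\\times"    # one backslash in the resulting string
assert len(r"\\times") == 7      # the raw literal really contains both backslashes
print(r"%s \times 10^{%d}" % ("1.2345", -3))   # -> 1.2345 \times 10^{-3}
```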
rate.to_moving_mean_density(binnedarray, filters.get(name, default_filter)) |  | def finish(self, filters = {}, verbose = False): default_filter = rate.gaussian_window(21) # normalizing each array so that its sum is 1 has the # effect of making the integral of P(x) dx equal 1 after # the array is transformed to an array of densities (which # is done by dividing each bin by dx). N = len(self.zero_lag_rates) + len(self.background_rates) + len(self.injection_rates) n = 0 threads = [] for group, (name, binnedarray) in itertools.chain(zip(["zero lag"] * len(self.zero_lag_rates), self.zero_lag_rates.items()), zip(["background"] * len(self.background_rates), self.background_rates.items()), zip(["injections"] * len(self.injection_rates), self.injection_rates.items())): n += 1 if verbose: print >>sys.stderr, "\t%d / %d: %s \"%s\"" % (n, N, group, name) binnedarray.array /= numpy.sum(binnedarray.array) threads.append(threading.Thread(target = rate.to_moving_mean_density, args = (binnedarray, filters.get(name, default_filter)))) threads[-1].start() rate.to_moving_mean_density(binnedarray, filters.get(name, default_filter)) for thread in threads: thread.join() return self | b2c43f402b25fddd2b8a6094c36b902be73280ae /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b2c43f402b25fddd2b8a6094c36b902be73280ae/ligolw_burca_tailor.py |
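The deletion above (empty add column) removes double work from finish(): each binned array was smoothed once by the worker thread and then again by the leftover synchronous call in the same loop body. A minimal sketch of the corrected start-all-then-join shape, with smooth() standing in for rate.to_moving_mean_density:

```python
import threading

def smooth(binnedarray, width):       # stand-in for rate.to_moving_mean_density
    binnedarray[:] = [x / width for x in binnedarray]

arrays = {"zero lag": [2.0, 4.0], "background": [6.0, 8.0]}
threads = []
for name, binnedarray in arrays.items():
    threads.append(threading.Thread(target=smooth, args=(binnedarray, 2.0)))
    threads[-1].start()
    # no trailing synchronous smooth() here -- that duplicate call was the bug
for thread in threads:
    thread.join()
print(arrays)                         # each array smoothed exactly once
```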
twoDKdePath=os.path.join(margdir,par1_name+'-'+par2_name+'_2Dkernel.png') | figname=par1_name+'-'+par2_name+'_2Dkernel.png' twoDKdePath=os.path.join(margdir,figname) | def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpage from a file containing posterior samples generated by the parameter estimation codes with 1D/2D plots and stats from the marginal posteriors for each parameter/set of parameters. """ if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # commonOutputFileObj=open(data[0]) #Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() ## Load Bayes factors ## # Add Bayes factor information to summary file # if bayesfactornoise is not None: bfile=open(bayesfactornoise,'r') BSN=bfile.read() bfile.close() print 'BSN: %s'%BSN if bayesfactorcoherent is not None: bfile=open(bayesfactorcoherent,'r') BCI=bfile.read() bfile.close() print 'BCI: %s'%BCI #Create an instance of the posterior class using the posterior values loaded #from the file and any injection information (if given). 
pos = bppu.Posterior(commonOutputFileObj,SimInspiralTableEntry=injection) if ('mc' in pos.names or 'mchirp' in pos.names) and \ 'eta' in pos.names and \ ('mass1' not in pos.names or 'm1' not in pos.names) and\ ('m2' not in pos.names or 'm2' not in pos.names): if 'mc' in pos.names: mchirp_name='mc' else: mchirp_name='mchirp' if injection: inj_mass1,inj_mass2=bppu.mc2ms(injection.mchirp,injection.eta) mass1_samps,mass2_samps=bppu.mc2ms(pos[mchirp_name].samples,pos['eta'].samples) mass1_pos=bppu.OneDPosterior('m1',mass1_samps,injected_value=inj_mass1) mass2_pos=bppu.OneDPosterior('m2',mass2_samps,injected_value=inj_mass2) pos.append(mass1_pos) pos.append(mass2_pos) ##Print some summary stats for the user...## #Number of samples print "Number of posterior samples: %i"%len(pos) # Means print 'Means:' print str(pos.means) #Median print 'Median:' print str(pos.medians) #maxL print 'maxL:' max_pos,max_pos_co=pos.maxL print max_pos_co #==================================================================# #Create web page #==================================================================# html=bppu.htmlPage('Posterior PDFs') #Create a section for meta-data/run information html_meta=html.add_section('Summary') html_meta.p('Produced from '+str(len(pos))+' posterior samples.') html_meta.p('Samples read from %s'%(data[0])) #Create a section for model selection results (if they exist) if bayesfactornoise is not None: html_model=html.add_section('Model selection') html_model.p('log Bayes factor ( coherent vs gaussian noise) = %s, Bayes factor=%f'%(BSN,exp(float(BSN)))) if bayesfactorcoherent is not None: html_model.p('log Bayes factor ( coherent vs incoherent OR noise ) = %s, Bayes factor=%f'%(BCI,exp(float(BCI)))) #Create a section for summary statistics html_stats=html.add_section('Summary statistics') html_stats.write(str(pos)) #==================================================================# #Generate sky map #==================================================================# #If sky resolution parameter has been specified try and create sky map... skyreses=None sky_injection_cl=None if skyres is not None and 'ra' in pos.names and 'dec' in pos.names: #Greedy bin sky samples (ra,dec) into a grid on the sky which preserves #? top_ranked_sky_pixels,sky_injection_cl,skyreses,injection_area=bppu.greedy_bin_sky(pos,skyres,confidence_levels) print "BCI for sky area:" print skyreses #Create sky map in outdir bppu.plot_sky_map(top_ranked_sky_pixels,outdir) #Create a web page section for sky localization results/plots html_sky=html.add_section('Sky Localization') if injection: if sky_injection_cl: html_sky.p('Injection found at confidence interval %f in sky location'%(sky_injection_cl)) else: html_sky.p('Injection not found in posterior bins in sky location!') html_sky.write('<img width="35%" src="skymap.png"/>') if skyres is not None: html_sky_write='<table border="1"><tr><th>Confidence region</th><th>size (sq. deg)</th></tr>' fracs=skyreses.keys() fracs.sort() skysizes=[skyreses[frac] for frac in fracs] for frac,skysize in zip(fracs,skysizes): html_sky_write+=('<tr><td>%f</td><td>%f</td></tr>'%(frac,skysize)) html_sky_write+=('</table>') html_sky.write(html_sky_write) #==================================================================# #2D posteriors #==================================================================# #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . 
The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. #Make a folder for the 2D kde plots margdir=os.path.join(outdir,'2Dkde') if not os.path.isdir(margdir): os.makedirs(margdir) twobinsdir=os.path.join(outdir,'2Dbins') if not os.path.isdir(twobinsdir): os.makedirs(twobinsdir) #Add a section to the webpage for a table of the confidence interval #results. html_tcig=html.add_section('2D confidence intervals (greedy binning)') #Generate the top part of the table html_tcig_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_tcig_write+='<th>%f</th>'%cl if injection: html_tcig_write+='<th>Injection Confidence Level</th>' html_tcig_write+='<th>Injection Confidence Interval</th>' html_tcig_write+='</tr>' #= Add a section for a table of 2D marginal PDFs (kde) html_tcmp=html.add_section('2D Marginal PDFs') html_tcmp.br() #Table matter html_tcmp_write='<table border="1" width="100%">' row_count=0 for par1_name,par2_name in twoDGreedyMenu: par1_name=par1_name.lower() par2_name=par2_name.lower() print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) try: pos[par1_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par1_name continue try: pos[par2_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par2_name continue #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Form greedy binning input structure greedy2Params={par1_name:par1_bin,par2_name:par2_bin} #Greedy bin the posterior samples toppoints,injection_cl,reses,injection_area=\ bppu.greedy_bin_two_param(pos,greedy2Params,confidence_levels) print "BCI %s-%s:"%(par1_name,par2_name) print reses #Generate new BCI html table row BCItableline='<tr><td>%s-%s</td>'%(par1_name,par2_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injection_cl is not None: BCItableline+='<td>%f</td>'%injection_cl BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_tcig_write+=BCItableline #= Plot 2D histograms of greedily binned points =# #greedy2PlotFig=bppu.plot_two_param_greedy_bins(np.array(toppoints),pos,greedy2Params) #greedy2PlotFig.savefig(os.path.join(twobinsdir,'%s-%s_greedy2.png'%(par1_name,par2_name))) #= Generate 2D kde plots =# print 'Generating %s-%s plot'%(par1_name,par2_name) par1_pos=pos[par1_name].samples par2_pos=pos[par2_name].samples if (size(np.unique(par1_pos))<2 or size(np.unique(par2_pos))<2): continue plot2DkdeParams={par1_name:50,par2_name:50} myfig=bppu.plot_two_param_kde(pos,plot2DkdeParams) twoDKdePath=os.path.join(margdir,par1_name+'-'+par2_name+'_2Dkernel.png') if row_count==0: html_tcmp_write+='<tr>' html_tcmp_write+='<td width="30%"><img width="100%" src="'+twoDKdePath+'"/></td>' row_count+=1 if row_count==3: html_tcmp_write+='</tr>' row_count=0 myfig.savefig(twoDKdePath) #Finish off the BCI table and write it into the etree html_tcig_write+='</table>' html_tcig.write(html_tcig_write) #Finish off the 2D kde plot table while row_count!=0: html_tcmp_write+='<td/>' row_count+=1 if row_count==3: row_count=0 html_tcmp_write+='</tr>' html_tcmp_write+='</table>' 
html_tcmp.write(html_tcmp_write) #Add a link to all plots html_tcmp.br() html_tcmp.a("2D/",'All 2D Marginal PDFs') html_tcmp.hr() #==================================================================# #1D posteriors #==================================================================# #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. #Add section for 1D confidence intervals html_ogci=html.add_section('1D confidence intervals (greedy binning)') #Generate the top part of the table html_ogci_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_ogci_write+='<th>%f</th>'%cl if injection: html_ogci_write+='<th>Injection Confidence Level</th>' html_ogci_write+='<th>Injection Confidence Interval</th>' html_ogci_write+='</tr>' #Add section for 1D marginal PDFs and sample plots html_ompdf=html.add_section('1D marginal posterior PDFs') html_ompdf.br() #Table matter html_ompdf_write= '<table><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>' onepdfdir=os.path.join(outdir,'1Dpdf') if not os.path.isdir(onepdfdir): os.makedirs(onepdfdir) sampsdir=os.path.join(outdir,'1Dsamps') if not os.path.isdir(sampsdir): os.makedirs(sampsdir) for par_name in oneDMenu: par_name=par_name.lower() print "Binning %s to determine confidence levels ..."%par_name try: pos[par_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue binParams={par_name:par_bin} toppoints,injectionconfidence,reses,injection_area=bppu.greedy_bin_one_param(pos,binParams,confidence_levels) oneDContCL,oneDContInj = bppu.contigious_interval_one_param(pos,binParams,confidence_levels) #Generate new BCI html table row BCItableline='<tr><td>%s</td>'%(par_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injectionconfidence is not None and injection_area is not None: BCItableline+='<td>%f</td>'%injectionconfidence BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_ogci_write+=BCItableline #Generate 1D histogram/kde plots print "Generating 1D plot for %s."%par_name oneDPDFParams={par_name:50} rbins,plotFig=bppu.plot_one_param_pdf(pos,oneDPDFParams) figname=par_name+'.png' oneDplotPath=os.path.join(onepdfdir,figname) plotFig.savefig(oneDplotPath) if rbins: print "r of injected value of %s (bins) = %f"%(par_name, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) pos_samps=pos[par_name].samples plt.plot(pos_samps,'.',figure=myfig) injpar=pos[par_name].injval if injpar: if min(pos_samps)<injpar and max(pos_samps)>injpar: plt.plot([0,len(pos_samps)],[injpar,injpar],'r-.') myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.png'))) html_ompdf_write+='<tr><td><img src="1Dpdf/'+figname+'"/></td><td><img src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td></tr>' html_ompdf_write+='</table>' html_ompdf.write(html_ompdf_write) html_ogci_write+='</table>' html_ogci.write(html_ogci_write) html_ogci.hr() html_ogci.br() html_ompdf.hr() html_ompdf.br() html_footer=html.add_section('') html_footer.p('Produced using cbcBayesPostProc.py at '+strftime("%Y-%m-%d %H:%M:%S")+' .') html_footer.p(git_version.verbose_msg) #Save results page resultspage=open(os.path.join(outdir,'posplots.html'),'w') 
resultspage.write(str(html)) # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') input_file=open(data[0]) posfile.write(input_file.read()) # posfilename2=os.path.join(outdir,'posterior_samples2.dat') pos.write_to_file(posfilename2) #Close files input_file.close() posfile.close() resultspage.close() | 25081af9755dc3b90ae7c9418d3526e51860c233 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/25081af9755dc3b90ae7c9418d3526e51860c233/cbcBayesPostProc.py |
html_tcmp_write+='<td width="30%"><img width="100%" src="'+twoDKdePath+'"/></td>' | html_tcmp_write+='<td width="30%"><img width="100%" src="2Dkde/'+twoDKdePath+'"/></td>' | (context: cbcBayesPostProc, identical to the row above) | 25081af9755dc3b90ae7c9418d3526e51860c233 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/25081af9755dc3b90ae7c9418d3526e51860c233/cbcBayesPostProc.py |
html_tcmp.a("2D/",'All 2D Marginal PDFs') | html_tcmp.a("2Dkde/",'All 2D marginal PDFs (kde)') | (context: cbcBayesPostProc, identical to the row above) | 25081af9755dc3b90ae7c9418d3526e51860c233 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/25081af9755dc3b90ae7c9418d3526e51860c233/cbcBayesPostProc.py |
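The three cbcBayesPostProc rows above all chase the same problem: figures are saved under outdir/2Dkde/ on disk, but posplots.html must reference them by a path relative to the page, and the "all plots" link must point at that same subdirectory rather than the stale "2D/" name. A sketch of the disk-path versus img-src distinction (directory names as in the rows, everything else illustrative):

```python
import os

outdir = "results"                            # illustrative output directory
figname = "m1-m2_2Dkernel.png"
margdir = os.path.join(outdir, "2Dkde")
disk_path = os.path.join(margdir, figname)    # what savefig() needs
img_src = "2Dkde/" + figname                  # what the <img> tag needs,
                                              # relative to outdir/posplots.html
html = '<td width="30%%"><img width="100%%" src="%s"/></td>' % img_src
print(disk_path, "->", html)
```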
inspiral.InspiralNode.__init__(self,job) | pipeline.CondorDAGNode.__init__(self,job) pipeline.AnalysisNode.__init__(self) | def __init__(self, dag, job, cp, opts, sngl, frame_cache, chia, tag, p_nodes=[]): | 8397d083fc2a94fb7188f06b24add6fa5dcb7930 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/8397d083fc2a94fb7188f06b24add6fa5dcb7930/stfu_pipe.py |
self.add_var_opt(param,value) |  | def __init__(self, dag, job, cp, opts, sngl, frame_cache, chia, tag, p_nodes=[]): | 8397d083fc2a94fb7188f06b24add6fa5dcb7930 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/8397d083fc2a94fb7188f06b24add6fa5dcb7930/stfu_pipe.py |
inspiral.ChiaNode.__init__(self,job) | pipeline.CondorDAGNode.__init__(self,job) pipeline.AnalysisNode.__init__(self) | def __init__(self, dag, job, cp, opts, coinc, inspiral_node_dict, chia_node =None, p_nodes = []): | 8397d083fc2a94fb7188f06b24add6fa5dcb7930 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/8397d083fc2a94fb7188f06b24add6fa5dcb7930/stfu_pipe.py |
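Two of the rows above make follow-up nodes initialize the glue.pipeline base classes directly instead of going through the inspiral node classes. The constructor arguments match the diffs (CondorDAGNode takes the job, AnalysisNode takes nothing); the two tiny classes below are stand-ins for the real glue.pipeline classes, and the node class name is hypothetical:

```python
class CondorDAGNode(object):                 # stand-in for pipeline.CondorDAGNode
    def __init__(self, job):
        self.job = job

class AnalysisNode(object):                  # stand-in for pipeline.AnalysisNode
    def __init__(self):
        self.output_cache = []

class FollowUpChiaNode(CondorDAGNode, AnalysisNode):   # hypothetical node class
    def __init__(self, job):
        # initialize each base explicitly, as the fixed stfu_pipe nodes do
        CondorDAGNode.__init__(self, job)
        AnalysisNode.__init__(self)

node = FollowUpChiaNode(job="chia.sub")
print(node.job, node.output_cache)
```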
f = open(opts.galaxy_priors_dir+'/'+str(int(mineffD))+'Mpc.pkl','r') | f = open(opts.galaxy_priors_dir+'/galaxy_prior_'+str(int(mineffD))+'Mpc.pkl','r') | def get_unique_filename(name): """ use this to avoid name collisions """ counter = 1 base_name, ext = os.path.splitext(name) while os.path.isfile(name): name = base_name + '_' + str(counter) + ext counter += 1 return name | 5d57777704fdde7db5dca49e7d89c4111f8d4dc7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5d57777704fdde7db5dca49e7d89c4111f8d4dc7/run_skypoints.py |
coarsedict = {} | coarsedict = {} | def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid[:] coarsedict = {} ds = coarseres*pi/180.0 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4.0 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*sin(cpt[0])*sin(cpt[0]) <= ds*ds/4.0: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp | 18bcbad7a0f00300278669b07bffc33c996bd96d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/18bcbad7a0f00300278669b07bffc33c996bd96d/skylocutils.py |
if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4.0 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*sin(cpt[0])*sin(cpt[0]) <= ds*ds/4.0: | if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) - ds*ds/4.0 <= epsilon and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*sin(cpt[0])*sin(cpt[0]) \ - ds*ds/4.0 <= epsilon: | def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid[:] coarsedict = {} ds = coarseres*pi/180.0 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4.0 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*sin(cpt[0])*sin(cpt[0]) <= ds*ds/4.0: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp | 18bcbad7a0f00300278669b07bffc33c996bd96d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/18bcbad7a0f00300278669b07bffc33c996bd96d/skylocutils.py |
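The row above loosens an exact floating-point comparison in map_grids: a fine-grid point lying exactly on a coarse-bin boundary can fail `dist**2 <= ds**2/4` by one ulp, so the fixed code compares the difference against a small epsilon instead. A runnable illustration (epsilon is presumably defined elsewhere in skylocutils; the value here is an assumption):

```python
from math import pi, sin

epsilon = 1e-12                     # assumed tolerance, not taken from the source
coarseres = 4.0
ds = coarseres * pi / 180.0
cpt = (0.5, 1.0)
fpt = (0.5 + ds / 2.0, 1.0)         # fine point exactly on the bin boundary

inside = ((cpt[0] - fpt[0]) ** 2 - ds * ds / 4.0 <= epsilon and
          (cpt[1] - fpt[1]) ** 2 * sin(cpt[0]) ** 2 - ds * ds / 4.0 <= epsilon)
print(inside)                       # True; the plain <= form can fail by one ulp
```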
mv $1 $2/. tar -xzvf $2/$1 | currentPath=`pwd` ; mv $1 $2/. ; cd $2 ; tar -xzvf $1 ; cd $currentPath ; | def setup_distrib_script(self,tag_base): distrib_script = open('distribRemoteScan_'+tag_base+'.sh','w') distrib_script.write("""#!/bin/bash | 154fa74bf8e4d8677cc922914fbb92d97e85216c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/154fa74bf8e4d8677cc922914fbb92d97e85216c/stfu_pipe.py |
rm $2/$1 | rm $2/$1 ; | def setup_distrib_script(self,tag_base): distrib_script = open('distribRemoteScan_'+tag_base+'.sh','w') distrib_script.write("""#!/bin/bash | 154fa74bf8e4d8677cc922914fbb92d97e85216c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/154fa74bf8e4d8677cc922914fbb92d97e85216c/stfu_pipe.py |
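These two bash diffs chain the unpack commands with ';' and restore the working directory, so the script body survives being emitted by the Python writer in setup_distrib_script. A sketch of the corrected body being written (the output filename is illustrative):

script_body = """#!/bin/bash
currentPath=`pwd` ; mv $1 $2/. ; cd $2 ; tar -xzvf $1 ; cd $currentPath ;
rm $2/$1 ;
"""
with open('distribRemoteScan_example.sh', 'w') as distrib_script:
    distrib_script.write(script_body)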
raise NotImplemented | raise NotImplementedError | def add_content(self, data, label="_nolabel_"): """ Stub. Replace with a method that appends values or lists of values to self.data_sets and appends labels to self.data_labels. Feel free to accept complicated inputs, but try to store only the raw numbers that will enter the plot. """ raise NotImplemented | 26a99ad4b2deb087e7d6b8eba6399c04c01d95b4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/26a99ad4b2deb087e7d6b8eba6399c04c01d95b4/plotutils.py |
raise NotImplemented | raise NotImplementedError | def finalize(self): """ Stub. Replace with a function that creates and makes your plot pretty. Do not do I/O here. """ raise NotImplemented | 26a99ad4b2deb087e7d6b8eba6399c04c01d95b4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/26a99ad4b2deb087e7d6b8eba6399c04c01d95b4/plotutils.py |
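Both stubs above make the same fix: NotImplemented is a sentinel constant for rich comparisons, not an exception class, so raise NotImplemented fails with a TypeError instead of signalling "override me". The corrected stub pattern:

class BasePlot(object):
    def add_content(self, data, label="_nolabel_"):
        # NotImplementedError is a real exception: the right abstract hook.
        raise NotImplementedError("subclasses must store data/labels here")

    def finalize(self):
        raise NotImplementedError("subclasses must build the figure here")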
for sngl in coincEvent.sngl_inspiral.itervalues(): myArgString=myArgString+"%s,"%sngl.ifo | if hasattr(coincEvent, "sngl_inspiral"): for sngl in coincEvent.sngl_inspiral.itervalues(): myArgString=myArgString+"%s,"%sngl.ifo elif hasattr(coincEvent, "ifos_list"): for ifo in coincEvent.ifos_list: myArgString=myArgString+"%s,"%ifo | def __init__(self, dag, job, cp, opts, coincEvent=None): """ """ self.__conditionalLoadDefaults__(findFlagsNode.defaults,cp) pipeline.CondorDAGNode.__init__(self,job) self.add_var_opt("trigger-time",coincEvent.time) #Output filename oFilename="%s-findFlags_%s_%s.wiki"%(coincEvent.instruments, coincEvent.ifos, coincEvent.time) self.add_var_opt("output-file",job.outputPath+'/DataProducts/'+oFilename) self.add_var_opt("segment-url",cp.get('findFlags','segment-url')) self.add_var_opt("output-format",cp.get('findFlags','output-format')) self.add_var_opt("window",cp.get('findFlags','window')) #IFO arg string myArgString="" for sngl in coincEvent.sngl_inspiral.itervalues(): myArgString=myArgString+"%s,"%sngl.ifo myArgString=myArgString.rstrip(",") self.add_var_opt("ifo-list",myArgString) if not opts.disable_dag_categories: self.set_category(job.name.lower()) | 56395bb647ab92635e1398364b06f48bf65527b3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/56395bb647ab92635e1398364b06f48bf65527b3/stfu_pipe.py |
for sngl in coincEvent.sngl_inspiral.itervalues(): myArgString=myArgString+"%s,"%sngl.ifo | if hasattr(coincEvent, "sngl_inspiral"): for sngl in coincEvent.sngl_inspiral.itervalues(): myArgString=myArgString+"%s,"%sngl.ifo elif hasattr(coincEvent, "ifos_list"): for ifo in coincEvent.ifos_list: myArgString=myArgString+"%s,"%ifo | def __init__(self, dag, job, cp, opts, coincEvent=None): """ """ self.__conditionalLoadDefaults__(findVetosNode.defaults,cp) pipeline.CondorDAGNode.__init__(self,job) self.add_var_opt("trigger-time",coincEvent.time) #Output filename oFilename="%s-findVetos_%s_%s.wiki"%(coincEvent.instruments, coincEvent.ifos, coincEvent.time) self.add_var_opt("output-file",job.outputPath+'/DataProducts/'+oFilename) self.add_var_opt("segment-url",cp.get('findFlags','segment-url')) self.add_var_opt("output-format",cp.get('findFlags','output-format')) self.add_var_opt("window",cp.get('findFlags','window')) #IFO arg string myArgString="" for sngl in coincEvent.sngl_inspiral.itervalues(): myArgString=myArgString+"%s,"%sngl.ifo myArgString=myArgString.rstrip(",") self.add_var_opt("ifo-list",myArgString) if not opts.disable_dag_categories: self.set_category(job.name.lower()) if not opts.no_findVetoes: dag.add_node(self) self.validate() else: self.invalidate() | 56395bb647ab92635e1398364b06f48bf65527b3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/56395bb647ab92635e1398364b06f48bf65527b3/stfu_pipe.py |
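Both node constructors gain the same duck-typed branch: accept either a coinc carrying a sngl_inspiral mapping or one carrying a plain ifos_list. A self-contained sketch of the extracted helper (the function name is hypothetical; itervalues() matches the Python 2 idiom of the source):

def ifo_arg_string(coincEvent):
    ifos = []
    if hasattr(coincEvent, "sngl_inspiral"):
        ifos = [sngl.ifo for sngl in coincEvent.sngl_inspiral.itervalues()]
    elif hasattr(coincEvent, "ifos_list"):
        ifos = list(coincEvent.ifos_list)
    return ",".join(ifos)   # no trailing comma produced, so no rstrip needed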
maxbin=0 | def getinjpar(inj,parnum): if parnum==0: return inj.mchirp if parnum==1: return inj.eta if parnum==2: return inj.get_end() if parnum==3: return inj.phi0 if parnum==4: return inj.distance if parnum==5: return inj.longitude if parnum==6: return inj.latitude if parnum==7: return inj.polarization if parnum==8: return inj.inclination return None | be81ea5f646bb2637267e96ded5c2c39e7847000 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/be81ea5f646bb2637267e96ded5c2c39e7847000/OddsPostProc.py |
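The getinjpar ladder in this record's context maps a parameter index to an injection attribute with nine chained ifs; the same table can be expressed as a tuple of getters. A sketch preserving the ordering:

_INJPAR_GETTERS = (
    lambda inj: inj.mchirp,        # 0
    lambda inj: inj.eta,           # 1
    lambda inj: inj.get_end(),     # 2
    lambda inj: inj.phi0,          # 3
    lambda inj: inj.distance,      # 4
    lambda inj: inj.longitude,     # 5
    lambda inj: inj.latitude,      # 6
    lambda inj: inj.polarization,  # 7
    lambda inj: inj.inclination,   # 8
)

def getinjpar(inj, parnum):
    if 0 <= parnum < len(_INJPAR_GETTERS):
        return _INJPAR_GETTERS[parnum](inj)
    return None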
|
hist[maxbin]=0 frac=frac+(maxbin/len(pos)) | hist[maxpos]=0 frac=frac+(float(maxbin)/float(len(pos))) | def getinjpar(inj,parnum): if parnum==0: return inj.mchirp if parnum==1: return inj.eta if parnum==2: return inj.get_end() if parnum==3: return inj.phi0 if parnum==4: return inj.distance if parnum==5: return inj.longitude if parnum==6: return inj.latitude if parnum==7: return inj.polarization if parnum==8: return inj.inclination return None | be81ea5f646bb2637267e96ded5c2c39e7847000 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/be81ea5f646bb2637267e96ded5c2c39e7847000/OddsPostProc.py |
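Two bugs fixed in one line: hist should be zeroed at the bin's position (maxpos), not indexed by the bin's count, and maxbin/len(pos) is Python 2 integer division, which truncates to 0 whenever maxbin < len(pos). A demonstration of the division half:

maxbin, nsamples = 37, 1000
frac = 0.0
frac += maxbin / nsamples                # Python 2: integer division adds 0
frac += float(maxbin) / float(nsamples)  # adds 0.037 as intended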
htmlfile.write('<table border=1><tr>') | htmlfile.write('<table border=1 width=100%><tr>') | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 53137fadc21ddd0e3e68ed6dda56793f5e2409e7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/53137fadc21ddd0e3e68ed6dda56793f5e2409e7/cbcBayesSkyRes.py |
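This record's diff widens the mean-estimates table to the full page. A self-contained sketch of the reduce-folded row writing from the context, with the widened tag applied (parameter names and values are illustrative):

paramnames = ['mchirp', 'eta', 'dist']   # illustrative
meanStr = ['1.21', '0.24', '35.0']
htmlfile = open('posplots_example.html', 'w')
htmlfile.write('<table border=1 width=100%><tr>')
htmlfile.write('<td>' + reduce(lambda a, b: a + '<td>' + b, paramnames)
               + '<td>logLmax</tr><tr>')
htmlfile.write('<td>' + reduce(lambda a, b: a + '<td>' + b, meanStr) + '</tr>')
htmlfile.write('</table>')
htmlfile.close()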
oneDplotPath=os.path.join(outdir,param+'.png') | figname=param+'.png' oneDplotPath=os.path.join(outdir,figname) | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 53137fadc21ddd0e3e68ed6dda56793f5e2409e7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/53137fadc21ddd0e3e68ed6dda56793f5e2409e7/cbcBayesSkyRes.py |
myfig.savefig(os.path.join(outdir,param+'_samps.png')) | myfig.savefig(os.path.join(outdir,figname.replace('.png','_samps.png'))) | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 53137fadc21ddd0e3e68ed6dda56793f5e2409e7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/53137fadc21ddd0e3e68ed6dda56793f5e2409e7/cbcBayesSkyRes.py |
oneDplotPaths.append(oneDplotPath) | oneDplotPaths.append(figname) | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 53137fadc21ddd0e3e68ed6dda56793f5e2409e7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/53137fadc21ddd0e3e68ed6dda56793f5e2409e7/cbcBayesSkyRes.py |
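The context above ranks probability bins via bppu.greedyBin1/greedyBin2 and then accumulates them into confidence intervals. A minimal one-dimensional sketch of that greedy idea, with a hypothetical helper name rather than the pylal implementation:

import numpy as np

def greedy_interval_size(samples, bin_width, level):
    # Rank histogram bins by occupancy and accumulate probability until
    # the requested confidence level is reached; the interval "size" is
    # the total length of the bins used.
    samples = np.asarray(samples, dtype=float)
    nbins = max(1, int(np.ceil((samples.max() - samples.min()) / bin_width)))
    counts, _ = np.histogram(samples, bins=nbins)
    ranked = np.sort(counts)[::-1]
    cumfrac = np.cumsum(ranked) / float(len(samples))
    nused = int(np.searchsorted(cumfrac, level)) + 1
    return nused * bin_width

print(greedy_interval_size(np.random.randn(10000), 0.1, 0.68))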
oneDMenu=['mtotal','m1','m2','mchirp','mc','distance','distMPC','dist','iota','eta','RA','dec','a1','a2','phi1','theta1','phi2','theta2'] | oneDMenu=['mtotal','m1','m2','mchirp','mc','distance','distMPC','dist','iota','psi','eta','RA','dec','a1','a2','phi1','theta1','phi2','theta2'] | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 53137fadc21ddd0e3e68ed6dda56793f5e2409e7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/53137fadc21ddd0e3e68ed6dda56793f5e2409e7/cbcBayesSkyRes.py |
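The edit above adds the polarisation angle 'psi' to oneDMenu; the surrounding loops already tolerate menu entries with no matching chain column, so new names can be appended freely. That guard idiom in isolation, with hypothetical names:

def select_columns(menu, paramnames):
    # Keep only menu entries that actually appear in the posterior chain,
    # mirroring the try/except ValueError guards used in the loops above.
    indices = {}
    for name in menu:
        try:
            indices[name] = paramnames.index(name)
        except ValueError:
            print("No input chain for %s, skipping." % name)
    return indices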
twoDplots=[['mc','eta'],['mchirp','eta'],['m1','m2'],['mtotal','eta'],['distance','iota'],['dist','iota'],['RA','dec'],['m1','dist'],['m2','dist']] | twoDplots=[['mc','eta'],['mchirp','eta'],['m1','m2'],['mtotal','eta'],['distance','iota'],['dist','iota'],['RA','dec'],['m1','dist'],['m2','dist'],['psi','iota'],['psi','distance'],['psi','dist'],['psi','phi0']] | def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # #If sky resolution parameter has been specified try and create sky map. skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. 
summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) twoDGreedyCL={} twoDGreedyInj={} for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. 
for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior 
PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for %s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) oneDplotPath=os.path.join(outdir,param+'.png') plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,param+'_samps.png')) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking 
bins',param,rbins) oneDplotPaths.append(oneDplotPath) for plotPath in oneDplotPaths: htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo) | 53137fadc21ddd0e3e68ed6dda56793f5e2409e7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/53137fadc21ddd0e3e68ed6dda56793f5e2409e7/cbcBayesSkyRes.py |
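Likewise, the new psi pairs in twoDplots are only plotted when both members exist in the chain. A compact pre-filter for the pair list, as a sketch rather than the script's own code:

def available_pairs(pairs, paramnames):
    # Drop any (par1, par2) pair with a member missing from the chain.
    have = set(paramnames)
    return [(a, b) for a, b in pairs if a in have and b in have]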
os.path.join("bin", "coh_PTF_html_summary"), os.path.join("bin", "coh_PTF_injfinder"), os.path.join("bin", "coh_PTF_sbv_plotter"), os.path.join("bin", "coh_PTF_trig_cluster"), os.path.join("bin", "coh_PTF_trig_combiner"), os.path.join("bin", "ring_post"), | os.path.join("bin", "coh_PTF_html_summary"), os.path.join("bin", "coh_PTF_injfinder"), os.path.join("bin", "coh_PTF_sbv_plotter"), os.path.join("bin", "coh_PTF_trig_cluster"), os.path.join("bin", "coh_PTF_trig_combiner"), os.path.join("bin", "ring_post"), | def run(self): # remove the automatically generated user env scripts for script in ["pylal-user-env.sh", "pylal-user-env.csh"]: log.info("removing " + script ) try: os.unlink(os.path.join("etc", script)) except: pass | de51ad9d89a7b3093c8fb0b3a547b5f14578ca8e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/de51ad9d89a7b3093c8fb0b3a547b5f14578ca8e/setup.py |
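This row touches the list of executables that setup.py installs. A minimal sketch of how such entries are declared through distutils, with placeholder metadata standing in for the real setup.py:

from distutils.core import setup
import os

setup(
    name="example-pylal-like",   # placeholder, not the real package metadata
    version="0.0",
    scripts=[
        # each listed file is copied into the scripts install directory
        os.path.join("bin", "coh_PTF_trig_combiner"),
        os.path.join("bin", "coh_PTF_trig_cluster"),
    ],
)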
if pt[3] <= dtrss_inj and pt[3] != 0.0: | if pt[3] <= dtrss_inj: | def get_unique_filename(name): """ use this to avoid name collisions """ counter = 1 base_name, ext = os.path.splitext(name) while os.path.isfile(base_name): base_name = base_name + '_' + str(counter) + ext counter += 1 return base_name + ext | 3699225b3ce593e2fd33be880be8bf442ddf1f84 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/3699225b3ce593e2fd33be880be8bf442ddf1f84/run_skypoints.py |
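The change drops the pt[3] != 0.0 guard, so a sky point whose time-delay residual is exactly zero now counts as at least as good as the injection. The enclosing loop is effectively computing a rank; schematically, with hypothetical names:

def injection_rank(points, dtrss_inj):
    # Fraction of searched sky points at least as close (in dtrss, the
    # fourth column) as the injected value; with the fix, a point with
    # dtrss == 0.0 is no longer excluded from the count.
    better = sum(1 for pt in points if pt[3] <= dtrss_inj)
    return better / float(len(points))

print(injection_rank([(0, 0, 0, 0.0), (0, 0, 0, 2.5)], 1.0))   # 0.5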
self._InspiralAnalysisNode__pad_data = 0 | self._AnalysisNode__pad_data = 0 | def __init__(self, options, cp, dir='', tag_base=''): """ """ self.__conditionalLoadDefaults__(followUpChiaJob.defaults,cp) #self.__prog__ = 'followUpChiaJob' self.__executable = string.strip(cp.get('condor','chia')) self.__universe = "standard" pipeline.CondorDAGJob.__init__(self,self.__universe,self.__executable) self.add_condor_cmd('getenv','True') self._InspiralAnalysisNode__pad_data = 0 | 24648911372ea1df253fc45ee5f0a9b8ead25a34 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/24648911372ea1df253fc45ee5f0a9b8ead25a34/stfu_pipe.py |
self._InspiralAnalysisNode__pad_data = int(value) | self._AnalysisNode__pad_data = int(value) | def __init__(self, dag, job, cp, opts, sngl, frame_cache, chia, tag, p_nodes=[]): | 24648911372ea1df253fc45ee5f0a9b8ead25a34 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/24648911372ea1df253fc45ee5f0a9b8ead25a34/stfu_pipe.py |
self._InspiralAnalysisNode__pad_data = 0 | self._AnalysisNode__pad_data = 0 | def __init__(self, dag, job, cp, opts, coinc, inspiral_node_dict, chia_node =None, p_nodes = []): | 24648911372ea1df253fc45ee5f0a9b8ead25a34 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/24648911372ea1df253fc45ee5f0a9b8ead25a34/stfu_pipe.py |
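All three rows above correct the same mistake: inside class AnalysisNode, the private attribute self.__pad_data is name-mangled to _AnalysisNode__pad_data, so assigning _InspiralAnalysisNode__pad_data from a subclass creates an unrelated attribute that the base class never reads. A self-contained demonstration of the mangling rule:

class AnalysisNode(object):
    def __init__(self):
        self.__pad_data = None      # stored as _AnalysisNode__pad_data

    def get_pad_data(self):
        return self.__pad_data

class InspiralAnalysisNode(AnalysisNode):
    def set_pad_wrong(self, value):
        self._InspiralAnalysisNode__pad_data = value   # new, unrelated attribute

    def set_pad_right(self, value):
        self._AnalysisNode__pad_data = value           # updates the real one

node = InspiralAnalysisNode()
node.set_pad_wrong(4)
assert node.get_pad_data() is None    # base attribute untouched
node.set_pad_right(4)
assert node.get_pad_data() == 4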
flines[:,i]=asin(flines[:,i]) | flines[:,i]=arcsin(flines[:,i]) | def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') for line in infile: sline=line.split() proceed=True for s in sline: if dec.search(s) is not None: print 'Warning! Ignoring non-numeric data after the header: %s'%(sline) proceed=False if proceed: llines.append(array(map(float,sline))) flines=array(llines) for i in range(0,len(header)): if header[i].lower().find('log')!=-1 and header[i].lower()!='logl': print 'exponentiating %s'%(header[i]) flines[:,i]=exp(flines[:,i]) header[i]=header[i].replace('log','') if header[i].lower().find('sin')!=-1: print 'asining %s'%(header[i]) flines[:,i]=asin(flines[:,i]) header[i]=header[i].replace('sin','') if header[i].lower().find('cos')!=-1: print 'acosing %s'%(header[i]) flines[:,i]=acos(flines[:,i]) header[i]=header[i].replace('cos','') header[i]=header[i].replace('(','') header[i]=header[i].replace(')','') print 'Read columns %s'%(str(header)) return header,flines | 11f936baaaabc17f21e8899cc738b602b5998e8c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/11f936baaaabc17f21e8899cc738b602b5998e8c/cbcBayesSkyRes.py |
flines[:,i]=acos(flines[:,i]) | flines[:,i]=arccos(flines[:,i]) | def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') for line in infile: sline=line.split() proceed=True for s in sline: if dec.search(s) is not None: print 'Warning! Ignoring non-numeric data after the header: %s'%(sline) proceed=False if proceed: llines.append(array(map(float,sline))) flines=array(llines) for i in range(0,len(header)): if header[i].lower().find('log')!=-1 and header[i].lower()!='logl': print 'exponentiating %s'%(header[i]) flines[:,i]=exp(flines[:,i]) header[i]=header[i].replace('log','') if header[i].lower().find('sin')!=-1: print 'asining %s'%(header[i]) flines[:,i]=asin(flines[:,i]) header[i]=header[i].replace('sin','') if header[i].lower().find('cos')!=-1: print 'acosing %s'%(header[i]) flines[:,i]=acos(flines[:,i]) header[i]=header[i].replace('cos','') header[i]=header[i].replace('(','') header[i]=header[i].replace(')','') print 'Read columns %s'%(str(header)) return header,flines | 11f936baaaabc17f21e8899cc738b602b5998e8c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/11f936baaaabc17f21e8899cc738b602b5998e8c/cbcBayesSkyRes.py |
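Both rows fix the same NameError: NumPy spells its elementwise inverse trig functions arcsin and arccos, while the asin/acos spellings belong to the scalar math module and cannot take arrays. For example:

import numpy as np

x = np.array([0.0, 0.5, 1.0])
print(np.arcsin(x))     # elementwise inverse sine
print(np.arccos(x))     # elementwise inverse cosine
# The removed lines called asin()/acos(), which the NumPy namespace of
# this code's era does not define; math.asin/math.acos exist but are
# scalar-only.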
def add(self, *args): packing.Bin.add(self, *args) | def add(self, cache_entry): packing.Bin.add(self, cache_entry, cache_entry.to_segmentlistdict()) | def add(self, *args): packing.Bin.add(self, *args) self.extent = self.size.extent_all() return self | 5d6922774e4d74becff03265f02ee7265be8e613 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5d6922774e4d74becff03265f02ee7265be8e613/ligolw_cafe.py |
new.add(cache_entry, cache_entry.to_segmentlistdict()) | new.add(cache_entry) | def pack(self, cache_entry): """ Find all bins in which this glue.lal.CacheEntry instance belongs, merge them, and add this cache entry to the result. Create a new bin for this cache entry if it does not belong in any of the existing bins. | 5d6922774e4d74becff03265f02ee7265be8e613 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5d6922774e4d74becff03265f02ee7265be8e613/ligolw_cafe.py |
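The two rows above move the segmentlistdict computation inside LALCacheBin.add, so callers such as pack() hand over only the cache entry. The shape of that refactor, using toy stand-in classes rather than glue's real packing.Bin:

class ToyEntry(object):
    # Toy cache entry; coverage() stands in for to_segmentlistdict().
    def __init__(self, start, stop):
        self.start, self.stop = start, stop

    def coverage(self):
        return (self.start, self.stop)

class ToyBin(object):
    # Stand-in for the base bin: caller supplies both object and size.
    def __init__(self):
        self.objects = []
        self.sizes = []

    def add(self, obj, size):
        self.objects.append(obj)
        self.sizes.append(size)
        return self

class ToyCacheBin(ToyBin):
    def add(self, entry):
        # Derive the size from the entry itself, so callers pass only
        # the entry, as in the rewritten pack() above.
        return ToyBin.add(self, entry, entry.coverage())

ToyCacheBin().add(ToyEntry(0, 10))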
splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] | extents = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(abs(origbin.extent)) / n) for i in range(1, n)] + [+segments.infinity()] | def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done # | 5d6922774e4d74becff03265f02ee7265be8e613 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5d6922774e4d74becff03265f02ee7265be8e613/ligolw_cafe.py |
print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] | print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(extent) for extent in extents[1:-1])) extents = [segments.segment(*bounds) & origbin.extent for bounds in zip(extents[:-1], extents[1:])] | def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done # | 5d6922774e4d74becff03265f02ee7265be8e613 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5d6922774e4d74becff03265f02ee7265be8e613/ligolw_cafe.py |
for split in splits: | for extent in extents: | def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done # | 5d6922774e4d74becff03265f02ee7265be8e613 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5d6922774e4d74becff03265f02ee7265be8e613/ligolw_cafe.py |
newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) | extent_plus_max_gap = extent.protract(cafepacker.max_gap) | def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done # | 5d6922774e4d74becff03265f02ee7265be8e613 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5d6922774e4d74becff03265f02ee7265be8e613/ligolw_cafe.py |
if cache_entry.segment.disjoint(bin_extent_plus_max_gap): | if cache_entry.segment.disjoint(extent_plus_max_gap): | def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done # | 5d6922774e4d74becff03265f02ee7265be8e613 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5d6922774e4d74becff03265f02ee7265be8e613/ligolw_cafe.py |
if cache_entry_segs.intersects_segment(bin.extent): | if cache_entry_segs.intersects_segment(extent): | def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done # | 5d6922774e4d74becff03265f02ee7265be8e613 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5d6922774e4d74becff03265f02ee7265be8e613/ligolw_cafe.py |
bin.objects.append(cache_entry) | newbins[-1].add(cache_entry) | def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done # | 5d6922774e4d74becff03265f02ee7265be8e613 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5d6922774e4d74becff03265f02ee7265be8e613/ligolw_cafe.py |
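The seven rows above rework split_bins to cut an over-long bin into n pieces bounded by evenly spaced extents, and to repack entries against those extents directly. The boundary arithmetic on its own, with plain floats standing in for GPS times:

import math

def split_boundaries(start, stop, extentlimit):
    # Number of pieces needed so that no piece exceeds extentlimit,
    # then the n-1 interior cut points spaced evenly across [start, stop).
    n = int(math.ceil((stop - start) / float(extentlimit)))
    cuts = [start + i * (stop - start) / float(n) for i in range(1, n)]
    return n, cuts

print(split_boundaries(0.0, 10.0, 4.0))   # (3, [3.33..., 6.66...])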
injection_area=bin_size*i | injection_area=bin_size*(i+1) | def _greedy_bin(greedyHist,greedyPoints,injection_bin_index,bin_size,Nsamples,confidence_levels): """ An interal function representing the common, dimensionally-independent part of the greedy binning algorithms. """ #Now call confidence level C extension function to determine top-ranked pixels (injectionconfidence,toppoints)=_calculate_confidence_levels( greedyHist, greedyPoints, injection_bin_index, bin_size, Nsamples ) #Determine interval/area contained within given confidence intervals nBins=0 confidence_levels.sort() reses={} toppoints=np.array(toppoints) for printcl in confidence_levels: nBins=1 #Start at top of list of ranked pixels... accl=toppoints[0,3] #Loop over next significant pixels and their confidence levels while accl<printcl and nBins<=len(toppoints): nBins=nBins+1 accl=toppoints[nBins-1,3] reses[printcl]=nBins*bin_size #Find area injection_area=None if injection_bin_index and injectionconfidence: i=list(np.nonzero(np.asarray(toppoints)[:,2]==injection_bin_index))[0] injection_area=bin_size*i return toppoints,injectionconfidence,reses,injection_area | 927ddfda6dee0b24ed378ab3fbfbeed5d5522965 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/927ddfda6dee0b24ed378ab3fbfbeed5d5522965/bayespputils.py |
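The fix above is an off-by-one: i is the zero-based position of the injection's bin in the ranked list, so the area searched down to and including that bin spans i+1 bins. In miniature:

ranked_bins = ["b3", "b7", "b1", "b5"]   # toy ranking, most probable first
bin_size = 0.1
i = ranked_bins.index("b1")              # zero-based index -> 2
injection_area = bin_size * (i + 1)      # 3 bins searched, not 2
assert injection_area == 0.1 * 3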
def fetchInformation(self,triggerTime=None,window=300): """ Wrapper for fetchInformationDualWindow that mimics original behavior """ return self.fetchInformationDualWindow(triggerTime,window,window,ifoList='DEFAULT') | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
later. """ | later. """ | def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \ | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
for (ifo,epoch) in ifoEpochList: | except: backgroundPickle=False sys.stderr.write("Error importing the pickle file! %s\n"\ %(pickleLocale)) return for (ifo,epoch) in ifoEpochList: | def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \ | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
generated background %s"%self.__backgroundDict__["ifoepoch"] except: backgroundPickle=False sys.stderr.write("Error importing the pickle file!\n") if os.access(os.path.split(pickleLocal)[0],os.W_OK): os.path.rename(pickleLocale,pickleLocale+".corrupt") | generated background expected %s got %s"%(\ self.__backgroundDict__["ifoepoch"], ifoEpochList) | def createDQbackground(self,ifoEpochList=list(),pickleLocale=None): """ Two inputs a list of tuples (ifo,epochname) for each instrument. Also a place to save the potential pickle to for quick access later. """ if type(ifoEpochList) != type(list()): raise Exception, \ "Invalid input argument ifoEpochList,%s type(%s)"\ %(ifoEpochList,type(ifoEpochList)) #Make sure epoch exists for reach ifo for ifo,epoch in ifoEpochList: if ifo not in runEpochs.keys(): raise Exception, "Bad ifo specified, %s"%ifo if epoch not in runEpochs[ifo].keys(): raise Exception, "Bad ifo epoch specified, %s:%s"%(ifo,epoch) #If pickle location given try to load that pickle first. backgroundPickle=False if pickleLocale!=None: #If pickle file exists read it if not make sure we can #generate it properly otherwise skip creating background if os.path.isfile(pickleLocale): try: self.__backgroundDict__=cPickle.load(file(pickleLocale,'r')) backgroundPickle=True for (ifo,epoch) in ifoEpochList: if (ifo.upper().strip(),epoch.upper().strip()) \ not in self.__backgroundDict__["ifoepoch"]: raise Exception,\ "Invalid ifo and epoch information in \ | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] | ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in self.ifos] | def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
sys.stderr.write("Aborting tabulate of binomial P\n") | sys.stderr.write("Aborting tabulation of binomial P\n") | def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
for myIfo,flagList in seenFlags.iteritems(): | for myIfo in seenFlags.keys(): | def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) | if myIfo.strip() not in self.__backgroundDict__.keys(): if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for outsideFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][outsideFlag]=float(-0.0) else: for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) | def estimateDQbackground(self): """ This method looks at the self.resultlist inside the instance. Using this and 1000 generated time stamp it tabulates a ranking of flag prevelance, binomial probability 'p' """ if len(self.resultList) < 1: self.__backgroundResults__=list() self.__backgroundTimesDict__=dict() self.__backgroundDict__=dict() self.__haveBackgroundDict__=bool(False) return #Create DQ background, specify pickle locale to try and load first #Determine which IFOs from DQ listing uniqIfos=list() for ifo,b,c,d,e,f in self.resultList: if ifo not in uniqIfos: uniqIfos.append(ifo) ifoEpochList=[(x,getRunEpoch(self.triggerTime,x)) for x in uniqIfos] self.createDQbackground(ifoEpochList,self.__backgroundPickle__) #Calculate the binomial 'p' value for the flags in the table. if self.resultList < 1: sys.stderr.write("Aborting tabulate of binomial P\n") os.abort() seenFlags=dict() for ifo,name,version,comment,start,stop in self.resultList: if ifo.strip() not in seenFlags.keys(): seenFlags[ifo]=list() seenFlags[ifo].append(name) for myIfo,flagList in seenFlags.iteritems(): tmpFlags=list() for backgroundTime,backgroundFlags in \ self.__backgroundDict__[myIfo.strip()].iteritems(): tmpFlags.extend([name for ifo,name,ver,com,start,stop in backgroundFlags]) if myIfo not in self.__backgroundResults__.keys(): self.__backgroundResults__[myIfo]=dict() for myFlag in seenFlags[myIfo]: self.__backgroundResults__[myIfo][myFlag]=tmpFlags.count(myFlag)/float(self.__backgroundPoints__) self.__haveBackgroundDict__=True #Return background estimating | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) | tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,\ size,myBackgroundRank,myCategory) | def generateHTMLTable(self,tableType="BOTH"): """ Return a HTML table already formatted using the module MARKUP to keep the HTML tags complient. This method does nothing but return the result of the last call to self.fetchInformation() The flag names associated with LIGO will have links to the channel wiki in them also. Types that will invoke a not everything behaviour are DQ and VETO """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="<table bgcolor=grey border=1px>" titleString="<tr>" tableEmptyString="<tr bgcolor=%s>"%myColor rowString="<tr bgcolor=%s> " for col in self.__columns__: titleString+="<th>%s</th>"%col rowString+="<td>%s</td>" tableEmptyString+="<td>None</td>" titleString+="</tr>\n" tableEmptyString+="</tr>\n" rowString+="</tr>\n" tableString+=titleString if len(self.resultList) == 0: tableString+=tableEmptyString for ifo,name,version,comment,start,stop in self.resultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" if tableType.upper().strip() == "DQ": if not name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() == "VETO": if name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() not in ["VETO","DQ"]: tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="</table>" return tableString | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) | tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,\ size,myBackgroundRank,myCategory) | def generateHTMLTable(self,tableType="BOTH"): """ Return a HTML table already formatted using the module MARKUP to keep the HTML tags complient. This method does nothing but return the result of the last call to self.fetchInformation() The flag names associated with LIGO will have links to the channel wiki in them also. Types that will invoke a not everything behaviour are DQ and VETO """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="<table bgcolor=grey border=1px>" titleString="<tr>" tableEmptyString="<tr bgcolor=%s>"%myColor rowString="<tr bgcolor=%s> " for col in self.__columns__: titleString+="<th>%s</th>"%col rowString+="<td>%s</td>" tableEmptyString+="<td>None</td>" titleString+="</tr>\n" tableEmptyString+="</tr>\n" rowString+="</tr>\n" tableString+=titleString if len(self.resultList) == 0: tableString+=tableEmptyString for ifo,name,version,comment,start,stop in self.resultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" if tableType.upper().strip() == "DQ": if not name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() == "VETO": if name.upper().startswith("UPV"): tableString+=rowString%(myColor,ifo,name,version,start,offset1,stop,offset2,size) elif tableType.upper().strip() not in ["VETO","DQ"]: tableString+=rowString%(myColor,ifo,name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="</table>" return tableString | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
tableString+=emptyRowString%myColor | tableString+=emptyRowString | def generateMOINMOINTable(self,tableType="BOTH"): """ Return a MOINMOIN table. """ ligo=["L1","H1","H2","V1"] channelWiki="https://ldas-jobs.ligo.caltech.edu/cgi-bin/chanwiki?%s" if self.triggerTime==int(-1): return "" myColor="grey" tableString="" titleString="" emptyRowString="" rowString="" for i,col in enumerate(self.__columns__): if i == 0: titleString+="""||<rowbgcolor="%s"> %s """%(myColor,col) rowString+="""||<rowbgcolor="%s"> %s """ emptyRowString+="""||<rowbgcolor="%s"> None """%myColor else: titleString+="""|| %s """%col rowString+="""|| %s """ emptyRowString+="""|| None """ titleString+="""||\n""" rowString+="""||\n""" emptyRowString+="""||\n""" tableString+=titleString #Extract only DQ row or only VETO rows tmpResultList=list() for myRow in self.resultList: ifo,name,version,comment,start,stop=myRow #Select base on table type if ((tableType.upper() == "DQ") and \ (not name.strip().upper().startswith("UPV"))): tmpResultList.append(myRow) elif ((tableType.upper() == "VETO") and \ (name.strip().upper().startswith("UPV"))): tmpResultList.append(myRow) elif tableType.upper().strip() not in ["VETO","DQ"]: tmpResultList.append(myRow) if len(tmpResultList) == 0: tableString+=emptyRowString%myColor for ifo,name,version,comment,start,stop in tmpResultList: #If we have background information fetch it if self.__haveBackgroundDict__: myBackgroundRank=str("%3.1f"%(100.0*self.__backgroundResults__[ifo][name])).rjust(5) else: myBackgroundRank="None" if self.__havecategories__: myCategory=self.__category__[ifo][name] else: myCategory="None" offset1=start-self.triggerTime offset2=stop-self.triggerTime size=int(stop-start) if (offset1>=0) and (offset2>=0): myColor="green" if (offset1<=0) and (offset2<=0): myColor="yellow" if (offset1<=0) and (offset2>=0): myColor="red" if name.lower().__contains__('science'): myColor="skyblue" tableString+=rowString%(myColor,str(ifo).strip(),name,version,\ start,offset1,stop,offset2,size,\ myBackgroundRank,myCategory) tableString+="\n" return tableString | 5800af6be75ac2540e1e98d21cc7afadf15bf2aa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5800af6be75ac2540e1e98d21cc7afadf15bf2aa/fu_utils.py |
myAngle=arcsin(dY/dX) | myAngle=arctan(dY/dX) | def scatterPointer(A=None,B=None): """ Expects two ordered pairs as tuples (x1,y1) and (x2,y2)... Then this defines an angle. From this angle we orient a triangle point and return this as a tuple of requested size. This can be plotted by the scatter plot to point in a particular orientation. The direction right(0,0)->(1,0) is angle 0 radians, then we rotate counter clockwise from there... What is returned in a three element tuple (numsides,style,angle) which can be put into a plot call via marker=X as a **kwarg """ if A == None or B == None: return (3,0,0) if( A != type(tuple()) and len(A) != 2) \ or \ ( B != type(tuple()) and len(B) != 2): return (3,0,0) # # Calculate orientation of triangle # Ang = arcsin(dY/dX) dY=float(B[-1]-A[-1]) dX=float(B[0]-A[0]) if dY>=0 and dX>=0: myAngle=arcsin(dY/dX) elif dY>=0 and dX<0: myAngle=pi-arcsin(dY/dX) elif dY<0 and dX<0: myAngle=arcsin(dY/dX)+pi elif dY<0 and dX>0: myAngle=(2.0*pi)-arcsin(dY/dX) else: myAngle=0 return (3,0,myAngle) | b9aca6a019f1e02fdda02f66dcfacc9eb65f7287 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b9aca6a019f1e02fdda02f66dcfacc9eb65f7287/followupPDSurface.py |
myAngle=pi-arcsin(dY/dX) | myAngle=pi-arctan(dY/dX) | def scatterPointer(A=None,B=None): """ Expects two ordered pairs as tuples (x1,y1) and (x2,y2)... Then this defines an angle. From this angle we orient a triangle point and return this as a tuple of requested size. This can be plotted by the scatter plot to point in a particular orientation. The direction right(0,0)->(1,0) is angle 0 radians, then we rotate counter clockwise from there... What is returned in a three element tuple (numsides,style,angle) which can be put into a plot call via marker=X as a **kwarg """ if A == None or B == None: return (3,0,0) if( A != type(tuple()) and len(A) != 2) \ or \ ( B != type(tuple()) and len(B) != 2): return (3,0,0) # # Calculate orientation of triangle # Ang = arcsin(dY/dX) dY=float(B[-1]-A[-1]) dX=float(B[0]-A[0]) if dY>=0 and dX>=0: myAngle=arcsin(dY/dX) elif dY>=0 and dX<0: myAngle=pi-arcsin(dY/dX) elif dY<0 and dX<0: myAngle=arcsin(dY/dX)+pi elif dY<0 and dX>0: myAngle=(2.0*pi)-arcsin(dY/dX) else: myAngle=0 return (3,0,myAngle) | b9aca6a019f1e02fdda02f66dcfacc9eb65f7287 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b9aca6a019f1e02fdda02f66dcfacc9eb65f7287/followupPDSurface.py |
myAngle=arcsin(dY/dX)+pi | myAngle=arctan(dY/dX)+pi | def scatterPointer(A=None,B=None): """ Expects two ordered pairs as tuples (x1,y1) and (x2,y2)... Then this defines an angle. From this angle we orient a triangle point and return this as a tuple of requested size. This can be plotted by the scatter plot to point in a particular orientation. The direction right(0,0)->(1,0) is angle 0 radians, then we rotate counter clockwise from there... What is returned in a three element tuple (numsides,style,angle) which can be put into a plot call via marker=X as a **kwarg """ if A == None or B == None: return (3,0,0) if( A != type(tuple()) and len(A) != 2) \ or \ ( B != type(tuple()) and len(B) != 2): return (3,0,0) # # Calculate orientation of triangle # Ang = arcsin(dY/dX) dY=float(B[-1]-A[-1]) dX=float(B[0]-A[0]) if dY>=0 and dX>=0: myAngle=arcsin(dY/dX) elif dY>=0 and dX<0: myAngle=pi-arcsin(dY/dX) elif dY<0 and dX<0: myAngle=arcsin(dY/dX)+pi elif dY<0 and dX>0: myAngle=(2.0*pi)-arcsin(dY/dX) else: myAngle=0 return (3,0,myAngle) | b9aca6a019f1e02fdda02f66dcfacc9eb65f7287 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b9aca6a019f1e02fdda02f66dcfacc9eb65f7287/followupPDSurface.py |
myAngle=(2.0*pi)-arcsin(dY/dX) | myAngle=(2.0*pi)-arctan(dY/dX) | def scatterPointer(A=None,B=None): """ Expects two ordered pairs as tuples (x1,y1) and (x2,y2)... Then this defines an angle. From this angle we orient a triangle point and return this as a tuple of requested size. This can be plotted by the scatter plot to point in a particular orientation. The direction right(0,0)->(1,0) is angle 0 radians, then we rotate counter clockwise from there... What is returned in a three element tuple (numsides,style,angle) which can be put into a plot call via marker=X as a **kwarg """ if A == None or B == None: return (3,0,0) if( A != type(tuple()) and len(A) != 2) \ or \ ( B != type(tuple()) and len(B) != 2): return (3,0,0) # # Calculate orientation of triangle # Ang = arcsin(dY/dX) dY=float(B[-1]-A[-1]) dX=float(B[0]-A[0]) if dY>=0 and dX>=0: myAngle=arcsin(dY/dX) elif dY>=0 and dX<0: myAngle=pi-arcsin(dY/dX) elif dY<0 and dX<0: myAngle=arcsin(dY/dX)+pi elif dY<0 and dX>0: myAngle=(2.0*pi)-arcsin(dY/dX) else: myAngle=0 return (3,0,myAngle) | b9aca6a019f1e02fdda02f66dcfacc9eb65f7287 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b9aca6a019f1e02fdda02f66dcfacc9eb65f7287/followupPDSurface.py |
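The four rows above patch `scatterPointer`'s quadrant branches by swapping `arcsin` for `arctan`. For reference, the angle the function's docstring describes (counter-clockwise from the +x direction, with (0,0)->(1,0) at 0 radians) can be computed quadrant-safely with `atan2` in a single call; a minimal sketch with a hypothetical helper name, not code from the dataset:

import math

def orientation_angle(A, B):
    # Angle of the vector from A to B, mapped into [0, 2*pi).
    # atan2 reads the quadrant off the signs of dY and dX directly,
    # so no per-quadrant arctan(dY/dX) branches (and no dX == 0
    # special case) are needed.
    dY = float(B[1] - A[1])
    dX = float(B[0] - A[0])
    return math.atan2(dY, dX) % (2.0 * math.pi)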
type="string",default=None,\ | type="string",default="dummy",\ | def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ | b9aca6a019f1e02fdda02f66dcfacc9eb65f7287 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b9aca6a019f1e02fdda02f66dcfacc9eb65f7287/followupPDSurface.py |
origData=dict() origData["beam"]=beamSpigot.getDataStream(beamName,gpsStart,gpsEnd) | origData=dict() | def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ | b9aca6a019f1e02fdda02f66dcfacc9eb65f7287 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b9aca6a019f1e02fdda02f66dcfacc9eb65f7287/followupPDSurface.py |
tmpData=beamSpigot.getDataStream(myLabel[myKey],gpsA,gpsB) mySnipData[myKey]=interp(mySnipData["time"], getTimeStamps(tmpData), tmpData) | if beamName == "dummy" and myKey == "beam": mySnipData[myKey]=ones(size(mySnipData["time"])) else: tmpData=beamSpigot.getDataStream(myLabel[myKey],gpsA,gpsB) mySnipData[myKey]=interp(mySnipData["time"], getTimeStamps(tmpData), tmpData) | def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ | b9aca6a019f1e02fdda02f66dcfacc9eb65f7287 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b9aca6a019f1e02fdda02f66dcfacc9eb65f7287/followupPDSurface.py |
size=50,color='white') | size=starSize,color='white') | def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ | b9aca6a019f1e02fdda02f66dcfacc9eb65f7287 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b9aca6a019f1e02fdda02f66dcfacc9eb65f7287/followupPDSurface.py |
size=50,color='white') | size=starSize,color='white') | def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ | b9aca6a019f1e02fdda02f66dcfacc9eb65f7287 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b9aca6a019f1e02fdda02f66dcfacc9eb65f7287/followupPDSurface.py |
facecolor=None,\ | def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ | b9aca6a019f1e02fdda02f66dcfacc9eb65f7287 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/b9aca6a019f1e02fdda02f66dcfacc9eb65f7287/followupPDSurface.py |
for offset_vector in self.offset_vectors: | for offset_vector in cafepacker.offset_vectors: | def split_bins(cafepacker, extentlimit): """ Split bins of stored in CafePacker until each bin has an extent no longer than extentlimit. """ # # loop overall the bins in cafepacker.bins. we pop items out of # cafepacker.bins and append new ones to the end so need a while loop # checking the extent of each bin in cafepacker.bins until all bins are # done being split # idx = 0 while idx < len(cafepacker.bins): if abs(cafepacker.bins[idx].extent) <= extentlimit: # # bin doesn't need splitting so move to next # idx += 1 continue # # split this bin so pop it out of the list # bigbin = cafepacker.bins.pop(idx) # # calculate the central time of the union of all the input # files in the bin # splittime = lsctables.LIGOTimeGPS(bigbin.extent[0] + (bigbin.extent[1] - bigbin.extent[0])/2) # # split the segmentlistdict at this time # splitseglistdict = segments.segmentlistdict() for key in bigbin.size.keys(): splitseglistdict[key] = segments.segmentlist([segments.segment(-segments.infinity(),splittime)]) # # create bins for the first and second halves # bin1 = LALCacheBin() bin1.size = bigbin.size & splitseglistdict bin1.extent = bigbin.extent & splitseglistdict.values()[0][0] bin2 = LALCacheBin() bin2.size = bigbin.size & ~splitseglistdict bin2.extent = bigbin.extent & (~splitseglistdict.values()[0])[0] # # remove unused keys from the smaller bins' segmentlistdicts # newsize = segments.segmentlistdict() for key in bin1.size.keys(): if len(bin1.size[key]): newsize[key] = bin1.size[key] bin1.size = newsize newsize = segments.segmentlistdict() for key in bin2.size.keys(): if len(bin2.size[key]): newsize[key] = bin2.size[key] bin2.size = newsize # # find which of the objects in bigbin.objects intersect the two # smaller bins # for cache in bigbin.objects: thisseglistdict = cache.to_segmentlistdict() coinc1 = 0 coinc2 = 0 for offset_vector in self.offset_vectors: # # loop over offset vectors updating the smaller # bins and the object we are checking # bin1.size.offsets.update(offset_vector) bin2.size.offsets.update(offset_vector) thisseglistdict.offsets.update(offset_vector) if not coinc1 and bin1.size.is_coincident(thisseglistdict, keys = offset_vector.keys()): # # object is coicident with bin1 # coinc1 = 1 bin1.objects.append(cache) if not coinc2 and bin2.size.is_coincident(thisseglistdict, keys = offset_vector.keys()): # # object is coincident with bin2 # coinc2 = 1 bin2.objects.append(cache) # # end loop if known to be coincident with both # bins # if coinc1 and coinc2: break # # clear offsets applied to object # thisseglistdict.offsets.clear() # # clear offsets applied to bins # bin1.size.offsets.clear() bin2.size.offsets.clear() # # append smaller bins to list of bins # cafepacker.bins.append(bin1) cafepacker.bins.append(bin2) # # do not increment idx as we popped the large bin out of # cafepacker.bins # # # sort the bins in cafepacker # cafepacker.bins.sort() return cafepacker | 4a884fa265544fa4242befa60915a19c7bcfebb7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/4a884fa265544fa4242befa60915a19c7bcfebb7/ligolw_cafe.py |
num_days = int(round((duration)/86400)) | num_days = int(round((int(duration))/86400)) | def daily_ihope_cache(start,end,ifo,cluster=None): """ Generates cache list of daily ihope INSPIRAL xml files for give ifo and clustering (None,'30ms','100ms', or '16s') between start and end time """ #== daily path ihope_daily_path = '/archive/home/cbc/ihope_daily' #== set clustering tag if cluster==None or cluster.upper()=='UNCLUSTERED': cluster_tag='UNCLUSTERED' elif cluster.upper()=='100MS': cluster_tag='100MILLISEC_CLUSTERED' elif cluster.upper()=='30MS': cluster_tag='30MILLISEC_CLUSTERED' elif cluster.upper()=='16S': cluster_tag='16SEC_CLUSTERED' #== work out days day_start = int(GetCommandOutput('tconvert `tconvert '+str(start)+\ ' -f %D`')[0]) duration = end-start num_days = int(round((duration)/86400)) #== generate array of days day_end = day_start+num_days*86400 while day_end<end: day_end+=86400 days = numpy.arange(day_start,day_end,86400) cache=[] #== loop over days gathering files for day in days: date = GetCommandOutput('tconvert '+str(day)+' -f %Y%m%d')[0]\ .replace('\n','') day_path = os.path.join(ihope_daily_path,date[0:6],date) ls_cmd = 'ls '+day_path+'/'+ifo+'-INSPIRAL_'+cluster_tag+\ '*.xml.gz' cache_out = Popen(ls_cmd,shell=True,stdout=PIPE,stderr=PIPE) for line in cache_out.stdout.readlines(): trig_start = int(line.split('.xml')[0].split('-')[-2]) duration = int(line.split('.xml')[0].split('-')[-2]) if start<=trig_start<end or start<(trig_start+duration)<=end: cache.append(line.replace('\n','')) cache_out.stdout.close() return cache | de85c159a571593db3ec7047a997bccf2f95a751 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/de85c159a571593db3ec7047a997bccf2f95a751/dqDataUtils.py |
cellString=cellString+" %s "%self.linkedRemoteImage(thumbs[ifo][myOmegaIndex], | cellString=cellString+" %s "%self.linkedRemoteImage(thumbs[ifo][myOmegaIndexT], | def insertAnalyzeQscanTable(self, images=None, thumbs=None, indexes=None, imagesAQ=None, thumbsAQ=None, indexesAQ=None, channelRanks=None): """ Insert a multiple IFO table with 5 cols with the AQ underneath this depends on the numer of IFO keys in indexes dictionary. The option channelRanks is not required to change the plot order! Channel ranks is dict similar in shape to other args. Cells are shaded light grey if they are top N channels and that the trigger is greater in value that 0.5. Assuming the channelRanks dict is not empty. """ #channelRanks={'ifo':[[chan,Zvalue,rank]...[chan,Zvalue,rank]],'ifo2':[[ ]]} #Review the keys for Qscans and analyzeQscans. if not images.keys()==thumbs.keys()==indexes.keys(): sys.stderr.write("Error: Keys for Qscan tables creations inconsistent!\n") if not imagesAQ.keys()==thumbsAQ.keys()==indexesAQ.keys(): sys.stderr.write("Error: Keys for Qscan tables creations inconsistent!\n") | 5e1f81b21a4e936f81d47d9e5881e1ba01bee541 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5e1f81b21a4e936f81d47d9e5881e1ba01bee541/makeCheckListWiki.py |
cellString=cellString+" %s "%self.linkedRemoteImage(thumbsAQ[ifo][myAQIndex], | cellString=cellString+" %s "%self.linkedRemoteImage(thumbsAQ[ifo][myAQIndexT], | def insertAnalyzeQscanTable(self, images=None, thumbs=None, indexes=None, imagesAQ=None, thumbsAQ=None, indexesAQ=None, channelRanks=None): """ Insert a multiple IFO table with 5 cols with the AQ underneath this depends on the numer of IFO keys in indexes dictionary. The option channelRanks is not required to change the plot order! Channel ranks is dict similar in shape to other args. Cells are shaded light grey if they are top N channels and that the trigger is greater in value that 0.5. Assuming the channelRanks dict is not empty. """ #channelRanks={'ifo':[[chan,Zvalue,rank]...[chan,Zvalue,rank]],'ifo2':[[ ]]} #Review the keys for Qscans and analyzeQscans. if not images.keys()==thumbs.keys()==indexes.keys(): sys.stderr.write("Error: Keys for Qscan tables creations inconsistent!\n") if not imagesAQ.keys()==thumbsAQ.keys()==indexesAQ.keys(): sys.stderr.write("Error: Keys for Qscan tables creations inconsistent!\n") | 5e1f81b21a4e936f81d47d9e5881e1ba01bee541 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/5e1f81b21a4e936f81d47d9e5881e1ba01bee541/makeCheckListWiki.py |
self.FAR = 99 | self.FAR = -1 | def __init__(self): """ here are all the things we need """ #start with data needed for every coinc self.ifo_list = [] self.ifo_coincs = [] self.snr = {} self.gps = {} self.eff_distances = {} self.mass1 = {} self.mass2 = {} self.time = None self.FAR = 99 #this stuff is only needed for injections self.is_injection = False self.latitude_inj = None self.longitude_inj = None self.mass1_inj = None self.mass2_inj = None self.distance_inj = None self.eff_distances_inj = {} | c61b0d7232ed4fa06a8cfbef8a6aafce8f37875c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/c61b0d7232ed4fa06a8cfbef8a6aafce8f37875c/skylocutils.py |
def set_FAR(self,FAR_per_day): self.FAR=FAR_per_day | def set_inj_params(self,lat,lon,m1,m2,dist,effDs): """ set all of the injection parameters at once """ self.latitude_inj = lat self.longitude_inj = lon self.mass1_inj = m1 self.mass2_inj = m2 self.distance_inj = dist self.eff_distances_inj = effDs | c61b0d7232ed4fa06a8cfbef8a6aafce8f37875c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/c61b0d7232ed4fa06a8cfbef8a6aafce8f37875c/skylocutils.py |
if ctab[0].false_alarm_rate is not None: coinc.set_FAR(ctab[0].false_alarm_rate) | def get_coincs_from_coinctable(self,files): """ read data from coinc tables (xml format) FIXME: currently assumes one coinc per file!!! """ for file in files: coinc = CoincData() xmldoc = utils.load_filename(file) sngltab = tab.get_table(xmldoc,lsctables.SnglInspiralTable.tableName) coinc.set_snr(dict((row.ifo, row.snr) for row in sngltab)) coinc.set_gps(dict((row.ifo, LIGOTimeGPS(row.get_end())) for row in sngltab)) coinc.set_effDs(dict((row.ifo,row.eff_distance) for row in sngltab)) coinc.set_masses(dict((row.ifo, row.mass1) for row in sngltab), \ dict((row.ifo, row.mass2) for row in sngltab)) ctab = tab.get_table(xmldoc,lsctables.CoincInspiralTable.tableName) coinc.set_ifos(list(ctab[0].get_ifos())) if ctab[0].false_alarm_rate is not None: coinc.set_FAR(ctab[0].false_alarm_rate) | c61b0d7232ed4fa06a8cfbef8a6aafce8f37875c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/c61b0d7232ed4fa06a8cfbef8a6aafce8f37875c/skylocutils.py |
while os.path.isfile(base_name): base_name = base_name + '_' + str(counter) + ext | while os.path.isfile(name): name = base_name + '_' + str(counter) + ext | def get_unique_filename(name): """ use this to avoid name collisions """ counter = 1 base_name, ext = os.path.splitext(name) while os.path.isfile(base_name): base_name = base_name + '_' + str(counter) + ext counter += 1 return base_name + ext | ed570091132aaa77d9da802494c40b42b3357586 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/ed570091132aaa77d9da802494c40b42b3357586/run_skypoints.py |
return base_name + ext | return name | def get_unique_filename(name): """ use this to avoid name collisions """ counter = 1 base_name, ext = os.path.splitext(name) while os.path.isfile(base_name): base_name = base_name + '_' + str(counter) + ext counter += 1 return base_name + ext | ed570091132aaa77d9da802494c40b42b3357586 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/ed570091132aaa77d9da802494c40b42b3357586/run_skypoints.py |
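Combining the two replacements above with their shared context yields the corrected helper in full: the loop now tests and rebinds `name` while `base_name` stays fixed, so each retry appends the counter to the original stem. Reassembled here for readability:

import os

def get_unique_filename(name):
    """
    use this to avoid name collisions
    """
    counter = 1
    base_name, ext = os.path.splitext(name)
    # Try name, then base_name_1.ext, base_name_2.ext, ... until unused.
    while os.path.isfile(name):
        name = base_name + '_' + str(counter) + ext
        counter += 1
    return name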
def append_process(xmldoc, comment = None, force = None, ds_sq_threshold = None, save_small_coincs = None, vetoes_name = None, verbose = None): | def append_process(xmldoc, comment = None, force = None, ds_sq_threshold = None, save_small_coincs = None, vetoes_name = None, coinc_end_time_segment = None, verbose = None): | def append_process(xmldoc, comment = None, force = None, ds_sq_threshold = None, save_small_coincs = None, vetoes_name = None, verbose = None): process = llwapp.append_process(xmldoc, program = process_program_name, version = __version__, cvs_repository = u"lscsoft", cvs_entry_time = __date__, comment = comment) params = [ (u"--ds-sq-threshold", u"real_8", ds_sq_threshold) ] if comment is not None: params += [(u"--comment", u"lstring", comment)] if force is not None: params += [(u"--force", None, None)] if save_small_coincs is not None: params += [(u"--save-small-coincs", None, None)] if vetoes_name is not None: params += [(u"--vetoes-name", u"lstring", vetoes_name)] if verbose is not None: params += [(u"--verbose", None, None)] ligolw_process.append_process_params(xmldoc, process, params) return process | 33c375988e5fbb955cb57c83b181ddc3e9420c4e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/33c375988e5fbb955cb57c83b181ddc3e9420c4e/ligolw_rinca.py |
def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): | def append_coinc(self, process_id, node, coinc_def_id, events): | def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables # | 33c375988e5fbb955cb57c83b181ddc3e9420c4e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/33c375988e5fbb955cb57c83b181ddc3e9420c4e/ligolw_rinca.py |
time_slide_id = node.time_slide_id | def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables # | 33c375988e5fbb955cb57c83b181ddc3e9420c4e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/33c375988e5fbb955cb57c83b181ddc3e9420c4e/ligolw_rinca.py |
tstart = events[0].get_start() + self.time_slide_index[time_slide_id][events[0].ifo] coinc_ringdown.set_start(tstart + sum(event.snr * float(event.get_start() + self.time_slide_index[time_slide_id][event.ifo] - tstart) for event in events) / sum(event.snr for event in events)) | tstart = coinc_ringdown_start(events, node.offset_vector) coinc_ringdown.set_start(tstart) | def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables # | 33c375988e5fbb955cb57c83b181ddc3e9420c4e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/33c375988e5fbb955cb57c83b181ddc3e9420c4e/ligolw_rinca.py |
tstart = coinc_ringdown.get_start() | def append_coinc(self, process_id, time_slide_id, coinc_def_id, events): # # populate the coinc_event and coinc_event_map tables # | 33c375988e5fbb955cb57c83b181ddc3e9420c4e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/33c375988e5fbb955cb57c83b181ddc3e9420c4e/ligolw_rinca.py |
coinc_tables.append_coinc(process_id, node.time_slide_id, coinc_def_id, ntuple) | coinc_tables.append_coinc(process_id, node, coinc_def_id, ntuple) | def ligolw_rinca( xmldoc, process_id, EventListType, CoincTables, coinc_definer_row, event_comparefunc, thresholds, ntuple_comparefunc = lambda events, offset_vector: False, small_coincs = False, veto_segments = None, verbose = False | 33c375988e5fbb955cb57c83b181ddc3e9420c4e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/33c375988e5fbb955cb57c83b181ddc3e9420c4e/ligolw_rinca.py |
coinc_tables.append_coinc(process_id, node.time_slide_id, coinc_def_id, ntuple) | coinc_tables.append_coinc(process_id, node, coinc_def_id, ntuple) | def ligolw_rinca( xmldoc, process_id, EventListType, CoincTables, coinc_definer_row, event_comparefunc, thresholds, ntuple_comparefunc = lambda events, offset_vector: False, small_coincs = False, veto_segments = None, verbose = False | 33c375988e5fbb955cb57c83b181ddc3e9420c4e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/33c375988e5fbb955cb57c83b181ddc3e9420c4e/ligolw_rinca.py |
def __init__(self, config=None): | def __init__(self, configfile=None): | def __init__(self, config=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind") | 7b60b47b4fbaad3d8fa52b8e122b7a950318aa85 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/7b60b47b4fbaad3d8fa52b8e122b7a950318aa85/stfu_pipe.py |
cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) | cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) | def __init__(self, config=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind") | 7b60b47b4fbaad3d8fa52b8e122b7a950318aa85 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/7b60b47b4fbaad3d8fa52b8e122b7a950318aa85/stfu_pipe.py |
if config: | if configfile: | def __init__(self, config=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind") | 7b60b47b4fbaad3d8fa52b8e122b7a950318aa85 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/7b60b47b4fbaad3d8fa52b8e122b7a950318aa85/stfu_pipe.py |
user_cp.read(config) | user_cp.read(configfile) | def __init__(self, config=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind") | 7b60b47b4fbaad3d8fa52b8e122b7a950318aa85 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/7b60b47b4fbaad3d8fa52b8e122b7a950318aa85/stfu_pipe.py |
if user_cp: self.overwrite_config(user_cp) | if user_cp: self.overwrite_config(user_cp,cp) | def __init__(self, config=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind") | 7b60b47b4fbaad3d8fa52b8e122b7a950318aa85 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/3592/7b60b47b4fbaad3d8fa52b8e122b7a950318aa85/stfu_pipe.py |