Dataset columns and string lengths:
  rem      1 to 322k
  add      0 to 2.05M
  context  4 to 228k
  meta     156 to 215
self.add_var_opt(param,value)
def __init__(self, dag, job, cp, opts, sngl, frame_cache, chia, tag, p_nodes=[]):
1d62e8501fe5a09856b38aebf1337aced30aab29 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/1d62e8501fe5a09856b38aebf1337aced30aab29/stfu_pipe.py
inspiral.ChiaNode.__init__(self,job)
pipeline.CondorDAGNode.__init__(self,job) pipeline.AnalysisNode.__init__(self)
def __init__(self, dag, job, cp, opts, coinc, inspiral_node_dict, chia_node =None, p_nodes = []):
1d62e8501fe5a09856b38aebf1337aced30aab29 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/1d62e8501fe5a09856b38aebf1337aced30aab29/stfu_pipe.py
background_livetime.setdefault(key, {})
background_livetime.setdefault(key, {})
def background_livetime_nonring_by_slide(connection, seglists, veto_segments=None, verbose = False): # get the segment lists and live time # FIXME veto segments not handled yet zero_lag_time_slides, background_time_slides = SnglBurstUtils.get_time_slides(connection) instruments = frozenset(seglists.keys()) background_livetime = {} for on_inst, off_inst in detector_combos(list(instruments)): on_inst = frozenset(on_inst) off_inst = frozenset(off_inst) key = on_inst old_offsets = seglists.offsets.copy() background_livetime.setdefault(key, {}) for id, time_slide in background_time_slides.items(): seglists.offsets.update(time_slide) segs=seglists.intersection(list(on_inst))-seglists.union(list(off_inst)) tskey = frozenset(time_slide.items()) background_livetime[key].setdefault(tskey,0) background_livetime[key][tskey] += float(abs(segs)) seglists.offsets.update(old_offsets) return background_livetime
a1ce546623ebcaadbd3b00da61a6cbcccc6f4d26 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/a1ce546623ebcaadbd3b00da61a6cbcccc6f4d26/farutils.py
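The row above accumulates background livetime per instrument combination and per time slide using nested dict.setdefault calls. Below is a minimal, self-contained sketch of that bookkeeping pattern; the glue.segments arithmetic is replaced by plain float livetimes, and the function name and inputs are hypothetical, not the farutils.py API.

# Minimal sketch of the livetime bookkeeping pattern used above.
# Real code accumulates abs(segmentlist) per (instrument set, time slide);
# here livetimes are plain floats and the inputs are made up.
def accumulate_livetime(samples):
    """samples: iterable of (on_instruments, time_slide, livetime) tuples."""
    background_livetime = {}
    for on_inst, time_slide, livetime in samples:
        key = frozenset(on_inst)                  # e.g. {"H1", "L1"}
        tskey = frozenset(time_slide.items())     # offset vector as a hashable key
        background_livetime.setdefault(key, {})
        background_livetime[key].setdefault(tskey, 0.0)
        background_livetime[key][tskey] += livetime
    return background_livetime

# Example: two slides over the same H1-L1 combination.
print(accumulate_livetime([
    (("H1", "L1"), {"H1": 0.0, "L1": 5.0}, 120.0),
    (("H1", "L1"), {"H1": 0.0, "L1": 5.0}, 30.0),
]))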
offset_vectors = db_thinca_rings.get_background_offset_vectors(connection)
offset_vectors = db_thinca_rings.get_background_offset_vectors(connection)
def background_livetime_ring_by_slide(connection, live_time_program, seglists, veto_segments, verbose = False): background_livetime = {} instruments = frozenset(seglists.keys()) offset_vectors = db_thinca_rings.get_background_offset_vectors(connection) # first work out time slide live time for on_instruments, livetimes in db_thinca_rings.get_thinca_livetimes(db_thinca_rings.get_thinca_rings_by_available_instruments(connection, program_name = live_time_program), veto_segments, offset_vectors, verbose = verbose).items(): on_instruments = frozenset(on_instruments)#lsctables.ifos_from_instrument_set(on_instruments) for offset, lt in zip(offset_vectors,livetimes): background_livetime.setdefault(on_instruments,{}) key = frozenset(offset.items()) background_livetime[on_instruments].setdefault(key, 0) background_livetime[on_instruments][key] += lt return background_livetime
a1ce546623ebcaadbd3b00da61a6cbcccc6f4d26 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/a1ce546623ebcaadbd3b00da61a6cbcccc6f4d26/farutils.py
else:
else:
def playground_nonplayground_livetime(seglists, playground_segs=None, verbose=False): playground_livetime = {} nonplayground_livetime = {} instruments = frozenset(seglists.keys()) for on_inst, off_inst in detector_combos(list(instruments)): on_inst = frozenset(on_inst) off_inst = frozenset(off_inst) key = lsctables.ifos_from_instrument_set(on_inst) selected_segs = seglists.intersection(list(on_inst))-seglists.union(list(off_inst)) if playground_segs: playground_livetime[on_inst] = float(abs(selected_segs & playground_segs)) nonplayground_livetime[on_inst] = float(abs(selected_segs - playground_segs)) else: playground_livetime[on_inst] = 0 nonplayground_livetime[on_inst] = float(abs(selected_segs)) return playground_livetime, nonplayground_livetime
a1ce546623ebcaadbd3b00da61a6cbcccc6f4d26 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/a1ce546623ebcaadbd3b00da61a6cbcccc6f4d26/farutils.py
def get_veto_segments(connection, program_name, veto_segments_name=None): veto_segments = segments.segmentlistdict() #FIXME only handles thinca case if not veto_segments_name: return veto_segments if program_name == "thinca": veto_segments = db_thinca_rings.get_veto_segments(connection, veto_segments_name) return veto_segments
a1ce546623ebcaadbd3b00da61a6cbcccc6f4d26 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/a1ce546623ebcaadbd3b00da61a6cbcccc6f4d26/farutils.py
def get_segments(connection, xmldoc, program_name):
def get_segments(connection, xmldoc, program_name):
def get_segments(connection, xmldoc, program_name): seglists = segments.segmentlistdict() if program_name == "thinca": seglists = db_thinca_rings.get_thinca_zero_lag_segments(connection, program_name) if program_name == "gstlal_inspiral": seglists = llwapp.segmentlistdict_fromsearchsummary(xmldoc, program_name).coalesce() return seglists
a1ce546623ebcaadbd3b00da61a6cbcccc6f4d26 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/a1ce546623ebcaadbd3b00da61a6cbcccc6f4d26/farutils.py
twoDGreedyCL['ra_sb,dec_sb']=skyinjectionconfidence if min_sky_area_containing_injection: twoDGreedyInj['ra_sb,dec_sb']=min_sky_area_containing_injection
twoDGreedyInj['ra_sb,dec_sb']={} twoDGreedyInj['ra_sb,dec_sb']['confidence']=min_sky_area_containing_injection if min_sky_area_containing_injection: twoDGreedyInj['ra_sb,dec_sb']['area']=min_sky_area_containing_injection
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # summary_fo=open(os.path.join(outdir,'summary.ini'),'w') summary_file=ConfigParser() summary_file.add_section('metadata') summary_file.set('metadata','group_id','X') if eventnum: summary_file.set('metadata','event_id',str(eventnum)) summary_file.add_section('Confidence levels') summary_file.set('Confidence levels','confidence levels',str(confidence_levels)) # Load in the main data paramnames, pos=loadDataFile(data[0]) #Generate any required derived parameters if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames: (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')]) pos=np.column_stack((pos,m1,m2)) paramnames.append("m1") paramnames.append("m2") # Nd=len(paramnames) print "Number of posterior samples: " + str(size(pos,0)) # Calculate means means = mean(pos,axis=0) meanStr=map(str,means) out=reduce(lambda a,b:a+'||'+b,meanStr) print 'Means:' print '||'+out+'||' RAdim=paramnames.index('RA') decdim=paramnames.index('dec') injection=None # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() #If injection parameter passed load object representation of injection #table entries. if injection: injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames))) injvals=map(str,injpoint) out=reduce(lambda a,b:a+'||'+b,injvals) print 'Injected values:' print out #Add injection values to output file summary_file.add_section('Injection values') for parnum in range(len(paramnames)): summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum)) # twoDGreedyCL={} twoDGreedyInj={} #If sky resolution parameter has been specified try and create sky map. 
skyreses=None if skyres is not None: RAvec=array(pos)[:,paramnames.index('RA')] decvec=array(pos)[:,paramnames.index('dec')] skypos=column_stack([RAvec,decvec]) injvalues=None if injection: injvalues=(injpoint[RAdim],injpoint[decdim]) skyreses,toppoints,skyinjectionconfidence,min_sky_area_containing_injection=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir) if skyinjectionconfidence: twoDGreedyCL['ra_sb,dec_sb']=skyinjectionconfidence if min_sky_area_containing_injection: twoDGreedyInj['ra_sb,dec_sb']=min_sky_area_containing_injection # Add bayes factor information to summary file summary_file.add_section('bayesfactor') if bayesfactornoise is not None: bfile=open(bayesfactornoise,'r') BSN=bfile.read() bfile.close() summary_file.set('bayesfactor','BSN',BSN) if bayesfactorcoherent is not None: bfile=open(bayesfactorcoherent,'r') BCI=bfile.read() bfile.close() summary_file.set('bayesfactor','BCI',BCI) #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. summary_file.add_section('2D greedy cl') summary_file.add_section('2D greedy cl inj') ncon=len(confidence_levels) pos_array=np.array(pos) for par1_name,par2_name in twoDGreedyMenu: print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Get posterior samples try: par1_index=paramnames.index(par1_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_index=paramnames.index(par2_name) except ValueError: print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue pars_name=("%s,%s"%(par1_name,par2_name)).lower() par1pos=pos_array[:,par1_index] par2pos=pos_array[:,par2_index] injection_=None if injection: par1_injvalue=np.array(injpoint)[par1_index] par2_injvalue=np.array(injpoint)[par2_index] injection_=(par1_injvalue,par2_injvalue) posterior_array=column_stack([par1pos,par2pos]) toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_) #Plot 2D histograms of greedily binned points if injection is not None and par1_injvalue is not None and par2_injvalue is not None: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue]) else: bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name)) # summaryString='[' for frac,area in twoDGreedyCL[pars_name].items(): summaryString+=str(area) summary_file.set('2D greedy cl',pars_name,summaryString+']') summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence)) if not os.path.exists(os.path.join(outdir,'pickle')): os.makedirs(os.path.join(outdir,'pickle')) pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle')) pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle')) for par in twoDGreedyCL.keys(): oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat')) #1D binning 
summary_file.add_section('1D mean') summary_file.add_section('1D median') summary_file.add_section('1D mode') summary_file.add_section('1D contigious cl') summary_file.add_section('1D greedy cl') summary_file.add_section('1D stacc') oneDStats={} oneDGreedyCL={} oneDContCL={} oneDGreedyInj={} oneDContInj={} max_pos,max_i=posMode(pos_array) #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. for par_name in oneDMenu: print "Binning %s to determine confidence levels ..."%par_name try: par_index=paramnames.index(par_name) except ValueError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue oneDGreedyCL[par_name]={} oneDStats[par_name]={} oneDContCL[par_name]={} oneDGreedyInj[par_name]={} oneDContInj[par_name]={} par_samps=pos_array[:,par_index] summary_file.set('1D mode',par_name,str(par_samps[max_i])) summary_file.set("1D mean",par_name,str(np.mean(par_samps))) summary_file.set("1D median",par_name,str(np.median(par_samps))) oneDStats[par_name]['mode']=par_samps[max_i] oneDStats[par_name]['mean']=np.mean(par_samps) oneDStats[par_name]['median']=np.median(par_samps) par_injvalue_=None if injection: par_injvalue_=np.array(injpoint)[par_index] oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_) #Ilya's standard accuracy statistic if injection: injvalue=np.array(injpoint)[par_index] if injvalue: stacc=bppu.stacc_stat(par_samps,injvalue) summary_file.set('1D stacc',par_name,str(stacc)) oneDStats[par_name]['stacc']=stacc pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle')) pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle')) pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle')) pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle')) pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle')) for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat')) # for par in oneDGreedyCL.keys(): oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat')) # for par in oneDStats.keys(): oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat')) # #####Generate 2D kde plots and webpage######## margdir=os.path.join(outdir,'2D') if not os.path.isdir(margdir): os.makedirs(margdir) twoDKdePaths=[] for par1,par2 in twoDplots: try: i=paramnames.index(par1) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2) continue try: j=paramnames.index(par2) except ValueError: print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2) continue print 'Generating %s-%s plot'%(paramnames[i],paramnames[j]) if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2): continue par_injvalues_=None if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) : if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not 
None: par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) ) myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_) twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png') twoDKdePaths.append(twoDKdePath) myfig.savefig(twoDKdePath) htmlfile=open(os.path.join(outdir,'posplots.html'),'w') htmlfile.write('<HTML><HEAD><TITLE>Posterior PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>') if(skyres is not None): htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>') for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize)) htmlfile.write('</table>') if bayesfactornoise is not None: htmlfile.write('<p>log Bayes factor ( coherent vs gaussian noise) = %s, Bayes factor=%f</p>'%(BSN,exp(float(BSN)))) if bayesfactorcoherent is not None: htmlfile.write('<p>log Bayes factor ( coherent vs incoherent OR noise ) = %s, Bayes factor=%f</p>'%(BCI,exp(float(BCI)))) htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>') htmlfile.write('Samples read from %s<br>'%(data[0])) htmlfile.write('<h4>Mean parameter estimates</h4>') htmlfile.write('<table border=1><tr>') paramline=reduce(lambda a,b:a+'<td>'+b,paramnames) htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>') meanline=reduce(lambda a,b:a+'<td>'+b,meanStr) htmlfile.write('<td>'+meanline+'</tr>') if injection: htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>') injline=reduce(lambda a,b:a+'<td>'+b,injvals) htmlfile.write('<td>'+injline+'<td></tr>') htmlfile.write('</table>') if injection: if skyinjectionconfidence: htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence)) else: htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>') htmlfile.write('<h5>2D Marginal PDFs</h5><br>') htmlfile.write('<table border=1 width=100%><tr>') #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>') #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>') #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>') if skyres is not None: htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>') else: htmlfile.write('<td width=30%><img width=100% src="m1dist.png:></td>') #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>') row_switch=1 for par1,par2 in twoDplots: if row_switch==3: row_switch=0 plot_path=None if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')): plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png' elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')): plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png' if plot_path: if row_switch==0: htmlfile.write('<tr>') htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>') if row_switch==2: htmlfile.write('</tr>') row_switch+=1 # if row_switch==2: htmlfile.write('<td></td></tr>') elif row_switch==1: htmlfile.write('<td></td><td></td></tr>') htmlfile.write('</table>') htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>') summary_file.add_section('1D ranking kde') summary_file.add_section('1D ranking bins') oneDplotPaths=[] for param in oneDMenu: try: par_index=paramnames.index(param) i=par_index except ValueError: print "No input chain for 
%s, skipping 1D plot."%param continue pos_samps=pos[:,i] injpar_=None if injection: injpar_=getinjpar(paramnames,injection,i) print "Generating 1D plot for %s."%param rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_) figname=param+'.png' oneDplotPath=os.path.join(outdir,figname) plotFig.savefig(os.path.join(outdir,param+'.png')) if rbins: print "r of injected value of %s (bins) = %f"%(param, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) plt.plot(pos_samps,'.',figure=myfig) if injpar_: if min(pos_samps)<injpar_ and max(pos_samps)>injpar_: plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.') myfig.savefig(os.path.join(outdir,figname.replace('.png','_samps.png'))) #summary_file.set('1D ranking kde',param,rkde) summary_file.set('1D ranking bins',param,rbins) oneDplotPaths.append(figname) htmlfile.write('<table><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th>') for plotPath in oneDplotPaths: htmlfile.write('<tr><td><img src="'+plotPath+'"></td><td><img src="'+plotPath.replace('.png','_samps.png')+'"></td>') htmlfile.write('</table>') htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S")) htmlfile.write('</BODY></HTML>') htmlfile.close() # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') for row in pos: for i in row: posfile.write('%f\t'%(i)) posfile.write('\n') # #Close files posfile.close() summary_file.write(summary_fo)
130fc48b9e1771d79cd18fd3d01a9e30a5de57b4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/130fc48b9e1771d79cd18fd3d01a9e30a5de57b4/cbcBayesSkyRes.py
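The cbcBayesSkyRes context above repeatedly calls bppu.greedyBin1/greedyBin2 to turn posterior samples into credible regions by ranking bins greedily. The following is a rough NumPy sketch of the 1D version of that idea, not the pylal implementation; greedy_interval and its arguments are invented for illustration.

# Sketch of greedy binning for a 1D credible interval, in the spirit of the
# bppu.greedyBin1 call above; this is not the pylal code.
import numpy as np

def greedy_interval(samples, bin_width, level=0.9):
    """Return the total size of the greedy credible region at `level`."""
    edges = np.arange(samples.min(), samples.max() + bin_width, bin_width)
    counts, _ = np.histogram(samples, bins=edges)
    order = np.argsort(counts)[::-1]              # rank bins, most populated first
    cumfrac = np.cumsum(counts[order]) / counts.sum()
    nbins = np.searchsorted(cumfrac, level) + 1   # bins needed to reach the level
    return nbins * bin_width

samples = np.random.normal(loc=0.0, scale=1.0, size=5000)
print(greedy_interval(samples, bin_width=0.1, level=0.9))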
self.scan_type = type
self.scan_type = type.replace("seismic","seis").upper()
def __init__(self, dag, job, cp, opts, ifo, p_nodes=[], type=""):
fb58c7d349a492c8a0f47de21dc07f423e29b187 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/fb58c7d349a492c8a0f47de21dc07f423e29b187/stfu_pipe.py
libraries=['boost_python-mt']
libraries=['boost_python']
def pkgconfig(*packages, **kw): import commands flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'} for token in commands.getoutput("pkg-config --libs --cflags %s" % ' '.join(packages)).split(): kw.setdefault(flag_map.get(token[:2]), []).append(token[2:]) return kw
e407fee51cc42713b142dc073e9174281c344a46 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/e407fee51cc42713b142dc073e9174281c344a46/setup.py
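The setup.py context above parses pkg-config output with the Python 2 commands module. Here is a hedged sketch of the same flag parsing written against subprocess for Python 3; it assumes pkg-config is on PATH and that the requested packages are installed.

# Sketch of the same pkg-config flag parsing without the Python 2 `commands`
# module; assumes the pkg-config executable is available.
import subprocess

def pkgconfig(*packages):
    flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
    out = subprocess.check_output(
        ['pkg-config', '--libs', '--cflags'] + list(packages), text=True)
    kw = {}
    for token in out.split():
        # Unknown flags collect under a None key, mirroring the original helper.
        kw.setdefault(flag_map.get(token[:2]), []).append(token[2:])
    return kw

# e.g. pkgconfig('gsl') might return {'libraries': ['gsl', 'gslcblas', 'm'], ...}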
html_tcmp_write+='<td width="30%"><img width="100%" src="2Dkde/'+twoDKdePath+'"/></td>'
html_tcmp_write+='<td width="30%"><img width="100%" src="2Dkde/'+figname+'"/></td>'
def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpage from a file containing posterior samples generated by the parameter estimation codes with 1D/2D plots and stats from the marginal posteriors for each parameter/set of parameters. """ if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # commonOutputFileObj=open(data[0]) #Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() ## Load Bayes factors ## # Add Bayes factor information to summary file # if bayesfactornoise is not None: bfile=open(bayesfactornoise,'r') BSN=bfile.read() bfile.close() print 'BSN: %s'%BSN if bayesfactorcoherent is not None: bfile=open(bayesfactorcoherent,'r') BCI=bfile.read() bfile.close() print 'BCI: %s'%BCI #Create an instance of the posterior class using the posterior values loaded #from the file and any injection information (if given). 
pos = bppu.Posterior(commonOutputFileObj,SimInspiralTableEntry=injection) if ('mc' in pos.names or 'mchirp' in pos.names) and \ 'eta' in pos.names and \ ('mass1' not in pos.names or 'm1' not in pos.names) and\ ('m2' not in pos.names or 'm2' not in pos.names): if 'mc' in pos.names: mchirp_name='mc' else: mchirp_name='mchirp' if injection: inj_mass1,inj_mass2=bppu.mc2ms(injection.mchirp,injection.eta) mass1_samps,mass2_samps=bppu.mc2ms(pos[mchirp_name].samples,pos['eta'].samples) mass1_pos=bppu.OneDPosterior('m1',mass1_samps,injected_value=inj_mass1) mass2_pos=bppu.OneDPosterior('m2',mass2_samps,injected_value=inj_mass2) pos.append(mass1_pos) pos.append(mass2_pos) ##Print some summary stats for the user...## #Number of samples print "Number of posterior samples: %i"%len(pos) # Means print 'Means:' print str(pos.means) #Median print 'Median:' print str(pos.medians) #maxL print 'maxL:' max_pos,max_pos_co=pos.maxL print max_pos_co #==================================================================# #Create web page #==================================================================# html=bppu.htmlPage('Posterior PDFs') #Create a section for meta-data/run information html_meta=html.add_section('Summary') html_meta.p('Produced from '+str(len(pos))+' posterior samples.') html_meta.p('Samples read from %s'%(data[0])) #Create a section for model selection results (if they exist) if bayesfactornoise is not None: html_model=html.add_section('Model selection') html_model.p('log Bayes factor ( coherent vs gaussian noise) = %s, Bayes factor=%f'%(BSN,exp(float(BSN)))) if bayesfactorcoherent is not None: html_model.p('log Bayes factor ( coherent vs incoherent OR noise ) = %s, Bayes factor=%f'%(BCI,exp(float(BCI)))) #Create a section for summary statistics html_stats=html.add_section('Summary statistics') html_stats.write(str(pos)) #==================================================================# #Generate sky map #==================================================================# #If sky resolution parameter has been specified try and create sky map... skyreses=None sky_injection_cl=None if skyres is not None and 'ra' in pos.names and 'dec' in pos.names: #Greedy bin sky samples (ra,dec) into a grid on the sky which preserves #? top_ranked_sky_pixels,sky_injection_cl,skyreses,injection_area=bppu.greedy_bin_sky(pos,skyres,confidence_levels) print "BCI for sky area:" print skyreses #Create sky map in outdir bppu.plot_sky_map(top_ranked_sky_pixels,outdir) #Create a web page section for sky localization results/plots html_sky=html.add_section('Sky Localization') if injection: if sky_injection_cl: html_sky.p('Injection found at confidence interval %f in sky location'%(sky_injection_cl)) else: html_sky.p('Injection not found in posterior bins in sky location!') html_sky.write('<img width="35%" src="skymap.png"/>') if skyres is not None: html_sky_write='<table border="1"><tr><th>Confidence region</th><th>size (sq. deg)</th></tr>' fracs=skyreses.keys() fracs.sort() skysizes=[skyreses[frac] for frac in fracs] for frac,skysize in zip(fracs,skysizes): html_sky_write+=('<tr><td>%f</td><td>%f</td></tr>'%(frac,skysize)) html_sky_write+=('</table>') html_sky.write(html_sky_write) #==================================================================# #2D posteriors #==================================================================# #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . 
The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. #Make a folder for the 2D kde plots margdir=os.path.join(outdir,'2Dkde') if not os.path.isdir(margdir): os.makedirs(margdir) twobinsdir=os.path.join(outdir,'2Dbins') if not os.path.isdir(twobinsdir): os.makedirs(twobinsdir) #Add a section to the webpage for a table of the confidence interval #results. html_tcig=html.add_section('2D confidence intervals (greedy binning)') #Generate the top part of the table html_tcig_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_tcig_write+='<th>%f</th>'%cl if injection: html_tcig_write+='<th>Injection Confidence Level</th>' html_tcig_write+='<th>Injection Confidence Interval</th>' html_tcig_write+='</tr>' #= Add a section for a table of 2D marginal PDFs (kde) html_tcmp=html.add_section('2D Marginal PDFs') html_tcmp.br() #Table matter html_tcmp_write='<table border="1" width="100%">' row_count=0 for par1_name,par2_name in twoDGreedyMenu: par1_name=par1_name.lower() par2_name=par2_name.lower() print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) try: pos[par1_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par1_name continue try: pos[par2_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par2_name continue #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Form greedy binning input structure greedy2Params={par1_name:par1_bin,par2_name:par2_bin} #Greedy bin the posterior samples toppoints,injection_cl,reses,injection_area=\ bppu.greedy_bin_two_param(pos,greedy2Params,confidence_levels) print "BCI %s-%s:"%(par1_name,par2_name) print reses #Generate new BCI html table row BCItableline='<tr><td>%s-%s</td>'%(par1_name,par2_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injection_cl is not None: BCItableline+='<td>%f</td>'%injection_cl BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_tcig_write+=BCItableline #= Plot 2D histograms of greedily binned points =# #greedy2PlotFig=bppu.plot_two_param_greedy_bins(np.array(toppoints),pos,greedy2Params) #greedy2PlotFig.savefig(os.path.join(twobinsdir,'%s-%s_greedy2.png'%(par1_name,par2_name))) #= Generate 2D kde plots =# print 'Generating %s-%s plot'%(par1_name,par2_name) par1_pos=pos[par1_name].samples par2_pos=pos[par2_name].samples if (size(np.unique(par1_pos))<2 or size(np.unique(par2_pos))<2): continue plot2DkdeParams={par1_name:50,par2_name:50} myfig=bppu.plot_two_param_kde(pos,plot2DkdeParams) figname=par1_name+'-'+par2_name+'_2Dkernel.png' twoDKdePath=os.path.join(margdir,figname) if row_count==0: html_tcmp_write+='<tr>' html_tcmp_write+='<td width="30%"><img width="100%" src="2Dkde/'+twoDKdePath+'"/></td>' row_count+=1 if row_count==3: html_tcmp_write+='</tr>' row_count=0 myfig.savefig(twoDKdePath) #Finish off the BCI table and write it into the etree html_tcig_write+='</table>' html_tcig.write(html_tcig_write) #Finish off the 2D kde plot table while row_count!=0: html_tcmp_write+='<td/>' row_count+=1 if row_count==3: row_count=0 html_tcmp_write+='</tr>' 
html_tcmp_write+='</table>' html_tcmp.write(html_tcmp_write) #Add a link to all plots html_tcmp.br() html_tcmp.a("2Dkde/",'All 2D marginal PDFs (kde)') html_tcmp.hr() #==================================================================# #1D posteriors #==================================================================# #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. #Add section for 1D confidence intervals html_ogci=html.add_section('1D confidence intervals (greedy binning)') #Generate the top part of the table html_ogci_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_ogci_write+='<th>%f</th>'%cl if injection: html_ogci_write+='<th>Injection Confidence Level</th>' html_ogci_write+='<th>Injection Confidence Interval</th>' html_ogci_write+='</tr>' #Add section for 1D marginal PDFs and sample plots html_ompdf=html.add_section('1D marginal posterior PDFs') html_ompdf.br() #Table matter html_ompdf_write= '<table><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>' onepdfdir=os.path.join(outdir,'1Dpdf') if not os.path.isdir(onepdfdir): os.makedirs(onepdfdir) sampsdir=os.path.join(outdir,'1Dsamps') if not os.path.isdir(sampsdir): os.makedirs(sampsdir) for par_name in oneDMenu: par_name=par_name.lower() print "Binning %s to determine confidence levels ..."%par_name try: pos[par_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue binParams={par_name:par_bin} toppoints,injectionconfidence,reses,injection_area=bppu.greedy_bin_one_param(pos,binParams,confidence_levels) oneDContCL,oneDContInj = bppu.contigious_interval_one_param(pos,binParams,confidence_levels) #Generate new BCI html table row BCItableline='<tr><td>%s</td>'%(par_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injectionconfidence is not None and injection_area is not None: BCItableline+='<td>%f</td>'%injectionconfidence BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_ogci_write+=BCItableline #Generate 1D histogram/kde plots print "Generating 1D plot for %s."%par_name oneDPDFParams={par_name:50} rbins,plotFig=bppu.plot_one_param_pdf(pos,oneDPDFParams) figname=par_name+'.png' oneDplotPath=os.path.join(onepdfdir,figname) plotFig.savefig(oneDplotPath) if rbins: print "r of injected value of %s (bins) = %f"%(par_name, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) pos_samps=pos[par_name].samples plt.plot(pos_samps,'.',figure=myfig) injpar=pos[par_name].injval if injpar: if min(pos_samps)<injpar and max(pos_samps)>injpar: plt.plot([0,len(pos_samps)],[injpar,injpar],'r-.') myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.png'))) html_ompdf_write+='<tr><td><img src="1Dpdf/'+figname+'"/></td><td><img src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td></tr>' html_ompdf_write+='</table>' html_ompdf.write(html_ompdf_write) html_ogci_write+='</table>' html_ogci.write(html_ogci_write) html_ogci.hr() html_ogci.br() html_ompdf.hr() html_ompdf.br() html_footer=html.add_section('') html_footer.p('Produced using cbcBayesPostProc.py at '+strftime("%Y-%m-%d %H:%M:%S")+' .') html_footer.p(git_version.verbose_msg) #Save results page 
resultspage=open(os.path.join(outdir,'posplots.html'),'w') resultspage.write(str(html)) # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') input_file=open(data[0]) posfile.write(input_file.read()) # posfilename2=os.path.join(outdir,'posterior_samples2.dat') pos.write_to_file(posfilename2) #Close files input_file.close() posfile.close() resultspage.close()
26f369b18f86dc93cbff7432b20d1a0333a824d0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/26f369b18f86dc93cbff7432b20d1a0333a824d0/cbcBayesPostProc.py
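The cbcBayesPostProc context above builds 2D kernel-density plots for parameter pairs via bppu.plot_two_param_kde. A small sketch of the underlying density estimate using scipy.stats.gaussian_kde instead of pylal follows; the grid size and sample data are illustrative and the plotting step is omitted.

# Sketch of the 2D kernel density estimate behind plot_two_param_kde,
# using scipy rather than pylal; shapes and data are illustrative.
import numpy as np
from scipy.stats import gaussian_kde

def two_param_kde(x_samps, y_samps, npts=50):
    kde = gaussian_kde(np.vstack([x_samps, y_samps]))
    xs = np.linspace(x_samps.min(), x_samps.max(), npts)
    ys = np.linspace(y_samps.min(), y_samps.max(), npts)
    xx, yy = np.meshgrid(xs, ys)
    density = kde(np.vstack([xx.ravel(), yy.ravel()])).reshape(xx.shape)
    return xs, ys, density   # ready for a contour or image plot

x = np.random.normal(size=2000)
y = 0.5 * x + np.random.normal(scale=0.3, size=2000)
xs, ys, density = two_param_kde(x, y)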
for name, value in params_func(events, offsetdict, *params_func_extra_args).items():
for name, value in sorted(params_func(events, offsetdict, *params_func_extra_args).items()):
def P(self, params_func, events, offsetdict, *params_func_extra_args): P_bak = 1.0 P_inj = 1.0 for name, value in params_func(events, offsetdict, *params_func_extra_args).items(): P_bak *= self.background_rates[name](*value)[0] P_inj *= self.injection_rates[name](*value)[0] return P_bak, P_inj
b0ba186b9c05b4df6bfd6b29a84583ddaa25de5d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/b0ba186b9c05b4df6bfd6b29a84583ddaa25de5d/ligolw_burca2.py
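The change above only wraps the items() iteration in sorted(), so the per-parameter rates are multiplied in a deterministic order and the floating-point product is reproducible between runs. Below is a toy sketch of that pattern with scalar rate functions; the rate dictionaries are hypothetical, unlike the interpolated rates used in ligolw_burca2.

# Sketch of the pattern above: multiply per-parameter background and injection
# rates in a fixed (sorted) order so the product is reproducible.
def combined_probabilities(params, background_rates, injection_rates):
    """params: {name: value}; *_rates: {name: callable returning a density}."""
    p_bak = p_inj = 1.0
    for name, value in sorted(params.items()):
        p_bak *= background_rates[name](value)
        p_inj *= injection_rates[name](value)
    return p_bak, p_inj

rates = {"snr": lambda v: 0.1 * v, "chisq": lambda v: 0.5}
print(combined_probabilities({"snr": 8.0, "chisq": 1.2}, rates, rates))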
posidx=find(wt>maxwt+log(randoms))
posidx=[i for i in range(0,size(weights)) if wt[i]>maxwt+log(randoms[i]) ]
def nest2pos(samps,weights): randoms=rand(size(samps,0)) wt=weights+samps[:,-1] maxwt=max(wt) posidx=find(wt>maxwt+log(randoms)) pos=samps[posidx,:] return pos
6725f1ab3cb24ec0178d5cc8d40d8b7a798f3daf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/6725f1ab3cb24ec0178d5cc8d40d8b7a798f3daf/combine_evidence.py
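The replacement above turns a pylab find() call into an explicit per-sample test: a nested-sampling sample survives when its log weight plus log likelihood exceeds the maximum plus the log of a uniform draw. Here is a vectorized NumPy sketch of that rejection step, not the combine_evidence code itself.

# Vectorized sketch of the rejection step above: a sample is kept when
# log-weight + log-likelihood beats the maximum plus log(uniform).
import numpy as np

def nest2pos(samps, log_weights):
    logwt = log_weights + samps[:, -1]             # last column: log-likelihood
    randoms = np.random.rand(len(samps))
    keep = logwt > logwt.max() + np.log(randoms)   # one uniform draw per sample
    return samps[keep]

samps = np.column_stack([np.random.rand(1000), np.random.randn(1000)])
log_weights = -np.arange(1000) * 0.01
posterior = nest2pos(samps, log_weights)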
os.path.join("bin", "search_volume_by_s1_s2"),
def run(self): # remove the automatically generated user env scripts for script in ["pylal-user-env.sh", "pylal-user-env.csh"]: log.info("removing " + script ) try: os.unlink(os.path.join("etc", script)) except: pass
d10f4813c1e5c0c77a2fbd3e48515c11d39b7a30 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/d10f4813c1e5c0c77a2fbd3e48515c11d39b7a30/setup.py
def __init__(self,cp,ifo):
def __init__(self,cp,ifo,timeref):
def __init__(self,cp,ifo): depIfoIniConfig = ifo+'config' self.depIfoDir = ifo+'_qscans_config' depQscanList = ['bg-rds-qscan', 'bg-seismic-qscan']
0e0ff4ad143c0e66865518a30b44d00ea912aafb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0e0ff4ad143c0e66865518a30b44d00ea912aafb/wscan_background.py
qscanConfig = string.strip(cp.get("fu-"+depQscan, depIfoIniConfig))
qscanConfig = self.fix_config_for_science_run( cp.get("fu-"+depQscan, depIfoIniConfig).strip(), timeref )
def __init__(self,cp,ifo): depIfoIniConfig = ifo+'config' self.depIfoDir = ifo+'_qscans_config' depQscanList = ['bg-rds-qscan', 'bg-seismic-qscan']
0e0ff4ad143c0e66865518a30b44d00ea912aafb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0e0ff4ad143c0e66865518a30b44d00ea912aafb/wscan_background.py
if opts.prepare_scan_ccin2p3: for ifo in cp.get("fu-remote-jobs","remote-ifos").strip().split(","): CCRemoteScans = prepareLyonRemoteScans(cp,ifo)
def __init__(self,cp,ifo): depIfoIniConfig = ifo+'config' self.depIfoDir = ifo+'_qscans_config' depQscanList = ['bg-rds-qscan', 'bg-seismic-qscan']
0e0ff4ad143c0e66865518a30b44d00ea912aafb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0e0ff4ad143c0e66865518a30b44d00ea912aafb/wscan_background.py
if ifo in cp.get("fu-remote-jobs","remote-ifos").strip().split(",") and timeListFile:
if ifo in cp.get("fu-remote-jobs","remote-ifos").strip().split(",") and timeListFile and CCRemoteScans:
def __init__(self,cp,ifo): depIfoIniConfig = ifo+'config' self.depIfoDir = ifo+'_qscans_config' depQscanList = ['bg-rds-qscan', 'bg-seismic-qscan']
0e0ff4ad143c0e66865518a30b44d00ea912aafb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0e0ff4ad143c0e66865518a30b44d00ea912aafb/wscan_background.py
if opts.prepare_scan_ccin2p3:
if opts.prepare_scan_ccin2p3 and CCRemoteScans:
def __init__(self,cp,ifo): depIfoIniConfig = ifo+'config' self.depIfoDir = ifo+'_qscans_config' depQscanList = ['bg-rds-qscan', 'bg-seismic-qscan']
0e0ff4ad143c0e66865518a30b44d00ea912aafb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0e0ff4ad143c0e66865518a30b44d00ea912aafb/wscan_background.py
def add(self, *args): packing.Bin.add(self, *args)
def add(self, cache_entry): packing.Bin.add(self, cache_entry, cache_entry.to_segmentlistdict())
def add(self, *args): packing.Bin.add(self, *args) self.extent = self.size.extent_all() return self
6919fdcc32ae7785ae5d13621be098432be0abdf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/6919fdcc32ae7785ae5d13621be098432be0abdf/ligolw_cafe.py
new.add(cache_entry, cache_entry.to_segmentlistdict())
new.add(cache_entry)
def pack(self, cache_entry): """ Find all bins in which this glue.lal.CacheEntry instance belongs, merge them, and add this cache entry to the result. Create a new bin for this cache entry if it does not belong in any of the existing bins.
6919fdcc32ae7785ae5d13621be098432be0abdf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/6919fdcc32ae7785ae5d13621be098432be0abdf/ligolw_cafe.py
splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()]
extents = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(abs(origbin.extent)) / n) for i in range(1, n)] + [+segments.infinity()]
def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done #
6919fdcc32ae7785ae5d13621be098432be0abdf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/6919fdcc32ae7785ae5d13621be098432be0abdf/ligolw_cafe.py
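split_bins above decides how many pieces a bin needs (the ceiling of its extent over extentlimit) and places the interior split points at equal fractions of the extent. The sketch below reproduces just that arithmetic with plain floats, without LIGOTimeGPS or segment objects.

# Sketch of the split arithmetic in split_bins, with plain floats standing in
# for LIGOTimeGPS/segment objects.
import math

def split_points(start, stop, extentlimit):
    n = int(math.ceil((stop - start) / extentlimit))
    if n <= 1:
        return []                       # bin is already short enough
    return [start + i * (stop - start) / n for i in range(1, n)]

# A 1000 s bin with a 300 s limit splits into 4 pieces at these 3 boundaries:
print(split_points(0.0, 1000.0, 300.0))   # [250.0, 500.0, 750.0]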
print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits]
print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(extent) for extent in extents[1:-1])) extents = [segments.segment(*bounds) & origbin.extent for bounds in zip(extents[:-1], extents[1:])]
def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done #
6919fdcc32ae7785ae5d13621be098432be0abdf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/6919fdcc32ae7785ae5d13621be098432be0abdf/ligolw_cafe.py
for split in splits:
for extent in extents:
def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done #
6919fdcc32ae7785ae5d13621be098432be0abdf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/6919fdcc32ae7785ae5d13621be098432be0abdf/ligolw_cafe.py
newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap)
extent_plus_max_gap = extent.protract(cafepacker.max_gap)
def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done #
6919fdcc32ae7785ae5d13621be098432be0abdf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/6919fdcc32ae7785ae5d13621be098432be0abdf/ligolw_cafe.py
if cache_entry.segment.disjoint(bin_extent_plus_max_gap):
if cache_entry.segment.disjoint(extent_plus_max_gap):
def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done #
6919fdcc32ae7785ae5d13621be098432be0abdf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/6919fdcc32ae7785ae5d13621be098432be0abdf/ligolw_cafe.py
if cache_entry_segs.intersects_segment(bin.extent):
if cache_entry_segs.intersects_segment(extent):
def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done #
6919fdcc32ae7785ae5d13621be098432be0abdf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/6919fdcc32ae7785ae5d13621be098432be0abdf/ligolw_cafe.py
bin.objects.append(cache_entry)
newbins[-1].add(cache_entry)
def split_bins(cafepacker, extentlimit, verbose = False): """ Split bins in CafePacker so that each bin has an extent no longer than extentlimit. """ # # loop over all bins in cafepacker.bins # idx = 0 while idx < len(cafepacker.bins): # # retrieve bin # origbin = cafepacker.bins[idx] # # how many pieces? if bin doesn't need splitting move to # next # n = int(math.ceil(float(abs(origbin.extent)) / extentlimit)) if n <= 1: idx += 1 continue # # calculate the times of the splits, and then build # segmentlistdicts for clipping. # splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()] if verbose: print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1])) splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])] splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits] # # build new bins, populate sizes and extents # newbins = [] for split in splits: newbins.append(LALCacheBin()) newbins[-1].size = origbin.size & split for key in tuple(newbins[-1].size): if not newbins[-1].size[key]: del newbins[-1].size[key] newbins[-1].extent = newbins[-1].size.extent_all() # # pack objects from origbin into new bins # for bin in newbins: bin_extent_plus_max_gap = bin.extent.protract(cafepacker.max_gap) for cache_entry in origbin.objects: # # quick check of gap # if cache_entry.segment.disjoint(bin_extent_plus_max_gap): continue # # apply each offset vector # cache_entry_segs = cache_entry.to_segmentlistdict() for offset_vector in cafepacker.offset_vectors: cache_entry_segs.offsets.update(offset_vector) # # test against bin # if cache_entry_segs.intersects_segment(bin.extent): # # object is coicident with # bin # bin.objects.append(cache_entry) break # # replace original bin with split bins. increment idx to # skip over all new bins # cafepacker.bins[idx:idx+1] = newbins idx += len(newbins) # # done #
6919fdcc32ae7785ae5d13621be098432be0abdf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/6919fdcc32ae7785ae5d13621be098432be0abdf/ligolw_cafe.py
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
34f5795fcb60d7590187acbce0f34abf68d1c803 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/34f5795fcb60d7590187acbce0f34abf68d1c803/pipeline.py
for f in input_filelist: print >>dagfile, """ <filename file="%s" link="input"/>""" % f
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
34f5795fcb60d7590187acbce0f34abf68d1c803 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/34f5795fcb60d7590187acbce0f34abf68d1c803/pipeline.py
for f in inout_filelist: print >>dagfile, """ <filename file="%s" link="inout"/>""" % f
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
34f5795fcb60d7590187acbce0f34abf68d1c803 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/34f5795fcb60d7590187acbce0f34abf68d1c803/pipeline.py
for f in output_filelist: print >>dagfile, """ <filename file="%s" link="output"/>""" % f
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
34f5795fcb60d7590187acbce0f34abf68d1c803 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/34f5795fcb60d7590187acbce0f34abf68d1c803/pipeline.py
<file-server protocol="file" url="file://" mount-point="/home/dbrown/projects/cbc/dax/ihope-dax3.0/847555570-847641970">
<file-server protocol="file" url="file://" mount-point="%s">
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
34f5795fcb60d7590187acbce0f34abf68d1c803 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/34f5795fcb60d7590187acbce0f34abf68d1c803/pipeline.py
<internal-mount-point mount-point="/home/dbrown/projects/cbc/dax/ihope-dax3.0/847555570-847641970" free-size="null" total-size="null"/>
<internal-mount-point mount-point="%s" free-size="null" total-size="null"/>
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
34f5795fcb60d7590187acbce0f34abf68d1c803 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/34f5795fcb60d7590187acbce0f34abf68d1c803/pipeline.py
<file-server protocol="file" url="file://" mount-point="/home/dbrown/projects/cbc/dax/ihope-dax3.0/847555570-847641970">
<file-server protocol="file" url="file://" mount-point="%s">
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
34f5795fcb60d7590187acbce0f34abf68d1c803 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/34f5795fcb60d7590187acbce0f34abf68d1c803/pipeline.py
<internal-mount-point mount-point="/home/dbrown/projects/cbc/dax/ihope-dax3.0/847555570-847641970" free-size="null" total-size="null"/>
<internal-mount-point mount-point="%s" free-size="null" total-size="null"/>
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
34f5795fcb60d7590187acbce0f34abf68d1c803 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/34f5795fcb60d7590187acbce0f34abf68d1c803/pipeline.py
""" % (hostname,hostname)
""" % (hostname,hostname,pwd,pwd,pwd,pwd)
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
34f5795fcb60d7590187acbce0f34abf68d1c803 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/34f5795fcb60d7590187acbce0f34abf68d1c803/pipeline.py
coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTrigs,'snr')
statistic = CoincInspiralUtils.coincStatistic('snr',None,None) coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTrigs,statistic)
def get_coincs_from_coire(self,files): """ uses CoincInspiralUtils to get data from old-style (coire'd) coincs """ #coincTrigs = CoincInspiralUtils.coincInspiralTable() inspTrigs = SnglInspiralUtils.ReadSnglInspiralFromFiles(files, \ mangle_event_id = True,verbose=None) #note that it's hardcoded to use snr as the statistic coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTrigs,'snr') try: inspInj = SimInspiralUtils.ReadSimInspiralFromFiles(files) coincTrigs.add_sim_inspirals(inspInj) #FIXME: name the exception! except: pass
74ab9a15843355a72cded211ab0cad56b42e32fe /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/74ab9a15843355a72cded211ab0cad56b42e32fe/skylocutils.py
num_days = int(round((duration)/86400))
num_days = int(round((int(duration))/86400))
def daily_ihope_cache(start,end,ifo,cluster=None): """ Generates cache list of daily ihope INSPIRAL xml files for give ifo and clustering (None,'30ms','100ms', or '16s') between start and end time """ #== daily path ihope_daily_path = '/archive/home/cbc/ihope_daily' #== set clustering tag if cluster==None or cluster.upper()=='UNCLUSTERED': cluster_tag='UNCLUSTERED' elif cluster.upper()=='100MS': cluster_tag='100MILLISEC_CLUSTERED' elif cluster.upper()=='30MS': cluster_tag='30MILLISEC_CLUSTERED' elif cluster.upper()=='16S': cluster_tag='16SEC_CLUSTERED' #== work out days day_start = int(GetCommandOutput('tconvert `tconvert '+str(start)+\ ' -f %D`')[0]) duration = end-start num_days = int(round((duration)/86400)) #== generate array of days day_end = day_start+num_days*86400 while day_end<end: day_end+=86400 days = numpy.arange(day_start,day_end,86400) cache=[] #== loop over days gathering files for day in days: date = GetCommandOutput('tconvert '+str(day)+' -f %Y%m%d')[0]\ .replace('\n','') day_path = os.path.join(ihope_daily_path,date[0:6],date) ls_cmd = 'ls '+day_path+'/'+ifo+'-INSPIRAL_'+cluster_tag+\ '*.xml.gz' cache_out = Popen(ls_cmd,shell=True,stdout=PIPE,stderr=PIPE) for line in cache_out.stdout.readlines(): trig_start = int(line.split('.xml')[0].split('-')[-2]) duration = int(line.split('.xml')[0].split('-')[-2]) if start<=trig_start<end or start<(trig_start+duration)<=end: cache.append(line.replace('\n','')) cache_out.stdout.close() return cache
3892bc7cf4bfa45ed90124ff636e9a2c3f0f3de8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/3892bc7cf4bfa45ed90124ff636e9a2c3f0f3de8/dqDataUtils.py
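The int() cast added in this record touches Python 2 division semantics; the sketch below only illustrates how round() behaves differently on integer and true division (the original author's intent is not documented in the record):

duration = 129600                      # 1.5 days, in seconds
print(round(duration / 86400))         # Python 2: integer division floors first -> 1.0
print(round(float(duration) / 86400))  # true division, then round               -> 2.0
# under Python 3 both lines print 2, because / is always true division there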
xml = '<filename file="%s" />' % f
xml = '<filename file="%s" />' % os.path.basename(f)
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path
cec1a043957f226949b65b3fd770ff7c197a21fe /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cec1a043957f226949b65b3fd770ff7c197a21fe/pipeline.py
hipeJob.set_pegasus_exec_dir(os.path.join( local_exec_dir, '/'.join(os.getcwd().split('/')[-2:])))
def test_and_add_hipe_arg(hipeCommand, hipe_arg): if config.has_option("hipe-arguments",hipe_arg): hipeCommand += "--" + hipe_arg + " " + \ config.get("hipe-arguments",hipe_arg) return(hipeCommand)
c507b56a4b5b1c9fc4213cf984ff021f8293a413 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/c507b56a4b5b1c9fc4213cf984ff021f8293a413/inspiralutils.py
hipeJob.set_pegasus_exec_dir(os.path.join( local_exec_dir, '/'.join(os.getcwd().split('/')[-2:]), usertag))
def test_and_add_hipe_arg(hipeCommand, hipe_arg): if config.has_option("hipe-arguments",hipe_arg): hipeCommand += "--" + hipe_arg + " " + \ config.get("hipe-arguments",hipe_arg) return(hipeCommand)
c507b56a4b5b1c9fc4213cf984ff021f8293a413 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/c507b56a4b5b1c9fc4213cf984ff021f8293a413/inspiralutils.py
plotcp.set("pipeline","bank-suffix",bankSuffix)
def plot_setup(plotDir, config, logPath, stage, injectionSuffix, zerolagSuffix, slideSuffix, bankSuffix, cacheFile, injdirType, tag = None, ifos = None, cat = 3): """ run lalapps_plot_hipe and add job to dag plotDir = directory in which to run inspiral hipe config = config file logPath = location where log files will be written stage = which stage to run (first, second or both) injectionSuffix = the string to restrict to for injections zerolagSuffix = the string to restrict to for zero lag slideSuffix = the string to restrict to for time slides bankSuffix = the string to restrict to for bank plots cacheFile = the input cache file for plotting tag = extra tag for naming """ # make the directory for running hipe mkdir(plotDir) plotcp = copy.deepcopy(config) # set details for the common section plotcp.add_section("common") plotcp.set("common","gps-start-time", plotcp.get("input","gps-start-time") ) plotcp.set("common","gps-end-time", plotcp.get("input","gps-end-time") ) plotcp.set("common","output-path", ".") plotcp.set("common","enable-output","") plotSections = ["common", "pipeline", "condor",\ "plotinspiral", "plotinspiral-meta", \ "plotthinca", "plotthinca-meta", \ "plotnumtemplates", "plotnumtemplates-meta", \ "plotinjnum", "plotinjnum-meta", \ "plotethinca", "plotethinca-meta", \ "plotinspmissed", "plotinspmissed-meta", \ "plotinspinj", "plotinspinj-meta", \ "plotsnrchi", "plotsnrchi-meta", \ "plotinspfound", \ "plotinspiralrange", "plotinspiralrange-meta", \ "ploteffdistcut", "ploteffdistcut-meta", \ "plotinspfound", "plotcoincmissed"] for seg in plotcp.sections(): if not seg in plotSections: plotcp.remove_section(seg) plotcp.remove_option("condor","hipe") plotcp.remove_option("condor","plot") plotcp.remove_option("condor","follow") # XXX Can't yet run the plotting codes in standard universe if plotcp.get("condor","universe") == "standard": plotcp.set("condor","universe","vanilla") # set the various suffixes in pipeline plotcp.set("pipeline","injection-suffix",injectionSuffix) plotcp.set("pipeline","inj-suffix",injectionSuffix) plotcp.set("pipeline","found-suffix",injectionSuffix) plotcp.set("pipeline","missed-suffix",injectionSuffix) plotcp.set("pipeline","bank-suffix",bankSuffix) plotcp.set("pipeline","trigbank-suffix",bankSuffix) plotcp.set("pipeline","zerolag-suffix",zerolagSuffix) plotcp.set("pipeline","trig-suffix",zerolagSuffix) plotcp.set("pipeline","coinc-suffix",zerolagSuffix) plotcp.set("pipeline","slide-suffix",slideSuffix) numSlides = slide_sanity(config, ("PLAYGROUND" in slideSuffix )) plotcp.set("pipeline","num-slides", numSlides) # Adding followup options to plotinspmissed analysisstart = plotcp.get("common","gps-start-time") analysisend = plotcp.get("common","gps-end-time") analysisduration = int(analysisend) - int(analysisstart) inspmissedVetoDir = "../segments" for ifo in ifos: if cat == 2: plotcp.set("plotinspmissed","followup-vetofile-" + ifo.lower(), inspmissedVetoDir + "/" + ifo + "-CATEGORY_" + str(cat) + "_VETO_SEGS-" + analysisstart + "-" + str(analysisduration) + ".txt") else: plotcp.set("plotinspmissed","followup-vetofile-" + ifo.lower(), inspmissedVetoDir + "/" + ifo + "-COMBINED_CAT_" + str(cat) + "_VETO_SEGS-" + analysisstart + "-" + str(analysisduration) + ".txt") # Adding followup option to plotinspfound and plotinspmissed plotcp.set("plotinspfound","followup-tag",injdirType) plotcp.set("plotinspmissed","followup-tag",injdirType) # Remove options if no slide or zero lag files are available. 
if "NONE_AVAILABLE" in slideSuffix: if plotcp.has_option('plotsnrchi-meta','slide-program-tag'): remove_plot_meta_option(plotcp,'slide','plotsnrchi') if plotcp.has_option('ploteffdistcut-meta','slide-program-tag'): remove_plot_meta_option(plotcp,'slide','ploteffdistcut') if plotcp.has_option('plotethinca-meta','slide-program-tag'): remove_plot_meta_option(plotcp,'slide','plotethinca') if "NONE_AVAILABLE" in zerolagSuffix: if plotcp.has_option('plotsnrchi-meta','trig-program-tag'): remove_plot_meta_option(plotcp,'trig','plotsnrchi') # set the user-tag if plotcp.get("pipeline","user-tag"): usertag = plotcp.get("pipeline","user-tag") plotcp.set("pipeline","input-user-tag",usertag) usertag += plotDir.upper() else: usertag = plotDir.upper() plotcp.set("pipeline","input-user-tag","") if tag: usertag += "_" + tag plotcp.set("pipeline","user-tag",usertag) plotcp.set("common","cache-file",cacheFile) # return to the directory, write ini file and run hipe os.chdir(plotDir) iniFile = "plot_hipe_" iniFile += plotDir if tag: iniFile += "_" + tag.lower() iniFile += ".ini" plotcp.write(file(iniFile,"w")) print "Running plot hipe in directory " + plotDir print "Using zero lag sieve: " + zerolagSuffix print "Using time slide sieve: " + slideSuffix print "Using injection sieve: " + injectionSuffix print "Using bank sieve: " + bankSuffix print # work out the hipe call: plotCommand = config.get("condor","plot") plotCommand += " --log-path " + logPath plotCommand += " --config-file " + iniFile plotCommand += " --priority 10" for item in config.items("ifo-details"): plotCommand += " --" + item[0] + " " + item[1] for item in config.items("plot-arguments"): plotCommand += " --" + item[0] + " " + item[1] if stage == "first" or stage == "both": plotCommand += " --first-stage" if stage == "second" or stage == "both": plotCommand += " --second-stage" # run lalapps_inspiral_hipe make_external_call(plotCommand) # make hipe job/node plotDag = iniFile.rstrip("ini") + usertag + ".dag" plotJob = pipeline.CondorDAGManJob(plotDag, plotDir) plotNode = pipeline.CondorDAGNode(plotJob) plotNode.set_user_tag(usertag) # return to the original directory os.chdir("..") return plotNode
c507b56a4b5b1c9fc4213cf984ff021f8293a413 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/c507b56a4b5b1c9fc4213cf984ff021f8293a413/inspiralutils.py
['SUBDAG EXTERNAL', self.__name, self.__job.get_sub_file]) )
['SUBDAG EXTERNAL', self.__name, self.__job.get_sub_file()]) )
def write_job(self,fh): """ Write the DAG entry for this node's job to the DAG file descriptor. @param fh: descriptor of open DAG file. """ if isinstance(self.job(),CondorDAGManJob): # create an external subdag from this dag fh.write( ' '.join( ['SUBDAG EXTERNAL', self.__name, self.__job.get_sub_file]) ) if self.job().get_dag_directory(): fh.write( ' DIR ' + self.job().get_dag_directory() ) else: # write a regular condor job fh.write( 'JOB ' + self.__name + ' ' + self.__job.get_sub_file() ) fh.write( '\n')
f1e1aa581a55110ef3b0e5694b5483479961d7dd /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/f1e1aa581a55110ef3b0e5694b5483479961d7dd/pipeline.py
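A small self-contained illustration, using a hypothetical Job class, of why the missing call parentheses in the removed line matter: str.join() refuses the bound method object outright, and only the called form yields the submit-file name.

class Job(object):
    def get_sub_file(self):
        return "datafind.sub"

job = Job()
try:
    ' '.join(['SUBDAG EXTERNAL', 'node0', job.get_sub_file])      # no parentheses
except TypeError as e:
    print("join fails on the bound method: %s" % e)
print(' '.join(['SUBDAG EXTERNAL', 'node0', job.get_sub_file()]))  # calling it works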
injected_cols.extend(['injected_end_time', 'injected_end_time_ns', 'injected_end_time_utc__Px_click_for_daily_ihope_xP_'])
injected_cols.extend(['injected_decisive_distance','injected_end_time', 'injected_end_time_ns', 'injected_end_time_utc__Px_click_for_daily_ihope_xP_'])
def convert_duration( duration ): return sqlutils.convert_duration( duration, convert_durations )
64ecc8995aa8c83d3ca0de584b8674ae4862a388 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/64ecc8995aa8c83d3ca0de584b8674ae4862a388/printutils.py
from glue.lal import LIGOTimeGPS
def get_pyvalue(self): return generic_get_pyvalue(self)
64ecc8995aa8c83d3ca0de584b8674ae4862a388 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/64ecc8995aa8c83d3ca0de584b8674ae4862a388/printutils.py
AND rank(""", decisive_distance, """) <= """, str(limit), """
%s""" % (limit is not None and ''.join(['AND rank(', decisive_distance, ') <= ', str(limit)]) or ''), """
def get_decisive_distance( *args ): return sorted(args)[1]
64ecc8995aa8c83d3ca0de584b8674ae4862a388 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/64ecc8995aa8c83d3ca0de584b8674ae4862a388/printutils.py
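The replacement line above assembles the optional rank clause with the old "cond and A or B" idiom (pre-ternary Python); a short sketch of how that expression evaluates (the column name "d" and the clause text are placeholders, not the real query):

limit = 10
clause = limit is not None and "AND rank(d) <= %s" % limit or ""
print(repr(clause))   # 'AND rank(d) <= 10'

limit = None
clause = limit is not None and "AND rank(d) <= %s" % limit or ""
print(repr(clause))   # ''  -- the clause drops out when no limit is given

The idiom is safe here only because the generated clause is always a non-empty (truthy) string when a limit is supplied.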
y.segment_def_cdb = x.creator_db AND \ NOT (y.start_time > %s OR %s > y.end_time) ) \
y.segment_def_cdb = x.creator_db) AND \ NOT (segment.start_time > %s OR %s > segment.end_time) \ ORDER BY segment.start_time,segment_definer.segment_def_id,segment_definer.version \
def getSciSegs(ifo=None, gpsStart=None, gpsStop=None, cut=bool(False), serverURL=None, segName="DMT-SCIENCE", seglenmin=None, segpading=0
81c52e948512e278b317a10e260fd6bef801d292 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/81c52e948512e278b317a10e260fd6bef801d292/fu_utils.py
def append_process(xmldoc, comment = None, force = None, e_thinca_parameter = None, effective_snr_factor = None, vetoes_name = None, trigger_program = None, effective_snr = None, verbose = None):
def append_process(xmldoc, comment = None, force = None, e_thinca_parameter = None, effective_snr_factor = None, vetoes_name = None, trigger_program = None, effective_snr = None, coinc_end_time_segment = None, verbose = None):
def append_process(xmldoc, comment = None, force = None, e_thinca_parameter = None, effective_snr_factor = None, vetoes_name = None, trigger_program = None, effective_snr = None, verbose = None): process = llwapp.append_process(xmldoc, program = process_program_name, version = __version__, cvs_repository = u"lscsoft", cvs_entry_time = __date__, comment = comment) params = [ (u"--e-thinca-parameter", u"real_8", e_thinca_parameter) ] if comment is not None: params += [(u"--comment", u"lstring", comment)] if force is not None: params += [(u"--force", None, None)] if effective_snr_factor is not None: params += [(u"--effective-snr-factor", u"real_8", effective_snr_factor)] if vetoes_name is not None: params += [(u"--vetoes-name", u"lstring", vetoes_name)] if trigger_program is not None: params += [(u"--trigger-program", u"lstring", trigger_program)] if effective_snr is not None: params += [(u"--effective-snr", u"lstring", effective_snr)] if verbose is not None: params += [(u"--verbose", None, None)] ligolw_process.append_process_params(xmldoc, process, params) return process
cbadb7f11bf2cc6d45cf04488cf5e6d88f2e7555 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cbadb7f11bf2cc6d45cf04488cf5e6d88f2e7555/ligolw_thinca.py
coinc_inspiral.set_end(events[0].get_end() + self.time_slide_index[time_slide_id][events[0].ifo])
coinc_inspiral.set_end(coinc_inspiral_end_time(events, self.time_slide_index[time_slide_id]))
def append_coinc(self, process_id, time_slide_id, coinc_def_id, events, effective_snr_factor): # # populate the coinc_event and coinc_event_map tables #
cbadb7f11bf2cc6d45cf04488cf5e6d88f2e7555 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cbadb7f11bf2cc6d45cf04488cf5e6d88f2e7555/ligolw_thinca.py
ntuple_comparefunc = lambda events, offset_vector: False,
ntuple_comparefunc = default_ntuple_comparefunc,
def ligolw_thinca( xmldoc, process_id, EventListType, CoincTables, coinc_definer_row, event_comparefunc, thresholds, ntuple_comparefunc = lambda events, offset_vector: False, effective_snr_factor = 250.0, veto_segments = None, trigger_program = u"inspiral", verbose = False
cbadb7f11bf2cc6d45cf04488cf5e6d88f2e7555 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/cbadb7f11bf2cc6d45cf04488cf5e6d88f2e7555/ligolw_thinca.py
if cp.has_option('followup-plotmcmc','burnin'): burnin = string.strip(cp.get('followup-plotmcmc','burnin'))
if cp.has_option('fu-plotmcmc','burnin'): burnin = string.strip(cp.get('fu-plotmcmc','burnin'))
def __init__(self,job,coinc,cp,opts,dag,ifo,ifonames,p_nodes): pipeline.CondorDAGNode.__init__(self,job)
e22c0bde31b94a74f626c9cfe8a7419b4caedf75 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/e22c0bde31b94a74f626c9cfe8a7419b4caedf75/stfu_pipe.py
plot_routine = string.strip(cp.get('followup-plotmcmc','plot_routine')) executable = string.strip(cp.get('followup-plotmcmc','executable'))
plot_routine = string.strip(cp.get('fu-plotmcmc','plot_routine')) executable = string.strip(cp.get('fu-plotmcmc','executable'))
def __init__(self,job,coinc,cp,opts,dag,ifo,ifonames,p_nodes): pipeline.CondorDAGNode.__init__(self,job)
e22c0bde31b94a74f626c9cfe8a7419b4caedf75 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/e22c0bde31b94a74f626c9cfe8a7419b4caedf75/stfu_pipe.py
["src/xlal/date.c"],
["src/xlal/date.c", "src/xlal/misc.c"],
def run(self): # remove the automatically generated user env scripts for script in ["pylal-user-env.sh", "pylal-user-env.csh"]: log.info("removing " + script ) try: os.unlink(os.path.join("etc", script)) except: pass
e0430ce899aa7d93f2182293eb13b32dc20f95e7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/e0430ce899aa7d93f2182293eb13b32dc20f95e7/setup.py
if injection:
if injection is not None and injectionconfidence is not None and injection_area is not None:
def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpage from a file containing posterior samples generated by the parameter estimation codes with 1D/2D plots and stats from the marginal posteriors for each parameter/set of parameters. """ if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # commonOutputFileObj=open(data[0]) #Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() ## Load Bayes factors ## # Add Bayes factor information to summary file # if bayesfactornoise is not None: bfile=open(bayesfactornoise,'r') BSN=bfile.read() bfile.close() print 'BSN: %s'%BSN if bayesfactorcoherent is not None: bfile=open(bayesfactorcoherent,'r') BCI=bfile.read() bfile.close() print 'BCI: %s'%BCI #Create an instance of the posterior class using the posterior values loaded #from the file and any injection information (if given). 
pos = bppu.Posterior(commonOutputFileObj,SimInspiralTableEntry=injection) if ('mc' in pos.names or 'mchirp' in pos.names) and \ 'eta' in pos.names and \ ('mass1' not in pos.names or 'm1' not in pos.names) and\ ('m2' not in pos.names or 'm2' not in pos.names): if 'mc' in pos.names: mchirp_name='mc' else: mchirp_name='mchirp' if injection: inj_mass1,inj_mass2=bppu.mc2ms(injection.mchirp,injection.eta) mass1_samps,mass2_samps=bppu.mc2ms(pos[mchirp_name].samples,pos['eta'].samples) mass1_pos=bppu.OneDPosterior('m1',mass1_samps,injected_value=inj_mass1) mass2_pos=bppu.OneDPosterior('m2',mass2_samps,injected_value=inj_mass2) pos.append(mass1_pos) pos.append(mass2_pos) ##Print some summary stats for the user...## #Number of samples print "Number of posterior samples: %i"%len(pos) # Means print 'Means:' print str(pos.means) #Median print 'Median:' print str(pos.medians) #maxL print 'maxL:' max_pos,max_pos_co=pos.maxL print max_pos_co #==================================================================# #Create web page #==================================================================# html=bppu.htmlPage('Posterior PDFs') #Create a section for meta-data/run information html_meta=html.add_section('Summary') html_meta.p('Produced from '+str(len(pos))+' posterior samples.') html_meta.p('Samples read from %s'%(data[0])) #Create a section for model selection results (if they exist) if bayesfactornoise is not None: html_model=html.add_section('Model selection') html_model.p('log Bayes factor ( coherent vs gaussian noise) = %s, Bayes factor=%f'%(BSN,exp(float(BSN)))) if bayesfactorcoherent is not None: html_model.p('log Bayes factor ( coherent vs incoherent OR noise ) = %s, Bayes factor=%f'%(BCI,exp(float(BCI)))) #Create a section for summary statistics html_stats=html.add_section('Summary statistics') html_stats.write(str(pos)) #==================================================================# #Generate sky map #==================================================================# #If sky resolution parameter has been specified try and create sky map... skyreses=None sky_injection_cl=None if skyres is not None and 'ra' in pos.names and 'dec' in pos.names: #Greedy bin sky samples (ra,dec) into a grid on the sky which preserves #? top_ranked_sky_pixels,sky_injection_cl,skyreses,injection_area=bppu.greedy_bin_sky(pos,skyres,confidence_levels) print "BCI for sky area:" print skyreses #Create sky map in outdir bppu.plot_sky_map(top_ranked_sky_pixels,outdir) #Create a web page section for sky localization results/plots html_sky=html.add_section('Sky Localization') if injection: if sky_injection_cl: html_sky.p('Injection found at confidence interval %f in sky location'%(sky_injection_cl)) else: html_sky.p('Injection not found in posterior bins in sky location!') html_sky.write('<img width="35%" src="skymap.png"/>') if skyres is not None: html_sky_write='<table border="1"><tr><th>Confidence region</th><th>size (sq. deg)</th></tr>' fracs=skyreses.keys() fracs.sort() skysizes=[skyreses[frac] for frac in fracs] for frac,skysize in zip(fracs,skysizes): html_sky_write+=('<tr><td>%f</td><td>%f</td></tr>'%(frac,skysize)) html_sky_write+=('</table>') html_sky.write(html_sky_write) #==================================================================# #2D posteriors #==================================================================# #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . 
The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. #Make a folder for the 2D kde plots margdir=os.path.join(outdir,'2Dkde') if not os.path.isdir(margdir): os.makedirs(margdir) twobinsdir=os.path.join(outdir,'2Dbins') if not os.path.isdir(twobinsdir): os.makedirs(twobinsdir) #Add a section to the webpage for a table of the confidence interval #results. html_tcig=html.add_section('2D confidence intervals (greedy binning)') #Generate the top part of the table html_tcig_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_tcig_write+='<th>%f</th>'%cl if injection: html_tcig_write+='<th>Injection Confidence Level</th>' html_tcig_write+='<th>Injection Confidence Interval</th>' html_tcig_write+='</tr>' #= Add a section for a table of 2D marginal PDFs (kde) html_tcmp=html.add_section('2D Marginal PDFs') html_tcmp.br() #Table matter html_tcmp_write='<table border="1" width="100%">' row_count=0 for par1_name,par2_name in twoDGreedyMenu: par1_name=par1_name.lower() par2_name=par2_name.lower() print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) try: pos[par1_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par1_name continue try: pos[par2_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par2_name continue #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Form greedy binning input structure greedy2Params={par1_name:par1_bin,par2_name:par2_bin} #Greedy bin the posterior samples toppoints,injection_cl,reses,injection_area=\ bppu.greedy_bin_two_param(pos,greedy2Params,confidence_levels) print "BCI %s-%s:"%(par1_name,par2_name) print reses #Generate new BCI html table row BCItableline='<tr><td>%s-%s</td>'%(par1_name,par2_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injection_cl is not None: BCItableline+='<td>%f</td>'%injection_cl BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_tcig_write+=BCItableline #= Plot 2D histograms of greedily binned points =# #greedy2PlotFig=bppu.plot_two_param_greedy_bins(np.array(toppoints),pos,greedy2Params) #greedy2PlotFig.savefig(os.path.join(twobinsdir,'%s-%s_greedy2.png'%(par1_name,par2_name))) #= Generate 2D kde plots =# print 'Generating %s-%s plot'%(par1_name,par2_name) par1_pos=pos[par1_name].samples par2_pos=pos[par2_name].samples if (size(np.unique(par1_pos))<2 or size(np.unique(par2_pos))<2): continue plot2DkdeParams={par1_name:50,par2_name:50} myfig=bppu.plot_two_param_kde(pos,plot2DkdeParams) twoDKdePath=os.path.join(margdir,par1_name+'-'+par2_name+'_2Dkernel.png') if row_count==0: html_tcmp_write+='<tr>' html_tcmp_write+='<td width="30%"><img width="100%" src="'+twoDKdePath+'"/></td>' row_count+=1 if row_count==3: html_tcmp_write+='</tr>' row_count=0 myfig.savefig(twoDKdePath) #Finish off the BCI table and write it into the etree html_tcig_write+='</table>' html_tcig.write(html_tcig_write) #Finish off the 2D kde plot table while row_count!=0: html_tcmp_write+='<td/>' row_count+=1 if row_count==3: row_count=0 html_tcmp_write+='</tr>' html_tcmp_write+='</table>' 
html_tcmp.write(html_tcmp_write) #Add a link to all plots html_tcmp.br() html_tcmp.a("2D/",'All 2D Marginal PDFs') html_tcmp.hr() #==================================================================# #1D posteriors #==================================================================# #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. #Add section for 1D confidence intervals html_ogci=html.add_section('1D confidence intervals (greedy binning)') #Generate the top part of the table html_ogci_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_ogci_write+='<th>%f</th>'%cl if injection: html_ogci_write+='<th>Injection Confidence Level</th>' html_ogci_write+='<th>Injection Confidence Interval</th>' html_ogci_write+='</tr>' #Add section for 1D marginal PDFs and sample plots html_ompdf=html.add_section('1D marginal posterior PDFs') html_ompdf.br() #Table matter html_ompdf_write= '<table><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>' onepdfdir=os.path.join(outdir,'1Dpdf') if not os.path.isdir(onepdfdir): os.makedirs(onepdfdir) sampsdir=os.path.join(outdir,'1Dsamps') if not os.path.isdir(sampsdir): os.makedirs(sampsdir) for par_name in oneDMenu: par_name=par_name.lower() print "Binning %s to determine confidence levels ..."%par_name try: pos[par_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue binParams={par_name:par_bin} toppoints,injectionconfidence,reses,injection_area=bppu.greedy_bin_one_param(pos,binParams,confidence_levels) oneDContCL,oneDContInj = bppu.contigious_interval_one_param(pos,binParams,confidence_levels) #Generate new BCI html table row BCItableline='<tr><td>%s</td>'%(par_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection: BCItableline+='<td>%f</td>'%injectionconfidence BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_ogci_write+=BCItableline #Generate 1D histogram/kde plots print "Generating 1D plot for %s."%par_name oneDPDFParams={par_name:50} rbins,plotFig=bppu.plot_one_param_pdf(pos,oneDPDFParams) figname=par_name+'.png' oneDplotPath=os.path.join(onepdfdir,figname) plotFig.savefig(oneDplotPath) if rbins: print "r of injected value of %s (bins) = %f"%(par_name, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) pos_samps=pos[par_name].samples plt.plot(pos_samps,'.',figure=myfig) injpar=pos[par_name].injval if injpar: if min(pos_samps)<injpar and max(pos_samps)>injpar: plt.plot([0,len(pos_samps)],[injpar,injpar],'r-.') myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.png'))) html_ompdf_write+='<tr><td><img src="1Dpdf/'+figname+'"/></td><td><img src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td></tr>' html_ompdf_write+='</table>' html_ompdf.write(html_ompdf_write) html_ogci_write+='</table>' html_ogci.write(html_ogci_write) html_ogci.hr() html_ogci.br() html_ompdf.hr() html_ompdf.br() html_footer=html.add_section('') html_footer.p('Produced using cbcBayesPostProc.py at '+strftime("%Y-%m-%d %H:%M:%S")+' .') html_footer.p(git_version.verbose_msg) #Save results page resultspage=open(os.path.join(outdir,'posplots.html'),'w') resultspage.write(str(html)) # Save posterior samples too... 
posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') input_file=open(data[0]) posfile.write(input_file.read()) # posfilename2=os.path.join(outdir,'posterior_samples2.dat') pos.write_to_file(posfilename2) #Close files input_file.close() posfile.close() resultspage.close()
51d2c38e970b4864181924b277e03e4a61ab1379 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/51d2c38e970b4864181924b277e03e4a61ab1379/cbcBayesPostProc.py
parent.job().get_pegasus_exec_dir(), '00/P1/P1.cache') )
parent.job().get_pegasus_exec_dir(), dax_basename + '_0') )
def recurse_pfn_cache(node,caches=[]): for parent in node._CondorDAGNode__parents: if isinstance(parent.job(), CondorDAGManJob): if parent.job().get_dax() is None: pass else: caches = recurse_pfn_cache(parent,caches) caches.append( os.path.join( parent.job().get_pegasus_exec_dir(), '00/P1/P1.cache') ) return caches
0de62c767e28251d804835e34c315b4215ffe393 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0de62c767e28251d804835e34c315b4215ffe393/pipeline.py
return self.get_eff_dist() * (2.**(-1./5) * ref_mass / self.mchirp)**(5./6)
return self.get_eff_dist(instrument) * (2.**(-1./5) * ref_mass / self.mchirp)**(5./6)
def get_chirp_dist(self,instrument,ref_mass = 1.40): return self.get_eff_dist() * (2.**(-1./5) * ref_mass / self.mchirp)**(5./6)
0aecc08166130d9b6cf39d4bc37e435b7398edd1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0aecc08166130d9b6cf39d4bc37e435b7398edd1/lsctables.py
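A sketch of the chirp-distance rescaling used by get_chirp_dist in the record above; the 1.4 Msun reference component mass is the conventional fiducial value, and the factor 2**(-1./5)*ref_mass is just the chirp mass of an equal-mass binary with that component mass:

def chirp_dist(eff_dist, mchirp, ref_mass=1.40):
    # effective distance rescaled to a fiducial 1.4+1.4 Msun chirp mass
    return eff_dist * (2.**(-1./5) * ref_mass / mchirp)**(5./6)

# chirp mass of a 1.4+1.4 Msun binary (~1.22 Msun); by construction its
# chirp distance equals its effective distance
mchirp = (1.4 * 1.4)**0.6 / (1.4 + 1.4)**0.2
print(chirp_dist(100.0, mchirp))   # ~100.0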
self.add_var_opt("findVetoes",cp.get('findVetoes','blind'))
self.add_var_opt("blind",cp.get('findVetoes','blind'))
def __init__(self, dag, job, cp, opts, coincEvent=None): """ """ self.__conditionalLoadDefaults__(findVetosNode.defaults,cp) pipeline.CondorDAGNode.__init__(self,job) self.add_var_opt("trigger-time",coincEvent.time) #Output filename oFilename="%s-findVetos_%s_%s.wiki"%(coincEvent.instruments, coincEvent.ifos, coincEvent.time) self.add_var_opt("output-file",job.outputPath+'/DataProducts/'+oFilename) self.add_var_opt("segment-url",cp.get('findVetoes','segment-url')) self.add_var_opt("output-format",cp.get('findVetoes','output-format')) self.add_var_opt("window",cp.get('findVetoes','window')) if cp.has_option('findVetoes','estimate-background'): self.add_var_opt("estimate-background",cp.get('findVetoes','estimate-background')) if cp.has_option('findVetoes','background-location'): self.add_var_opt("background-location",cp.get('findVetoes','background-location')) if cp.has_option('findVetoes','blind'): self.add_var_opt("findVetoes",cp.get('findVetoes','blind'))
1f5139f07df547b39f2c4a9b2e2fd53567379095 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/1f5139f07df547b39f2c4a9b2e2fd53567379095/stfu_pipe.py
self.cp.set("fu-condor","qscan",home_dirs()+"/rgouaty/opt/omega/omega_r3270_glnxa64_binary/bin/wpipeline")
self.cp.set("fu-condor","qscan",stfu_pipe.home_dirs()+"/rgouaty/opt/omega/omega_r3270_glnxa64_binary/bin/wpipeline")
def set_qscan_executable(self): host = stfu_pipe.get_hostname() if 'phy.syr.edu' in host: self.cp.set("fu-condor","qscan",home_dirs()+"/rgouaty/opt/omega/omega_r3270_glnxa64_binary/bin/wpipeline") else: self.cp.set("fu-condor","qscan",home_dirs()+"/romain/opt/omega/omega_r3270_glnxa64_binary/bin/wpipeline")
dce13b8cc0e86447f38bb5cad7fb87def80618e5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/dce13b8cc0e86447f38bb5cad7fb87def80618e5/WOD_Bologna.py
self.cp.set("fu-condor","qscan",home_dirs()+"/romain/opt/omega/omega_r3270_glnxa64_binary/bin/wpipeline")
self.cp.set("fu-condor","qscan",stfu_pipe.home_dirs()+"/romain/opt/omega/omega_r3270_glnxa64_binary/bin/wpipeline") def get_cp(self): return self.cp def write(self): self.get_cp().write(open(self.ini_file,"w"))
def set_qscan_executable(self): host = stfu_pipe.get_hostname() if 'phy.syr.edu' in host: self.cp.set("fu-condor","qscan",home_dirs()+"/rgouaty/opt/omega/omega_r3270_glnxa64_binary/bin/wpipeline") else: self.cp.set("fu-condor","qscan",home_dirs()+"/romain/opt/omega/omega_r3270_glnxa64_binary/bin/wpipeline")
dce13b8cc0e86447f38bb5cad7fb87def80618e5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/dce13b8cc0e86447f38bb5cad7fb87def80618e5/WOD_Bologna.py
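A short sketch of the hostname-based dispatch pattern the two lines above implement, using the standard-library socket.gethostname() in place of the pipeline's stfu_pipe.get_hostname() helper; the omega install paths are copied from the record and may not exist on other clusters:

import socket

def qscan_executable(home_base):
    # choose a site-specific wpipeline binary from the local hostname
    if 'phy.syr.edu' in socket.gethostname():
        return home_base + "/rgouaty/opt/omega/omega_r3270_glnxa64_binary/bin/wpipeline"
    return home_base + "/romain/opt/omega/omega_r3270_glnxa64_binary/bin/wpipeline"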
parser.add_option("-f", "--config-file", default="followup_pipe.ini", help="the config file, default looks for stfu_pipe.ini in path, if none is found it makes one from your environment (only provide a config file if you know you must override something)") parser.add_option("-g", "--gps-times", default='', help="Specify gps times to follow up independently of any triggers. Format --gps-times=ifos:time,ifos:time (e.g. --gps-times=H1L1:888888888.999,H2L1:787787787.987,H1H2L1:999999999.999). No segment validation is done. If there is no data for these times it will crash.")
parser.add_option("-f", "--config-file", default="WOD_Bologna.ini", help="the config file, default looks for stfu_pipe.ini in path, if none is found it makes one from your environment (only provide a config file if you know you must override something)") parser.add_option("-g", "--gps-times", default='', help="Specify gps times to follow up. Format --gps-times=ifos:time,ifos:time (e.g. --gps-times=H1L1:888888888.999,H2L1:787787787.987,H1H2L1:999999999.999). No segment validation is done. If there is no data for these times it will crash.") parser.add_option("-i", "--input-file", default='', help="Specify gps times to follow up inside a text file. Format --gps-times=myfile.txt. No segment validation is done. If there is no data for these times it will crash.")
def parse_command_line(): parser = OptionParser( version = "%prog", description = "Pipeline to setup Remote Wscans On Demand" ) parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.") parser.add_option("-f", "--config-file", default="followup_pipe.ini", help="the config file, default looks for stfu_pipe.ini in path, if none is found it makes one from your environment (only provide a config file if you know you must override something)") parser.add_option("-g", "--gps-times", default='', help="Specify gps times to follow up independently of any triggers. Format --gps-times=ifos:time,ifos:time (e.g. --gps-times=H1L1:888888888.999,H2L1:787787787.987,H1H2L1:999999999.999). No segment validation is done. If there is no data for these times it will crash.") parser.add_option("","--disable-dag-categories",action="store_true",\ default=False,help="disable the internal dag category maxjobs") parser.add_option("","--no-ht-qscan", action="store_true",\ default=False,help="disable hoft qscan nodes") parser.add_option("","--no-rds-qscan", action="store_true",\ default=False,help="disable rds qscan nodes") parser.add_option("","--no-seismic-qscan", action="store_true",\ default=False,help="disable seismic qscan nodes") parser.add_option("","--no-htQscan-datafind", action="store_true",\ default=False,help="disable hoft qscan datafind nodes") parser.add_option("","--no-rdsQscan-datafind", action="store_true",\ default=False,help="disable rds qscan datafind nodes") parser.add_option("","--do-remoteScans", action="store_true",\ default=True,help="enable the remote scans through condor flocking." \ " This option should be deprecated as soon as the condor flocking is" \ " set up on every LIGO cluster.") options, filenames = parser.parse_args() if not filenames: filenames = [] return options, (filenames or [])
dce13b8cc0e86447f38bb5cad7fb87def80618e5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/dce13b8cc0e86447f38bb5cad7fb87def80618e5/WOD_Bologna.py
default_cp = stfu_pipe.create_default_config_wod(options.config_file)
default_cp = create_default_config_wod(options.config_file)
def parse_command_line(): parser = OptionParser( version = "%prog", description = "Pipeline to setup Remote Wscans On Demand" ) parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.") parser.add_option("-f", "--config-file", default="followup_pipe.ini", help="the config file, default looks for stfu_pipe.ini in path, if none is found it makes one from your environment (only provide a config file if you know you must override something)") parser.add_option("-g", "--gps-times", default='', help="Specify gps times to follow up independently of any triggers. Format --gps-times=ifos:time,ifos:time (e.g. --gps-times=H1L1:888888888.999,H2L1:787787787.987,H1H2L1:999999999.999). No segment validation is done. If there is no data for these times it will crash.") parser.add_option("","--disable-dag-categories",action="store_true",\ default=False,help="disable the internal dag category maxjobs") parser.add_option("","--no-ht-qscan", action="store_true",\ default=False,help="disable hoft qscan nodes") parser.add_option("","--no-rds-qscan", action="store_true",\ default=False,help="disable rds qscan nodes") parser.add_option("","--no-seismic-qscan", action="store_true",\ default=False,help="disable seismic qscan nodes") parser.add_option("","--no-htQscan-datafind", action="store_true",\ default=False,help="disable hoft qscan datafind nodes") parser.add_option("","--no-rdsQscan-datafind", action="store_true",\ default=False,help="disable rds qscan datafind nodes") parser.add_option("","--do-remoteScans", action="store_true",\ default=True,help="enable the remote scans through condor flocking." \ " This option should be deprecated as soon as the condor flocking is" \ " set up on every LIGO cluster.") options, filenames = parser.parse_args() if not filenames: filenames = [] return options, (filenames or [])
dce13b8cc0e86447f38bb5cad7fb87def80618e5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/dce13b8cc0e86447f38bb5cad7fb87def80618e5/WOD_Bologna.py
gpsevents = time_only_events(options.gps_times)
if options.gps_times: gpsevents = time_only_events(options.gps_times) elif options.input_file: gpsevents = extractTimesFromFile(options.input_file) else: print >> sys.stderr, "an argument is missing in the command:\n You need to use one of the options --gps-times or --input-file" sys.exit(1)
def parse_command_line(): parser = OptionParser( version = "%prog", description = "Pipeline to setup Remote Wscans On Demand" ) parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.") parser.add_option("-f", "--config-file", default="followup_pipe.ini", help="the config file, default looks for stfu_pipe.ini in path, if none is found it makes one from your environment (only provide a config file if you know you must override something)") parser.add_option("-g", "--gps-times", default='', help="Specify gps times to follow up independently of any triggers. Format --gps-times=ifos:time,ifos:time (e.g. --gps-times=H1L1:888888888.999,H2L1:787787787.987,H1H2L1:999999999.999). No segment validation is done. If there is no data for these times it will crash.") parser.add_option("","--disable-dag-categories",action="store_true",\ default=False,help="disable the internal dag category maxjobs") parser.add_option("","--no-ht-qscan", action="store_true",\ default=False,help="disable hoft qscan nodes") parser.add_option("","--no-rds-qscan", action="store_true",\ default=False,help="disable rds qscan nodes") parser.add_option("","--no-seismic-qscan", action="store_true",\ default=False,help="disable seismic qscan nodes") parser.add_option("","--no-htQscan-datafind", action="store_true",\ default=False,help="disable hoft qscan datafind nodes") parser.add_option("","--no-rdsQscan-datafind", action="store_true",\ default=False,help="disable rds qscan datafind nodes") parser.add_option("","--do-remoteScans", action="store_true",\ default=True,help="enable the remote scans through condor flocking." \ " This option should be deprecated as soon as the condor flocking is" \ " set up on every LIGO cluster.") options, filenames = parser.parse_args() if not filenames: filenames = [] return options, (filenames or [])
dce13b8cc0e86447f38bb5cad7fb87def80618e5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/dce13b8cc0e86447f38bb5cad7fb87def80618e5/WOD_Bologna.py
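A sketch of parsing the --gps-times string format described in the option help above ("ifos:time,ifos:time"); the real pipeline uses its own time_only_events() and extractTimesFromFile() helpers, so this standalone parser is illustrative only:

def parse_gps_times(arg):
    # "H1L1:888888888.999,H2L1:787787787.987" -> [('H1L1', 888888888.999), ...]
    events = []
    for entry in arg.split(','):
        if not entry:
            continue
        ifos, gps = entry.split(':')
        events.append((ifos, float(gps)))
    return events

print(parse_gps_times("H1L1:888888888.999,H2L1:787787787.987"))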
print >>sys.stderr, "following up %s @ %s" % (ifo, event.time)
print >>sys.stdout, "following up %s @ %s" % (ifo, event.time)
def parse_command_line(): parser = OptionParser( version = "%prog", description = "Pipeline to setup Remote Wscans On Demand" ) parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.") parser.add_option("-f", "--config-file", default="followup_pipe.ini", help="the config file, default looks for stfu_pipe.ini in path, if none is found it makes one from your environment (only provide a config file if you know you must override something)") parser.add_option("-g", "--gps-times", default='', help="Specify gps times to follow up independently of any triggers. Format --gps-times=ifos:time,ifos:time (e.g. --gps-times=H1L1:888888888.999,H2L1:787787787.987,H1H2L1:999999999.999). No segment validation is done. If there is no data for these times it will crash.") parser.add_option("","--disable-dag-categories",action="store_true",\ default=False,help="disable the internal dag category maxjobs") parser.add_option("","--no-ht-qscan", action="store_true",\ default=False,help="disable hoft qscan nodes") parser.add_option("","--no-rds-qscan", action="store_true",\ default=False,help="disable rds qscan nodes") parser.add_option("","--no-seismic-qscan", action="store_true",\ default=False,help="disable seismic qscan nodes") parser.add_option("","--no-htQscan-datafind", action="store_true",\ default=False,help="disable hoft qscan datafind nodes") parser.add_option("","--no-rdsQscan-datafind", action="store_true",\ default=False,help="disable rds qscan datafind nodes") parser.add_option("","--do-remoteScans", action="store_true",\ default=True,help="enable the remote scans through condor flocking." \ " This option should be deprecated as soon as the condor flocking is" \ " set up on every LIGO cluster.") options, filenames = parser.parse_args() if not filenames: filenames = [] return options, (filenames or [])
dce13b8cc0e86447f38bb5cad7fb87def80618e5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/dce13b8cc0e86447f38bb5cad7fb87def80618e5/WOD_Bologna.py
ligolw += '"x\''
ligolw += '"'
def xml(self): """Convert a table dictionary to LIGO lightweight XML""" if len(self.table) == 0: raise LIGOLwDBError, 'attempt to convert empty table to xml' ligolw = """\
0a1bf8ad1efc042793c83c81328ffad32e9d7d64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0a1bf8ad1efc042793c83c81328ffad32e9d7d64/ldbd.py
ligolw += "%02x" % ord(ch) ligolw += '\'"'
ligolw += "%c" % ch ligolw += '"'
def xml(self): """Convert a table dictionary to LIGO lightweight XML""" if len(self.table) == 0: raise LIGOLwDBError, 'attempt to convert empty table to xml' ligolw = """\
0a1bf8ad1efc042793c83c81328ffad32e9d7d64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0a1bf8ad1efc042793c83c81328ffad32e9d7d64/ldbd.py
ligolw += '"'+self.strtoxml.xlat(str(tupi))+'"'
ligolw += '"'+self.strtoxml.xlat(string_format_func(tupi))+'"'
def xml(self): """Convert a table dictionary to LIGO lightweight XML""" if len(self.table) == 0: raise LIGOLwDBError, 'attempt to convert empty table to xml' ligolw = """\
0a1bf8ad1efc042793c83c81328ffad32e9d7d64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/0a1bf8ad1efc042793c83c81328ffad32e9d7d64/ldbd.py
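A tiny sketch of the x'....' hex-literal encoding that the removed lines produced for binary column values (the replacement in this record writes the raw characters instead):

def to_hex_literal(blob):
    # old behaviour: each byte rendered as two hex digits inside x'...'
    return "x'" + "".join("%02x" % ord(ch) for ch in blob) + "'"

print(to_hex_literal("abc"))   # x'616263'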
df = float((event1.central_freq + 0.5*event1.bandwidth - event2.central_freq - 0.5*event2.bandwidth)/(event1.central_freq + 0.5*event1.bandwidth + event2.central_freq + 0.5*event2.bandwidth))
f_cut1 = event1.central_freq + event1.bandwidth / 2 f_cut2 = event2.central_freq + event2.bandwidth / 2 df = float((f_cut1 - f_cut2) / (f_cut1 + f_cut2))
def coinc_params_func(events, offsetvector): # # check for coincs that have been vetoed entirely # if len(events) < 2: return None params = {} # # zero-instrument parameters # params["nevents"] = (len(events),) # # one-instrument parameters # for event in events: prefix = "%s_" % event.ifo params["%ssnr2_chi2" % prefix] = (event.snr**2.0, event.chisq / event.chisq_dof) # # two-instrument parameters # for event1, event2 in iterutils.choices(sorted(events, key = lambda event: event.ifo), 2): assert event1.ifo != event2.ifo prefix = "%s_%s_" % (event1.ifo, event2.ifo) dt = float((event1.get_peak() + offsetvector[event1.ifo]) - (event2.get_peak() + offsetvector[event2.ifo])) params["%sdt" % prefix] = (dt,) dA = math.log10(abs(event1.amplitude / event2.amplitude)) params["%sdA" % prefix] = (dA,) df = float((event1.central_freq + 0.5*event1.bandwidth - event2.central_freq - 0.5*event2.bandwidth)/(event1.central_freq + 0.5*event1.bandwidth + event2.central_freq + 0.5*event2.bandwidth)) params["%sdf" % prefix] = (df,) # # done # return params
5b944d01768f3990f77f10fb98e03bbb8e6edf8e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/5b944d01768f3990f77f10fb98e03bbb8e6edf8e/stringutils.py
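A sketch of the fractional upper-cutoff-frequency difference computed by the replacement lines above (the numbers are arbitrary test values):

def fractional_df(central_freq1, bandwidth1, central_freq2, bandwidth2):
    # dimensionless asymmetry of the two upper cutoff frequencies
    f_cut1 = central_freq1 + bandwidth1 / 2.
    f_cut2 = central_freq2 + bandwidth2 / 2.
    return (f_cut1 - f_cut2) / (f_cut1 + f_cut2)

print(fractional_df(100., 50., 90., 40.))   # (125 - 110) / 235 ~ 0.064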
"nevents": rate.tophat_window(1)
"nevents": rate.tophat_window(1)
def dt_binning(instrument1, instrument2): dt = 0.005 + inject.light_travel_time(instrument1, instrument2) # seconds return rate.NDBins((rate.ATanBins(-dt, +dt, 3001),))
5b944d01768f3990f77f10fb98e03bbb8e6edf8e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/5b944d01768f3990f77f10fb98e03bbb8e6edf8e/stringutils.py
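The dt_binning context above sizes the coincidence window as 5 ms plus the inter-site light travel time; below is a standalone sketch with approximate Hanford/Livingston geocentric coordinates in metres (the real code uses pylal.inject.light_travel_time, and the positions here are rounded, not the exact LAL values):

import math

positions = {
    # rough geocentric detector positions, metres
    "H1": (-2.1614e6, -3.8347e6, 4.6004e6),
    "L1": (-7.4276e4, -5.4963e6, 3.2243e6),
}

def light_travel_time(a, b):
    d = math.sqrt(sum((x - y)**2 for x, y in zip(positions[a], positions[b])))
    return d / 299792458.0

print(0.005 + light_travel_time("H1", "L1"))   # ~0.015 s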
def get_coincparamsdistributions(xmldoc):
def get_coincparamsdistributions(xmldoc, seglists = None):
def get_coincparamsdistributions(xmldoc): coincparamsdistributions, process_id = ligolw_burca_tailor.coinc_params_distributions_from_xml(xmldoc, u"string_cusp_likelihood") return coincparamsdistributions
5b944d01768f3990f77f10fb98e03bbb8e6edf8e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/5b944d01768f3990f77f10fb98e03bbb8e6edf8e/stringutils.py
def load_likelihood_data(filenames, verbose = False):
def load_likelihood_data(filenames, seglists = None, verbose = False):
def load_likelihood_data(filenames, verbose = False): coincparamsdistributions = None for n, filename in enumerate(filenames): if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(filenames)), xmldoc = utils.load_filename(filename, gz = (filename or "stdin").endswith(".gz"), verbose = verbose) if coincparamsdistributions is None: coincparamsdistributions = get_coincparamsdistributions(xmldoc) else: coincparamsdistributions += get_coincparamsdistributions(xmldoc) xmldoc.unlink() return coincparamsdistributions
5b944d01768f3990f77f10fb98e03bbb8e6edf8e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/5b944d01768f3990f77f10fb98e03bbb8e6edf8e/stringutils.py
coincparamsdistributions = get_coincparamsdistributions(xmldoc)
coincparamsdistributions = get_coincparamsdistributions(xmldoc, seglists = seglists)
def load_likelihood_data(filenames, verbose = False): coincparamsdistributions = None for n, filename in enumerate(filenames): if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(filenames)), xmldoc = utils.load_filename(filename, gz = (filename or "stdin").endswith(".gz"), verbose = verbose) if coincparamsdistributions is None: coincparamsdistributions = get_coincparamsdistributions(xmldoc) else: coincparamsdistributions += get_coincparamsdistributions(xmldoc) xmldoc.unlink() return coincparamsdistributions
5b944d01768f3990f77f10fb98e03bbb8e6edf8e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/5b944d01768f3990f77f10fb98e03bbb8e6edf8e/stringutils.py
coincparamsdistributions += get_coincparamsdistributions(xmldoc)
coincparamsdistributions += get_coincparamsdistributions(xmldoc, seglists = seglists)
def load_likelihood_data(filenames, verbose = False): coincparamsdistributions = None for n, filename in enumerate(filenames): if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(filenames)), xmldoc = utils.load_filename(filename, gz = (filename or "stdin").endswith(".gz"), verbose = verbose) if coincparamsdistributions is None: coincparamsdistributions = get_coincparamsdistributions(xmldoc) else: coincparamsdistributions += get_coincparamsdistributions(xmldoc) xmldoc.unlink() return coincparamsdistributions
5b944d01768f3990f77f10fb98e03bbb8e6edf8e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/5b944d01768f3990f77f10fb98e03bbb8e6edf8e/stringutils.py
def __init__(self, config=None):
def __init__(self, configfile=None):
def __init__(self, config=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind")
fe67d28c671b82eaa467247c03aace975539aa65 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/fe67d28c671b82eaa467247c03aace975539aa65/stfu_pipe.py
cp.set("condor","chia", self.which("lalapps_coherent_inspiral"))
cp.set("condor","chia", self.which("lalapps_coherent_inspiral"))
def __init__(self, config=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind")
fe67d28c671b82eaa467247c03aace975539aa65 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/fe67d28c671b82eaa467247c03aace975539aa65/stfu_pipe.py
if config:
if configfile:
def __init__(self, config=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind")
fe67d28c671b82eaa467247c03aace975539aa65 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/fe67d28c671b82eaa467247c03aace975539aa65/stfu_pipe.py
user_cp.read(config)
user_cp.read(configfile)
def __init__(self, config=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind")
fe67d28c671b82eaa467247c03aace975539aa65 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/fe67d28c671b82eaa467247c03aace975539aa65/stfu_pipe.py
if user_cp: self.overwrite_config(user_cp)
if user_cp: self.overwrite_config(user_cp,cp)
def __init__(self, config=None): cp = ConfigParser.ConfigParser() self.cp = cp self.time_now = "_".join([str(i) for i in time_method.gmtime()[0:6]]) self.ini_file=self.time_now + ".ini" home_base = home_dirs() # CONDOR SECTION NEEDED BY THINGS IN INSPIRAL.PY cp.add_section("condor") cp.set("condor","datafind",self.which("ligo_data_find")) cp.set("condor","inspiral",self.which("lalapps_inspiral")) cp.set("condor","chia", self.which("lalapps_coherent_inspiral")) cp.set("condor","universe","standard") # SECTIONS TO SHUT UP WARNINGS cp.add_section("inspiral") cp.add_section("data") # DATAFIND SECTION cp.add_section("datafind")
fe67d28c671b82eaa467247c03aace975539aa65 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/fe67d28c671b82eaa467247c03aace975539aa65/stfu_pipe.py
def overwrite_config(self,config):
def overwrite_config(self,config,cp):
def overwrite_config(self,config): for section in config.sections(): if not cp.has_section(section): cp.add_section(section) for option in config.options(section): cp.set(section,option,config.get(section,option))
fe67d28c671b82eaa467247c03aace975539aa65 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/fe67d28c671b82eaa467247c03aace975539aa65/stfu_pipe.py
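The two rows above give overwrite_config() its target parser explicitly instead of relying on a cp name that is not defined in its own scope. A self-contained sketch of merging one ConfigParser into another, using only the standard library (the section and option values here are made up for the example):

try:
    import configparser                     # Python 3
except ImportError:
    import ConfigParser as configparser     # Python 2, as used above

def overwrite_config(config, cp):
    # Copy every option from the user's parser into the default one,
    # creating missing sections along the way.
    for section in config.sections():
        if not cp.has_section(section):
            cp.add_section(section)
        for option in config.options(section):
            cp.set(section, option, config.get(section, option))

defaults = configparser.ConfigParser()
defaults.add_section("condor")
defaults.set("condor", "universe", "standard")

user_cp = configparser.ConfigParser()
user_cp.add_section("condor")
user_cp.set("condor", "universe", "vanilla")

overwrite_config(user_cp, defaults)
assert defaults.get("condor", "universe") == "vanilla"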
parent.job().get_pegasus_exec_dir(), dax_basename + '_0') )
parent.job().get_pegasus_exec_dir(), dax_basename + '_0.cache') )
def recurse_pfn_cache(node,caches=[]): for parent in node._CondorDAGNode__parents: if isinstance(parent.job(), CondorDAGManJob): if parent.job().get_dax() is None: pass else: caches = recurse_pfn_cache(parent,caches) dax_name = os.path.basename(parent.job().get_dax()) dax_basename = '.'.join(dax_name.split('.')[0:-1]) caches.append( os.path.join( parent.job().get_pegasus_exec_dir(), dax_basename + '_0') ) return caches
444df41c387b572f8d1af98af9fe7770169ad36d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/444df41c387b572f8d1af98af9fe7770169ad36d/pipeline.py
if code>0:
if code>0 and len(err)>0:
def system_call(item, command, divert_output_to_log = True): """ Makes a system call. @params item: a text specifying the content of the text (e.g. number of the GRB the message is associated with) (see also 'info') @params command: the command to be executed on the bash @params divert_output_to_log: If this flag is set to True the output of the given command is automatically put into the log-file. If the output of some command itself is further used, like science segments, this flag must be set to False, so that the output is diverted where it should go. """ l = logfile_name() # put the command used into the log file info(item, ">>> "+command) # and the output (and error) of the command as well if divert_output_to_log: command_actual = command+' >>%s 2>>%s '%(l,l) else: command_actual = command +' 2>>%s '%l # perform the command code, out, err = external_call(command_actual) if code>0: info(item, "ERROR: " +err)
95499224566e9b069470bbc218a52002020d0f64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/95499224566e9b069470bbc218a52002020d0f64/pylal_exttrig_llutils.py
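The change above makes system_call() report a failure only when the exit code is non-zero and something was actually written to stderr. A hedged sketch of the same check built directly on subprocess (run_and_report is an illustrative helper, not the external_call() used by the module, and the log-file handling is omitted):

import subprocess

def run_and_report(command):
    # Run a shell command, capture both streams, and flag it only when the
    # command failed (non-zero exit) and produced some error text.
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode > 0 and len(err) > 0:
        return "ERROR: " + err.decode("utf-8", "replace")
    return out.decode("utf-8", "replace")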
output, error = internal_call(cmdtmp)
code, output, error = external_call(cmdtmp)
def make_cvs_copy(self, files, dest_dir): """ Copies all the files given in the list 'files' to dest_dir and creates a file 'cvs_versions.txt' in dest_dir containing the actual CVS version of the files @param files: list of files to be copied from self.input_dir @param dest_dir: destination directory """
95499224566e9b069470bbc218a52002020d0f64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/95499224566e9b069470bbc218a52002020d0f64/pylal_exttrig_llutils.py
coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTriggers,'snr')
coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTrigs,'snr')
def get_coincs_from_coire(self,files): """ uses CoincInspiralUtils to get data from old-style (coire'd) coincs """ coincTrigs = CoincInspiralUtils.coincInspiralTable() inspTrigs = SnglInspiralUtils.ReadSnglInspiralFromFiles(files, \ mangle_event_id = True,verbose=None) #note that it's hardcoded to use snr as the statistic coincTrigs = CoincInspiralUtils.coincInspiralTable(inspTriggers,'snr') try: inspInj = SimInspiralUtils.ReadSimInspiralFromFiles(files) coincTrigs.add_sim_inspirals(inspInj) #FIXME: name the exception! except: pass
e5ae69c744042b915e1b487e90286834eb35a53c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/e5ae69c744042b915e1b487e90286834eb35a53c/skylocutils.py
injection_area=bin_size*i
injection_area=bin_size*(i+1)
def _greedy_bin(greedyHist,greedyPoints,injection_bin_index,bin_size,Nsamples,confidence_levels): """ An interal function representing the common, dimensionally-independent part of the greedy binning algorithms. """ #Now call confidence level C extension function to determine top-ranked pixels (injectionconfidence,toppoints)=_calculate_confidence_levels( greedyHist, greedyPoints, injection_bin_index, bin_size, Nsamples ) #Determine interval/area contained within given confidence intervals nBins=0 confidence_levels.sort() reses={} toppoints=np.array(toppoints) for printcl in confidence_levels: nBins=1 #Start at top of list of ranked pixels... accl=toppoints[0,3] #Loop over next significant pixels and their confidence levels while accl<printcl and nBins<=len(toppoints): nBins=nBins+1 accl=toppoints[nBins-1,3] reses[printcl]=nBins*bin_size #Find area injection_area=None if injection_bin_index and injectionconfidence: i=list(np.nonzero(np.asarray(toppoints)[:,2]==injection_bin_index))[0] injection_area=bin_size*i return toppoints,injectionconfidence,reses,injection_area
a13e17ac84786e99a6ea7129f877320fbee2a115 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/a13e17ac84786e99a6ea7129f877320fbee2a115/bayespputils.py
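The fix above counts the injection's own pixel: i is the zero-based position of the injection bin in the confidence-ranked list, so the searched area up to and including that bin spans i + 1 pixels, not i. A tiny worked example with made-up numbers:

bin_size = 0.25   # area per pixel, in whatever units the sky map uses
i = 0             # the injection falls in the top-ranked pixel

# The old expression reports zero searched area even though one pixel has
# already been covered; multiplying by (i + 1) gives the expected 0.25.
assert bin_size * i == 0.0
assert bin_size * (i + 1) == 0.25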
def set_glob(self, file_glob):
    """
    Sets the glob name
    """
    self.add_var_opt('glob',file_glob)

def set_input(self, input_file):
    """
    Sets the input file name
    """
    self.add_var_opt('input',input_file)
def get_ifo_tag(self): """ Returns the IFO tag string """ return self.__ifo_tag
3e68beb8ab8c781f241f889655a1038c100354fd /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/3e68beb8ab8c781f241f889655a1038c100354fd/inspiral.py
myAngle=arcsin(dY/dX)
myAngle=arctan(dY/dX)
def scatterPointer(A=None,B=None): """ Expects two ordered pairs as tuples (x1,y1) and (x2,y2)... Then this defines an angle. From this angle we orient a triangle point and return this as a tuple of requested size. This can be plotted by the scatter plot to point in a particular orientation. The direction right(0,0)->(1,0) is angle 0 radians, then we rotate counter clockwise from there... What is returned in a three element tuple (numsides,style,angle) which can be put into a plot call via marker=X as a **kwarg """ if A == None or B == None: return (3,0,0) if( A != type(tuple()) and len(A) != 2) \ or \ ( B != type(tuple()) and len(B) != 2): return (3,0,0) # # Calculate orientation of triangle # Ang = arcsin(dY/dX) dY=float(B[-1]-A[-1]) dX=float(B[0]-A[0]) if dY>=0 and dX>=0: myAngle=arcsin(dY/dX) elif dY>=0 and dX<0: myAngle=pi-arcsin(dY/dX) elif dY<0 and dX<0: myAngle=arcsin(dY/dX)+pi elif dY<0 and dX>0: myAngle=(2.0*pi)-arcsin(dY/dX) else: myAngle=0 return (3,0,myAngle)
85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7/followupPDSurface.py
myAngle=pi-arcsin(dY/dX)
myAngle=pi-arctan(dY/dX)
def scatterPointer(A=None,B=None): """ Expects two ordered pairs as tuples (x1,y1) and (x2,y2)... Then this defines an angle. From this angle we orient a triangle point and return this as a tuple of requested size. This can be plotted by the scatter plot to point in a particular orientation. The direction right(0,0)->(1,0) is angle 0 radians, then we rotate counter clockwise from there... What is returned in a three element tuple (numsides,style,angle) which can be put into a plot call via marker=X as a **kwarg """ if A == None or B == None: return (3,0,0) if( A != type(tuple()) and len(A) != 2) \ or \ ( B != type(tuple()) and len(B) != 2): return (3,0,0) # # Calculate orientation of triangle # Ang = arcsin(dY/dX) dY=float(B[-1]-A[-1]) dX=float(B[0]-A[0]) if dY>=0 and dX>=0: myAngle=arcsin(dY/dX) elif dY>=0 and dX<0: myAngle=pi-arcsin(dY/dX) elif dY<0 and dX<0: myAngle=arcsin(dY/dX)+pi elif dY<0 and dX>0: myAngle=(2.0*pi)-arcsin(dY/dX) else: myAngle=0 return (3,0,myAngle)
85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7/followupPDSurface.py
myAngle=arcsin(dY/dX)+pi
myAngle=arctan(dY/dX)+pi
def scatterPointer(A=None,B=None): """ Expects two ordered pairs as tuples (x1,y1) and (x2,y2)... Then this defines an angle. From this angle we orient a triangle point and return this as a tuple of requested size. This can be plotted by the scatter plot to point in a particular orientation. The direction right(0,0)->(1,0) is angle 0 radians, then we rotate counter clockwise from there... What is returned in a three element tuple (numsides,style,angle) which can be put into a plot call via marker=X as a **kwarg """ if A == None or B == None: return (3,0,0) if( A != type(tuple()) and len(A) != 2) \ or \ ( B != type(tuple()) and len(B) != 2): return (3,0,0) # # Calculate orientation of triangle # Ang = arcsin(dY/dX) dY=float(B[-1]-A[-1]) dX=float(B[0]-A[0]) if dY>=0 and dX>=0: myAngle=arcsin(dY/dX) elif dY>=0 and dX<0: myAngle=pi-arcsin(dY/dX) elif dY<0 and dX<0: myAngle=arcsin(dY/dX)+pi elif dY<0 and dX>0: myAngle=(2.0*pi)-arcsin(dY/dX) else: myAngle=0 return (3,0,myAngle)
85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7/followupPDSurface.py
myAngle=(2.0*pi)-arcsin(dY/dX)
myAngle=(2.0*pi)-arctan(dY/dX)
def scatterPointer(A=None,B=None): """ Expects two ordered pairs as tuples (x1,y1) and (x2,y2)... Then this defines an angle. From this angle we orient a triangle point and return this as a tuple of requested size. This can be plotted by the scatter plot to point in a particular orientation. The direction right(0,0)->(1,0) is angle 0 radians, then we rotate counter clockwise from there... What is returned in a three element tuple (numsides,style,angle) which can be put into a plot call via marker=X as a **kwarg """ if A == None or B == None: return (3,0,0) if( A != type(tuple()) and len(A) != 2) \ or \ ( B != type(tuple()) and len(B) != 2): return (3,0,0) # # Calculate orientation of triangle # Ang = arcsin(dY/dX) dY=float(B[-1]-A[-1]) dX=float(B[0]-A[0]) if dY>=0 and dX>=0: myAngle=arcsin(dY/dX) elif dY>=0 and dX<0: myAngle=pi-arcsin(dY/dX) elif dY<0 and dX<0: myAngle=arcsin(dY/dX)+pi elif dY<0 and dX>0: myAngle=(2.0*pi)-arcsin(dY/dX) else: myAngle=0 return (3,0,myAngle)
85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7/followupPDSurface.py
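The four rows above swap arcsin(dY/dX) for arctan(dY/dX) in each quadrant branch of scatterPointer(): dY/dX is a slope, so its inverse tangent is the angle, and arcsin of the ratio is undefined whenever |dY/dX| > 1. numpy's arctan2 does the same quadrant bookkeeping in a single call; the sketch below is an alternative formulation for comparison, not the function as committed (pointer_angle is an illustrative name).

from numpy import arctan2, pi

def pointer_angle(a, b):
    # Counter-clockwise angle, in [0, 2*pi), of the direction from point a to
    # point b; arctan2 handles all four quadrants and a vertical dX == 0.
    dy = float(b[1] - a[1])
    dx = float(b[0] - a[0])
    return arctan2(dy, dx) % (2.0 * pi)

assert abs(pointer_angle((0, 0), (1, 0)) - 0.0) < 1e-12        # pointing right
assert abs(pointer_angle((0, 0), (0, 1)) - pi / 2.0) < 1e-12   # pointing up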
type="string",default=None,\
type="string",default="dummy",\
def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ
85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7/followupPDSurface.py
origData=dict()
origData["beam"]=beamSpigot.getDataStream(beamName,gpsStart,gpsEnd)
origData=dict()
def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ
85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7/followupPDSurface.py
tmpData=beamSpigot.getDataStream(myLabel[myKey],gpsA,gpsB) mySnipData[myKey]=interp(mySnipData["time"], getTimeStamps(tmpData), tmpData)
if beamName == "dummy" and myKey == "beam":
    mySnipData[myKey]=ones(size(mySnipData["time"]))
else:
    tmpData=beamSpigot.getDataStream(myLabel[myKey],gpsA,gpsB)
    mySnipData[myKey]=interp(mySnipData["time"], getTimeStamps(tmpData), tmpData)
def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ
85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7/followupPDSurface.py
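The branch added above substitutes a flat series of ones when no real beam channel is configured ("dummy" is the default from the earlier option rows), and otherwise resamples the fetched stream onto the snippet's time stamps with interp. A minimal numpy.interp sketch with made-up arrays (beamSpigot and getTimeStamps are not reproduced here):

import numpy as np

snippet_times = np.linspace(0.0, 1.0, 5)    # time base the snippet uses
source_times = np.linspace(0.0, 1.0, 11)    # time base the channel came on
source_values = source_times ** 2           # some channel data

# Linear interpolation of the channel onto the snippet's time stamps.
resampled = np.interp(snippet_times, source_times, source_values)
assert resampled.shape == snippet_times.shape

# Placeholder used when there is no beam channel at all.
placeholder = np.ones(snippet_times.size)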
size=50,color='white')
size=starSize,color='white')
def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ
85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7/followupPDSurface.py
size=50,color='white')
size=starSize,color='white')
def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ
85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7/followupPDSurface.py
facecolor=None,\
def convert2DHistTo3ColVectors(myMatrix,xBins=None,yBins=None): """ Create three vectors X,Y,Z for ease of use in functs like contour, and scatter instead of imshow! If [x,y]Bins==None use bin index for X, Y Input matrix assumed square!! """ if xBins==None: xBins=range(len(myMatrix)) if yBins==None: yBins=range(len(myMatrix[0])) if len(xBins)*len(yBins) != myMatrix.size: raise Exception, "Input matrix and bin vectors disagree!" tX=empty(myMatrix.size,type(xBins[0])) tY=empty(myMatrix.size,type(yBins[0])) tZ=empty(myMatrix.size,type(myMatrix[0][0])) myIndex=0 for ii,iVal in enumerate(xBins): for jj,jVal in enumerate(yBins): tX[myIndex]=iVal tY[myIndex]=jVal tZ[myIndex]=myMatrix[ii][jj] myIndex=myIndex+1 return tX,tY,tZ
85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/85dd29f68b1a30f27a22c6fa0de5a1f96b00e3f7/followupPDSurface.py
self.daq = nds.daq(self.host, self.port)
self.daq = nds.daq(host, port)
def __init__(self, host, port): self.daq = nds.daq(self.host, self.port) self.channels = self.daq.recv_channel_list() self.channelLeaves, self.channelTree = make_channel_tree(self.channels) self.rates = tuple(sorted(set(int(c.rate) for c in self.channels))) self.channel_types = tuple(c for c in nds.channel_type.values.values() if c != nds.channel_type.unknown) self.selected_rates = frozenset(self.rates) self.selected_channel_types = frozenset(self.channel_types)
77730307054d2184fca65b719e2f077650cb3c6c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/77730307054d2184fca65b719e2f077650cb3c6c/channel_browser.py
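The fix above hands the constructor arguments straight to nds.daq(); self.host and self.port were never assigned, so reading them at that point would raise AttributeError. A generic illustration of the pattern (Daq and Browser are stand-in classes, not the nds bindings, and the host name is made up):

class Daq(object):
    # Stand-in for nds.daq: just records where it would connect.
    def __init__(self, host, port):
        self.endpoint = (host, port)

class Browser(object):
    def __init__(self, host, port):
        # Use the arguments directly; referencing self.host / self.port here
        # would fail because nothing has assigned them yet.
        self.daq = Daq(host, port)

b = Browser("nds.example.org", 31200)
assert b.daq.endpoint == ("nds.example.org", 31200)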
fd, filename = tempfile.mkstemp(suffix = ".sqlite", dir = path)
os.close(fd)
temporary_file = tempfile.NamedTemporaryFile(suffix = ".sqlite", dir = path)
def new_unlink(self, orig_unlink = temporary_file.unlink):
    try:
        orig_unlink("%s-journal" % self)
    except:
        pass
    orig_unlink(self)
temporary_file.unlink = new_unlink
filename = temporary_file.name
temporary_files[filename] = temporary_file
def mktmp(path, verbose = False): fd, filename = tempfile.mkstemp(suffix = ".sqlite", dir = path) os.close(fd) if verbose: print >>sys.stderr, "using '%s' as workspace" % filename # mkstemp() ignores umask, creates all files accessible # only by owner; we should respect umask. note that # os.umask() sets it, too, so we have to set it back after # we know what it is umsk = os.umask(0777) os.umask(umsk) os.chmod(filename, 0666 & ~umsk) return filename
494d717ccc5a701b4f41290104b6e60458650beb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/494d717ccc5a701b4f41290104b6e60458650beb/dbtables.py
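The replacement above moves from mkstemp(), which leaves deletion and the umask/chmod dance to the caller, to NamedTemporaryFile, whose wrapper object removes the file once it is closed or garbage-collected; the committed version additionally overrides unlink so SQLite's "-journal" companion file is removed too. A minimal standard-library sketch of the NamedTemporaryFile behaviour (suffix and directory are illustrative):

import os
import tempfile

# The wrapper keeps the file on disk for as long as the object is referenced.
tmp = tempfile.NamedTemporaryFile(suffix=".sqlite", dir=tempfile.gettempdir())
working_filename = tmp.name
assert os.path.exists(working_filename)

# Closing (or dropping the last reference to) the wrapper deletes the file.
tmp.close()
assert not os.path.exists(working_filename)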
os.remove(working_filename)
del temporary_files[working_filename]
def discard_connection_filename(filename, working_filename, verbose = False): """ Like put_connection_filename(), but the working copy is simply deleted instead of being copied back to its original location. This is a useful performance boost if it is known that no modifications were made to the file, for example if queries were performed but no updates. Note that the file is not deleted if the working copy and original file are the same, so it is always safe to call this function after a call to get_connection_filename() even if a separate working copy is not created. """ if working_filename != filename: if verbose: print >>sys.stderr, "removing '%s' ..." % working_filename, os.remove(working_filename) if verbose: print >>sys.stderr, "done."
494d717ccc5a701b4f41290104b6e60458650beb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/494d717ccc5a701b4f41290104b6e60458650beb/dbtables.py
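With the change above, discard_connection_filename() no longer calls os.remove(); it deletes the entry from the module-level temporary_files dictionary, so the NamedTemporaryFile wrapper created by mktmp() loses its last reference and cleans up the file (and, via the overridden unlink, its journal) itself. A small sketch of that ownership pattern (the helper names are illustrative; only the temporary_files dictionary mirrors the code above):

import os
import tempfile

temporary_files = {}

def make_working_copy():
    tmp = tempfile.NamedTemporaryFile(suffix=".sqlite")
    temporary_files[tmp.name] = tmp     # the dictionary owns the wrapper
    return tmp.name

def discard_working_copy(filename):
    # Dropping the dictionary entry drops the last reference; under CPython's
    # reference counting the file disappears immediately, no os.remove() needed.
    del temporary_files[filename]

name = make_working_copy()
assert os.path.exists(name)
discard_working_copy(name)
assert not os.path.exists(name)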
maxbin=0
def getinjpar(inj,parnum): if parnum==0: return inj.mchirp if parnum==1: return inj.eta if parnum==2: return inj.get_end() if parnum==3: return inj.phi0 if parnum==4: return inj.distance if parnum==5: return inj.longitude if parnum==6: return inj.latitude if parnum==7: return inj.polarization if parnum==8: return inj.inclination return None
33519ea48f8ccd5716372f3219410b44ab5f50ca /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/33519ea48f8ccd5716372f3219410b44ab5f50ca/OddsPostProc.py
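The getinjpar() context above dispatches on a numeric parameter index with a chain of ifs. An equivalent, more compact lookup is sketched below purely as an illustrative alternative; the attribute order mirrors the original function, and nothing here is part of the committed change.

# Hypothetical alternative: map the parameter index to an accessor once.
_GETTERS = (
    lambda inj: inj.mchirp,
    lambda inj: inj.eta,
    lambda inj: inj.get_end(),
    lambda inj: inj.phi0,
    lambda inj: inj.distance,
    lambda inj: inj.longitude,
    lambda inj: inj.latitude,
    lambda inj: inj.polarization,
    lambda inj: inj.inclination,
)

def getinjpar(inj, parnum):
    # Fall back to None for out-of-range indices, like the original chain.
    if 0 <= parnum < len(_GETTERS):
        return _GETTERS[parnum](inj)
    return None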
hist[maxbin]=0
frac=frac+(maxbin/len(pos))
hist[maxpos]=0
frac=frac+(float(maxbin)/float(len(pos)))
def getinjpar(inj,parnum): if parnum==0: return inj.mchirp if parnum==1: return inj.eta if parnum==2: return inj.get_end() if parnum==3: return inj.phi0 if parnum==4: return inj.distance if parnum==5: return inj.longitude if parnum==6: return inj.latitude if parnum==7: return inj.polarization if parnum==8: return inj.inclination return None
33519ea48f8ccd5716372f3219410b44ab5f50ca /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5758/33519ea48f8ccd5716372f3219410b44ab5f50ca/OddsPostProc.py
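The last pair of rows fixes two things at once: the bin to zero out is indexed by the position of the maximum (maxpos), not by its value, and the accumulated fraction needs true division, since under Python 2 an int/int quotient truncates to zero whenever maxbin < len(pos). A made-up numerical illustration of the division half:

maxbin = 37      # hypothetical count of samples in the current bin
npos = 1000      # hypothetical total number of posterior samples

truncated = maxbin // npos                # what int / int gives in Python 2: 0
correct = float(maxbin) / float(npos)     # the intended fraction: 0.037

assert truncated == 0
assert abs(correct - 0.037) < 1e-12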