repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
presto | presto-master/python/presto/cosine_rand.py | from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as Num
import os
import json
from presto.psr_utils import hist
from presto.Pgplot import plotxy, closeplot
here = os.path.dirname(__file__)
n = 1000
if (0):
# Use the following to generate the xs
from presto.simple_roots import newton_raphson
rval = 0.0
rs = Num.arange(n+1, dtype=float)/n
xs = Num.zeros(n+1, dtype=float)
def func(x):
return Num.sin(2.0*Num.pi*x)/(2.0*Num.pi) + x - rval
def dfunc(x):
return Num.cos(2.0*Num.pi*x) + 1
for (ii, rval) in enumerate(rs[:-1]):
if (ii==n/2):
xs[ii] = 0.5
else:
xs[ii] = newton_raphson(func, dfunc, 0.0, 1.0)
xs[0] = 0.0
xs[n] = 1.0
    with open('cosine_rand.json', 'w') as outfile:
        json.dump(xs.tolist(), outfile)
else:
    pfile = os.path.join(here, 'cosine_rand.json')
    with open(pfile) as infile:
        xs = json.load(infile)
def cosine_rand(num):
"""cosine_rand(num): Return num phases that are randomly distributed
as per a sinusoid with maximum at phase=0 (0 < phase < 1).
"""
rands = n*Num.random.random(num)
    indices = rands.astype(int)
fracts = rands-indices
lo = Num.take(xs, indices)
hi = Num.take(xs, indices+1)
return fracts*(hi-lo)+lo
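# Example usage (a quick sanity check, not part of the original API):
#   phases = cosine_rand(10000)                    # 10000 phases in [0, 1)
#   assert ((phases >= 0.0) & (phases < 1.0)).all()
# The table 'xs' inverts the CDF r = x + sin(2*pi*x)/(2*pi) of the density
# 1 + cos(2*pi*x), so uniform deviates map to phases by linear interpolation
# between adjacent table entries.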
if __name__ == '__main__':
if (0):
        import time
        numtrials = 20
        numrandnums = 1000000
        for funct in [cosine_rand]:
            times = []
            for jj in range(numtrials):
                tt = time.perf_counter()
                funct(numrandnums)
                times.append(time.perf_counter()-tt)
print("Average time = ", Num.add.reduce(Num.asarray(times))/numtrials)
else:
rs = Num.arange(n+1, dtype=float)/n
plotxy(xs, rs)
closeplot()
hist(cosine_rand(10000), 100, color='red')
closeplot()
| 1,936 | 27.485294 | 82 | py |
presto | presto-master/python/presto/barycenter.py | from builtins import range
## Automatically adapted for numpy Apr 14, 2006 by convertcode.py
import math
import numpy as Numeric
# Most of these routines were taken from TEMPO v11.004 (arrtim.f, setup.f)
def convert_angle(inval, flag=1):
"""
convert_angle(inval, flag=1):
Converts a coded double to an angle based on the optional 'flag'
'flag' = 1: Input form = ddmmss.ss, Output in radians (default)
'flag' = 2: Input form = ddmmss.ss, Output in frac of 2pi
'flag' = 3: Input form = hhmmss.ss, Output in radians
'flag' = 4: Input form = hhmmss.ss, Output in frac of 2pi
"""
twopi = 6.28318530717958647692528676655900576
(im, id) = math.modf(inval / 10000.0) # Integer deg/hours
(s, im) = math.modf((im * 10000.0) / 100.0) # Integer minutes
s = s * 100.0 # seconds
ang = (id + (im + s / 60.0) / 60.0) / 360.0 # Angle in frac of 2pi
if (flag > 2): ang = ang * 15.0
if (flag == 1 or flag == 3): ang = ang * twopi
return ang
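# Worked example of the packed-angle encoding (values follow from the formula
# above): 123456.78 encodes 12d 34m 56.78s, so dms2deg(123456.78) defined
# below returns ~12.582439 degrees.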
def hms2hours(hms):
"""
hms2hours(hms):
Converts an angle 'hms' expressed as hhmmss.ss into
fractional hours.
"""
return convert_angle(hms, 4) * 24.0
def dms2deg(dms):
"""
dms2deg(dms):
Converts an angle 'dms' expressed as ddmmss.ss into
fractional degrees.
"""
return convert_angle(dms, 2) * 360.0
def rad2dms(rad):
"""
rad2dms(rad):
Convert 'rad' radians into dd:mm:ss.sss format.
"""
deg = rad * 180.0 / math.pi
(mm, dd) = math.modf(deg)
(ss, mm) = math.modf(mm * 60.0)
ss = ss * 60.0
id = abs(int(dd))
if (abs(dd) < 10.0):
if (dd < 0): d = '-0'+repr(id)
else: d = '0'+repr(id)
else:
if (dd < 0): d = '-'+repr(id)
else: d = repr(id)
im = abs(int(mm))
if (abs(mm) < 10): m = '0'+repr(im)
else: m = repr(im)
if (abs(ss) < 10): s = '0'+repr(abs(ss))
else: s = repr(abs(ss))
return d+':'+m+':'+s
def rad2hms(rad):
"""
rad2hms(rad):
Convert 'rad' radians into hh:mm:ss.sss format.
"""
hours = rad * 12.0 / math.pi
(mm, hh) = math.modf(hours)
(ss, mm) = math.modf(mm * 60.0)
ss = ss * 60.0
ih = abs(int(hh))
if (abs(hh) < 10.0):
if (hh < 0): h = '-0'+repr(ih)
else: h = '0'+repr(ih)
else:
if (hh < 0): h = '-'+repr(ih)
else: h = repr(ih)
im = abs(int(mm))
if (abs(mm) < 10): m = '0'+repr(im)
else: m = repr(im)
if (abs(ss) < 10): s = '0'+repr(abs(ss))
else: s = repr(abs(ss))
return h+':'+m+':'+s
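# For instance, rad2hms(math.pi) gives '12:00:00.0' and rad2dms(math.pi/2)
# gives '90:00:00.0'; the seconds field keeps repr()'s default precision.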
def geodetic2geocentcyl(lat, lon, elev):
"""
geodetic2geocentcyl(lat, lon, elev):
Return a list containing the Geocentric Cylindrical coords.
'lat' is Geodetic latitude in degrees (ddmmss.ss)
'long' is Geodetic west longitude in degrees (ddmmss.ss)
'elev' is meters above mean sea level
"""
rad_e = 6378140.0
velc = 299792458.0
flat = 1.0/298.257
ault = 499.004786
lon = convert_angle(lon)
lat = convert_angle(lat)
aa_c = 1.0 / math.sqrt(1.0 + (-2.0 + flat) * \
flat * math.sin(lat) * math.sin(lat))
aa_arcf = (rad_e * aa_c + elev) * math.cos(lat)
aa_arsf = (rad_e * (1.0 - flat) * (1.0 - flat) * \
aa_c + elev) * math.sin(lat)
hlt = math.atan2(aa_arsf, aa_arcf)
erad = math.sqrt(aa_arcf * aa_arcf + aa_arsf * aa_arsf)
hrd = erad / (velc * ault)
site = [] # site is a list containing [r, z/velc, longitude in rad]
site.append(hrd * math.cos(hlt) * ault)
site.append(site[0] * math.tan(hlt))
site.append(lon)
return site
def xyz2geocentcyl(x, y, z):
"""
xyz2geocentcyl(x, y, z):
Return a list containing the Geocentric Cylindrical coords.
'x', 'y', and 'z' are referenced to the Geocenter in m.
"""
velc = 299792458.0
ault = 499.004786
erad = math.sqrt(x * x + y * y + z * z)
hlt = math.asin(z / erad)
lon = math.atan2(-y, x)
hrd = erad / (velc * ault)
site = [] # site is a list containing [r, z/velc, longitude in rad]
site.append(hrd * math.cos(hlt) * ault)
site.append(site[0] * math.tan(hlt))
site.append(lon)
return site
def obs_coords(observ):
"""
obs_coords(observ):
Return a list containing the Geocentric Cylindrical Coords for
an observatory found in the TEMPO 'obsys.dat'.
'observ' is the two letter observatory code
from 'obsys.dat' (i.e. 'PK' = Parkes)
"""
obs = {}
obs['GB'] = [382645.48, 795054.53, 798.5, '', 'GREEN BANK']
obs['QU'] = [422333.2, 722040.4, 306.0, '', 'QUABBIN']
obs['AO'] = [2390490.0, -5564764.0, 1994727.0, 'XYZ', 'ARECIBO XYZ (JPL)']
obs['HO'] = [-424818.0, -1472621.0, 50.0, '', 'Hobart, Tasmania']
obs['PR'] = [402047.7, 743853.85, 43.0, '', 'PRINCETON']
obs['VL'] = [-1601192.0, -5041981.4, 3554871.4, 'XYZ', 'VLA XYZ']
obs['PK'] = [-330000.04, -1481542.00, 392.0, '', 'PARKES']
obs['JB'] = [3822252.643, -153995.683, 5086051.443, 'XYZ', 'JODRELL BANK']
obs['G3'] = [382546.30, 795056.36, 893.7, '', 'GB 300FT']
obs['G1'] = [382615.409, 795009.613, 880.87, '', 'GB 140FT']
obs['G8'] = [382545.90, 795036.87, 835.8, '', 'GB 85-3']
obs['V2'] = [340443.497, 1073703.819, 2124.0, '', 'VLA SITE']
obs['BO'] = [443118.48, -113848.48, 25.0, '', 'NORTHERN CROSS']
obs['MO'] = [-352219.00, -1492525.00, 500.0, '', 'MOST']
obs['NC'] = [4324165.81, 165927.11, 4670132.83, 'XYZ', 'Nancay']
obs['EF'] = [4033949.5, 486989.4, 4900430.8, 'XYZ', 'Effelsberg']
obs['JB'] = [531412.0, 21824.0, 78.0, '', 'JODRELL BANK']
obs['FB'] = [332235.0, 1171501.0, 0.0, '', 'Fallbrook']
obs['MT'] = [314119.6, 1105304.4, 2606.0, '', 'MMT']
if obs[observ][3] == 'XYZ':
return xyz2geocentcyl(obs[observ][0], obs[observ][1], \
obs[observ][2])
else:
return geodetic2geocentcyl(obs[observ][0], obs[observ][1], \
obs[observ][2])
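# For example, obs_coords('PK') returns the TEMPO-style site list for Parkes:
# cylindrical radius and z expressed as light-travel times (seconds), plus the
# west longitude in radians, as used for barycentric delay calculations.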
def precess_J2000_to_B1950(ra, dec, rapm=0.0, decpm=0.0, par=0.0, vel=0.0):
"""
precess_J2000_to_B1950(ra, dec, rapm, decpm, par, vel):
Precess a set of J2000.0 FK5 coords to epoch B1950.0 FK4.
Return a list containing the B1950 version of the arguments.
'ra' is the J2000 right ascension (hhmmss.ssss)
'dec' is the J2000 declination (ddmmss.ssss)
'rapm' is the J2000 RA proper motion in rad/Julian Year (0.0)
'decpm' is the J2000 DEC proper motion in rad/Julian Year (0.0)
'par' is the parallax in arcsec (0.0)
'vel' is the radial velocity in km/s (+ = away from us) (0.0)
Note: Parenthesized values at the ends of the above lines are the
default values.
"""
# This is converted from the SLALIB routine fk524.f
# This routine converts stars from the new, IAU 1976, FK5, Fricke
# system, to the old, Bessel-Newcomb, FK4 system. The precepts
# of Smith et al (Ref 1) are followed, using the implementation
# by Yallop et al (Ref 2) of a matrix method due to Standish.
# Kinoshita's development of Andoyer's post-Newcomb precession is
# used. The numerical constants from Seidelmann et al (Ref 3) are
# used canonically.
twopi = 6.283185307179586476925287
# Radians per year to arcsec per century
pmf = 100.0 * 60.0 * 60.0 * 360.0 / twopi
tiny = 1.0e-30
# Km per sec to AU per tropical century
# = 86400 * 36524.2198782 / 149597870
vf = 21.095
a = Numeric.array([-1.62557e-6, -0.31919e-6, -0.13843e-6, \
+1.245e-3, -1.580e-3, -0.659e-3])
emi = Numeric.array([[+0.9999256795, -0.0111814828, -0.0048590040,
-0.000551, -0.238560, +0.435730],
[+0.0111814828, +0.9999374849, -0.0000271557,
+0.238509, -0.002667, -0.008541],
[+0.0048590039, -0.0000271771, +0.9999881946,
-0.435614, +0.012254, +0.002117],
[-2.42389840e-6, +2.710544e-8, +1.177742e-8,
+0.99990432, -0.01118145, -0.00485852],
                         [-2.710544e-8, -2.42392702e-6, +6.585e-11,
+0.01118145, +0.99991613, -0.00002716],
[-1.177742e-8, +6.585e-11, -2.42404995e-6,
+0.00485852, -0.00002717,+0.99996684]])
r = convert_angle(ra, 3)
d = convert_angle(dec, 1)
ur = rapm * pmf
ud = decpm * pmf
px = par
rv = vel
sr = math.sin(r)
cr = math.cos(r)
sd = math.sin(d)
cd = math.cos(d)
w = vf * rv * px
x = cr * cd
y = sr * cd
z = sd
v1 = Numeric.array([x, y, z, -ur * y - cr * sd * ud + w * x,
ur * x - sr * sd * ud + w * y, cd * ud + w * z])
# convert position+velocity vector to BN system
v2 = Numeric.zeros(6, 'd')
for i in range(6):
w = 0.0
for j in range(6):
w = w + emi[j][i] * v1[j]
v2[i] = w
# position vector components and magnitude
x = v2[0]
y = v2[1]
z = v2[2]
rxyz = math.sqrt(x * x + y * y + z * z)
# apply e-terms to position
w = x * a[0] + y * a[1] + z * a[2]
x = x + a[0] * rxyz - w * x
y = y + a[1] * rxyz - w * y
z = z + a[2] * rxyz - w * z
# recompute magnitude
rxyz = math.sqrt(x * x + y * y + z * z)
# apply e-terms to both position and velocity
x = v2[0]
y = v2[1]
z = v2[2]
w = x * a[0] + y * a[1] + z * a[2]
wd = x * a[3] + y * a[4] + z * a[5]
x = x + a[0] * rxyz - w * x
y = y + a[1] * rxyz - w * y
z = z + a[2] * rxyz - w * z
xd = v2[3] + a[3] * rxyz - wd * x
yd = v2[4] + a[4] * rxyz - wd * y
zd = v2[5] + a[5] * rxyz - wd * z
# convert to spherical
rxysq = x * x + y * y
rxy = math.sqrt(rxysq)
if (x==0.0 and y==0.0): r = 0.0
else:
r = math.atan2(y, x)
if (r < 0.0): r = r + twopi
d = math.atan2(z, rxy)
if (rxy > tiny):
ur = (x * yd - y * xd) / rxysq
ud = (zd * rxysq - z * (x * xd + y * yd)) / ((rxysq + z * z) * rxy)
# radial velocity and parallax
if (px > tiny):
rv = (x * xd + y * yd + z * zd) / (px * vf * rxyz)
px = px / rxyz
return [r, d, ur / pmf, ud / pmf, px, rv]
# Most of the following time formulae are from
# http://legacy.gsfc.nasa.gov/docs/xte/abc/time_tutorial.html
def TAI_minus_UTC(mjd):
"""
TAI_minus_UTC(mjd):
Return the difference between TAI (International Atomic Time)
and UTC in seconds at a specified MJD.
'mjd' is the Modified Julian Date UTC
"""
    # MJDs at the start of which each new leap second takes effect (through 2017 Jan 1)
    LeapSecMJD = Numeric.array([41499, 41683, 42048, 42413, 42778, \
                                43144, 43509, 43874, 44239, 44786, \
                                45151, 45516, 46247, 47161, 47892, \
                                48257, 48804, 49169, 49534, 50083, \
                                50630, 51179, 53736, 54832, 56109, \
                                57204, 57754])
TAI_UTC_diffs = Numeric.arange(len(LeapSecMJD)+1)+10
TAI_minus_UTC = TAI_UTC_diffs[Numeric.searchsorted(LeapSecMJD, mjd)]
return TAI_minus_UTC
def TT_minus_UTC(mjd):
"""
TT_minus_UTC(mjd):
Return the difference between TT (Terrestrial Dynamic Time)
and UTC in seconds at a specified MJD. TT used to be called
ET (Ephemeris Time).
'mjd' is the Modified Julian Date UTC
"""
TT_minus_TAI = 32.184
TT_minus_UTC = TT_minus_TAI + TAI_minus_UTC(mjd)
return TT_minus_UTC
def TDB_minus_UTC(mjd):
"""
TDB_minus_UTC(mjd):
Return the difference between TDB (Barycentric Dynamic Time)
and UTC in seconds at a specified MJD.
'mjd' is the Modified Julian Date UTC
"""
g = (357.53 + 0.9856003 * (mjd - 51544.5)) * 0.017453292519943295769
TDB_minus_TT = 0.001658 * math.sin(g) + 0.000014 * math.sin(2.0 * g)
TDB_minus_UTC = TDB_minus_TT + TT_minus_UTC(mjd)
return TDB_minus_UTC
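# Example (for an MJD covered by the leap-second table above):
#   TAI_minus_UTC(50000) -> 29, so TT_minus_UTC(50000) -> 61.184 s, and
#   TDB_minus_UTC(50000) differs from TT - UTC by at most a couple of ms.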
| 12,153 | 36.862928 | 78 | py |
presto | presto-master/python/presto/rfifind.py | from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import range
from builtins import object
import numpy as np
from scipy.signal import medfilt
from presto import infodata
from presto.Pgplot import *
"""
class rfifind:
Read in stats and mask information from an rfifind run.
Use that information to make a list of bad channels to
zap and/or weight and offset properly to (hopefully) improve
your overall signal-to-noise ratio.
A typical usage would be something like:
In [1]: import rfifind
In [2]: a = rfifind.rfifind('SPIGOT_040424_1614-22_rfifind.mask')
In [3]: a.set_zap_chans(power=1000)
Recommending to zap 75 channels:
55 channels from Fourier power levels > 1000.0
6 channels from avgs levels being off by > 5.0 sigma
2 channels from stds levels being off by > 2.0 sigma
20 channels for being within 0.01 of the band edge
0 channels were specified by the user
0 channels for having no variation
The mask recommends 116 additional bad channels
adding them to the zap_chans list.
<< Plot is shown here >>
Type <RETURN> for next page:
In [4]: a.write_zap_chans()
In [5]: a.set_weights_and_offsets()
In [6]: a.write_weights_and_offsets()
"""
class rfifind(object):
def __init__(self, filename):
self.basename = filename[:filename.find("_rfifind.")+8]
self.idata = infodata.infodata(self.basename+".inf")
self.read_stats()
self.read_mask()
self.get_bandpass()
if len(self.goodints):
self.get_median_bandpass()
self.determine_padvals()
def read_stats(self):
x = open(self.basename+".stats")
self.nchan, self.nint, self.ptsperint, self.lobin, self.numbetween = \
np.fromfile(x, dtype=np.int32, count=5)
count = self.nchan * self.nint
self.pow_stats = np.fromfile(x, dtype=np.float32, count=count)
self.avg_stats = np.fromfile(x, dtype=np.float32, count=count)
self.std_stats = np.fromfile(x, dtype=np.float32, count=count)
self.pow_stats.shape = (self.nint, self.nchan)
self.avg_stats.shape = (self.nint, self.nchan)
self.std_stats.shape = (self.nint, self.nchan)
x.close()
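    # The *.stats file parsed above holds five 32-bit ints (nchan, nint,
    # ptsperint, lobin, numbetween) followed by three nint x nchan float32
    # arrays: the Fourier power, average, and standard deviation statistics
    # for each interval/channel block.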
def read_mask(self):
x = open(self.basename+".mask")
self.time_sig, self.freq_sig, self.MJD, self.dtint, self.lofreq, self.df = \
np.fromfile(x, dtype=np.float64, count=6)
self.freqs = np.arange(self.nchan)*self.df + self.lofreq
self.times = np.arange(self.nint)*self.dtint
self.MJDs = self.times/86400.0 + self.MJD
nchan, nint, ptsperint = np.fromfile(x, dtype=np.int32, count=3)
nzap = np.fromfile(x, dtype=np.int32, count=1)[0]
if nzap:
self.mask_zap_chans = np.fromfile(x, dtype=np.int32, count=nzap)
else:
self.mask_zap_chans = np.asarray([])
self.mask_zap_chans = set(self.mask_zap_chans)
if len(self.mask_zap_chans)==self.nchan:
print("WARNING!: All channels recommended for masking!")
nzap = np.fromfile(x, dtype=np.int32, count=1)[0]
if nzap:
self.mask_zap_ints = np.fromfile(x, dtype=np.int32, count=nzap)
else:
self.mask_zap_ints = np.asarray([])
if len(self.mask_zap_ints)==self.nint:
print("WARNING!: All intervals recommended for masking!")
nzap_per_int = np.fromfile(x, dtype=np.int32, count=nint)
self.mask_zap_chans_per_int = []
for nzap in nzap_per_int:
if nzap:
if nzap == nchan:
tozap = np.arange(0, nchan, dtype=np.int32)
else:
tozap = np.fromfile(x, dtype=np.int32, count=nzap)
else:
tozap = np.asarray([])
self.mask_zap_chans_per_int.append(tozap)
x.close()
def get_bandpass(self, plot=False):
"""
get_bandpass():
This routine returns a 'good' bandpass based on an average
of the average bandpasses, with the exception of the
intervals that were recommended for zapping in the mask.
"""
ints = np.arange(self.nint)
badints = self.mask_zap_ints
goodints = set(ints) - set(badints)
goodints = np.asarray(list(goodints))
self.goodints = goodints
if not len(goodints):
print("WARNING!: Cannot get bandpass because all intervals zapped.")
return 0.0
self.bandpass_avg = self.avg_stats[goodints,:].mean(0)
self.bandpass_std = self.std_stats[goodints,:].mean(0)
self.bandpass_pow = self.pow_stats[goodints,:].mean(0)
if plot:
plotxy(self.bandpass_avg, self.freqs, labx="Frequency (MHz)")
plotxy(self.bandpass_avg+self.bandpass_std, self.freqs, color="red")
plotxy(self.bandpass_avg-self.bandpass_std, self.freqs, color="red")
closeplot()
return self.bandpass_avg
def get_median_bandpass(self, medlen=21, plot=False):
self.median_bandpass_avg = medfilt(self.bandpass_avg, medlen)
self.median_bandpass_std = medfilt(self.bandpass_std, medlen)
if plot:
plotxy(self.median_bandpass_avg, self.freqs,
labx="Frequency (MHz)")
plotxy(self.median_bandpass_avg+self.median_bandpass_std,
self.freqs, color="red")
plotxy(self.median_bandpass_avg-self.median_bandpass_std,
self.freqs, color="red")
closeplot()
return self.median_bandpass_avg
def determine_padvals(self, frac_to_keep=0.8):
"""
determine_padvals():
This routines determines padding values to use for each
channel.
"""
        # NOTE: Casting to 64/32-bit floats is done to mimic 'mask.c'.
num = int(np.round(self.nint*frac_to_keep))
start = (self.nint - num) // 2
self.padvals = np.zeros(self.nchan, dtype='float32')
for ichan in range(self.nchan):
isort = np.argsort(self.avg_stats[:,ichan])
self.padvals[ichan] = np.mean(self.avg_stats.astype('float64')[isort,ichan][start:start+num])
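        # i.e. the padding value for each channel is a trimmed mean: the
        # average of the central 'frac_to_keep' fraction of that channel's
        # sorted per-interval averages.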
def get_pow_zap_chans(self, pow_threshold=100.0):
return np.where(self.bandpass_pow > pow_threshold)[0]
def get_avg_zap_chans(self, sigma=5.0):
resid = np.fabs(self.bandpass_avg - self.median_bandpass_avg)
has_var = self.median_bandpass_std != 0.0
no_var = self.median_bandpass_std == 0.0
resid[has_var] /= self.median_bandpass_std[has_var]
resid[no_var] = np.inf
return np.where(resid > sigma)[0]
def get_std_zap_chans(self, sigma=5.0):
resid = np.fabs(self.bandpass_std - self.median_bandpass_std)
has_var = self.median_bandpass_std != 0.0
no_var = self.median_bandpass_std == 0.0
resid[has_var] /= self.median_bandpass_std[has_var]
resid[no_var] = np.inf
return np.where(resid > sigma)[0]
def get_no_signal_chans(self):
med_std = np.median(self.median_bandpass_std)
return np.where(self.bandpass_std < 0.15 * med_std)[0]
def get_edge_chans(self, edges=0.01):
nedge = int(self.nchan * edges)
return np.concatenate((np.arange(nedge),
np.arange(nedge) + self.nchan-nedge))
def set_zap_chans(self, power=100.0, asigma=5.0, ssigma=2.0, plot=True,
edges=0.01, usemask=True, chans=[]):
self.user_zap_chans = set(chans)
self.pow_zap_chans = set(self.get_pow_zap_chans(power))
self.avg_zap_chans = set(self.get_avg_zap_chans(asigma))
self.std_zap_chans = set(self.get_std_zap_chans(ssigma))
self.no_signal_chans = set(self.get_no_signal_chans())
self.edge_chans = set(self.get_edge_chans(edges))
self.zap_chans = self.pow_zap_chans | \
self.avg_zap_chans | \
self.std_zap_chans | \
self.user_zap_chans | \
self.edge_chans | \
self.no_signal_chans
print("Recommending to zap %d channels:" % len(self.zap_chans))
print(" %d channels from Fourier power levels > %.1f" % \
(len(self.pow_zap_chans), power))
print(" %d channels from avgs levels being off by > %.1f sigma" % \
(len(self.avg_zap_chans), asigma))
print(" %d channels from stds levels being off by > %.1f sigma" % \
(len(self.std_zap_chans), ssigma))
print(" %d channels for being within %.3g of the band edge" % \
(len(self.edge_chans), edges))
print(" %d channels were specified by the user" % len(self.user_zap_chans))
print(" %d channels for having no variation" % len(self.no_signal_chans))
if usemask:
onlymask = self.mask_zap_chans - self.zap_chans
print(" The mask recommends %d additional bad channels" % len(onlymask))
if len(onlymask):
print(" adding them to the zap_chans list.")
self.zap_chans = self.zap_chans | self.mask_zap_chans
self.zap_chans = np.asarray(sorted(list(self.zap_chans)))
if plot: self.plot_zapped_bandpass()
def plot_zapped_chans(self, device="/xwin"):
plotxy(self.bandpass_avg, self.freqs,
labx="Frequency (MHz)", device=device)
plotxy(self.median_bandpass_avg+self.median_bandpass_std,
self.freqs, color='blue')
plotxy(self.median_bandpass_avg-self.median_bandpass_std,
self.freqs, color='blue')
plotxy(self.bandpass_avg[self.zap_chans],
self.freqs[self.zap_chans],
line=None, symbol=16, color="red")
closeplot()
def plot_zapped_bandpass(self, device="/xwin"):
not_zapped = set(np.arange(self.nchan)) - set(self.zap_chans)
not_zapped = np.asarray(list(not_zapped))
if len(not_zapped):
yhi = (self.median_bandpass_avg+1.5*self.median_bandpass_std).max()
ylo = (self.median_bandpass_avg-1.5*self.median_bandpass_std).min()
plotxy(self.median_bandpass_avg, self.freqs, rangey=[ylo, yhi],
labx="Frequency (MHz)", color='light gray', device=device)
plotxy(self.median_bandpass_avg+self.median_bandpass_std,
self.freqs, color='blue')
plotxy(self.median_bandpass_avg-self.median_bandpass_std,
self.freqs, color='blue')
plotxy(self.bandpass_avg[not_zapped], self.freqs[not_zapped], color='white')
plotxy(self.median_bandpass_avg[self.zap_chans], self.freqs[self.zap_chans],
line=None, symbol=16, color='red')
closeplot()
else:
print("WARNING!: All channels recommended for masking!")
def write_zap_chans(self, filename=None):
if filename is None:
filename = self.basename+".zapchans"
outfile = open(filename, "w")
for chan in np.unique(sorted(self.zap_chans)):
outfile.write("%d\n" % chan)
outfile.close()
def set_weights_and_offsets(self):
# The channel with the highest, non-zapped std has a weight of 1.0
# That channels std is called std_norm. All the others are set to
# std_norm / std[i], where i is the channel number
not_zapped = set(np.arange(self.nchan)) - set(self.zap_chans)
not_zapped = np.asarray(list(not_zapped))
if len(not_zapped):
std_norm = self.bandpass_std[not_zapped].max()
has_var = self.bandpass_std != 0.0
# weights for channels without variance will automatically be 0
self.weights = np.zeros_like(self.bandpass_std)
self.weights[has_var] = std_norm / self.bandpass_std[has_var]
self.weights[self.zap_chans] = 0.0
self.offsets = self.bandpass_avg
else:
print("WARNING!: All channels recommended for masking!")
def write_weights_and_offsets(self, filename=None, invertband=False):
if filename is None:
filename = self.basename+".weights"
outfile = open(filename, "w")
outfile.write("# Chan Weight Offset\n")
weights = self.weights[::-1] if invertband else self.weights
offsets = self.offsets[::-1] if invertband else self.offsets
        for c, w, o in zip(np.arange(self.nchan), weights, offsets):
outfile.write("%5d %7.5f %7.5f\n" % (c, w, o))
outfile.close()
def write_bandpass(self, filename=None, invertband=False):
if filename is None:
filename = self.basename+".bandpass"
outfile = open(filename, "w")
avg = self.bandpass_avg[::-1] if invertband else self.bandpass_avg
std = self.bandpass_std[::-1] if invertband else self.bandpass_std
freqs = self.freqs[::-1] if invertband else self.freqs
outfile.write("# Chan Freq(MHz) Mean StDev\n")
for ii, (freq, mean, stdev) in enumerate(zip(freqs, avg, std)):
outfile.write("%6d %9.3f %9.3f %9.3f\n" % (ii, freq, mean, stdev))
outfile.close()
def write_median_bandpass(self, filename=None, invertband=False):
if filename is None:
filename = self.basename+".median_bandpass"
outfile = open(filename, "w")
avg = self.median_bandpass_avg[::-1] if invertband else self.median_bandpass_avg
std = self.median_bandpass_std[::-1] if invertband else self.median_bandpass_std
freqs = self.freqs[::-1] if invertband else self.freqs
outfile.write("# Chan Freq(MHz) Mean StDev\n")
for ii, (freq, mean, stdev) in enumerate(zip(freqs, avg, std)):
outfile.write("%6d %9.3f %9.3f %9.3f\n" % (ii, freq, mean, stdev))
outfile.close()
def write_weights(self, threshold=0.05, filename=None, invertband=False):
# This weights file works for psrfits_subband
if filename is None:
filename = self.basename+".weights"
outfile = open(filename, "w")
outfile.write("# Chan Weight\n")
weights = self.weights[::-1] if invertband else self.weights
for c, w in zip(np.arange(self.nchan), weights):
if w > threshold:
outfile.write("%5d 1\n" % (c))
else:
outfile.write("%5d 0\n" % (c))
outfile.close()
if __name__=="__main__":
import sys
a = rfifind(sys.argv[1])
if len(a.goodints)==0:
        sys.exit(0)
sys.stderr.write("WARNING!: If raw data have channels in decreasing freq\n")
sys.stderr.write(" order, the channel ordering as given will be\n")
sys.stderr.write(" inverted! Use 'invertband=True' in \n")
sys.stderr.write(" write_weights() in that case!\n")
if (a.idata.telescope=='GBT' and a.idata.lofreq < 1000.0):
sys.stderr.write("Data is from GBT Prime Focus, auto-flipping the weights/offsets...\n\n")
invert = True
else:
invert = False
a.set_zap_chans(power=200.0,
edges=0.01,
asigma=2.0,
ssigma=2.0,
usemask=True,
plot=True,
chans=[])
a.write_zap_chans()
a.set_weights_and_offsets()
a.write_weights(invertband=invert)
a.write_bandpass(invertband=invert)
#a.write_weights_and_offsets(invertband=invert)
| 15,832 | 44.108262 | 105 | py |
presto | presto-master/python/presto/psrfits.py | #!/usr/bin/env python
"""
Collect PSRFITS information, emulating behavior of PRESTO.
Read PSRFITS data.
Patrick Lazarus, May 11, 2010
Jul 4, 2016 (Scott Ransom added 2-bit reading)
Mar 25, 2021 (Scott Ransom added 1-bit reading)
"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
import re
import os
import os.path
import warnings
import argparse
import astropy.io.fits as pyfits
from astropy import coordinates, units
import astropy.time as aptime
import numpy as np
from presto import spectra
from presto import psr_constants as pc
# Regular expression for parsing DATE-OBS card's format.
date_obs_re = re.compile(r"^(?P<year>[0-9]{4})-(?P<month>[0-9]{2})-"
"(?P<day>[0-9]{2})T(?P<hour>[0-9]{2}):"
"(?P<min>[0-9]{2}):(?P<sec>[0-9]{2}"
"(?:\.[0-9]+)?)$")
# Default global debugging mode
debug = True
def unpack_1bit(data):
"""Unpack 1-bit data that has been read in as bytes.
Input:
data: array of bits packed into an array of bytes.
Output:
outdata: unpacked array. The size of this array will
be eight times the size of the input data.
"""
b0 = np.bitwise_and(data >> 0x07, 0x01)
b1 = np.bitwise_and(data >> 0x06, 0x01)
b2 = np.bitwise_and(data >> 0x05, 0x01)
b3 = np.bitwise_and(data >> 0x04, 0x01)
b4 = np.bitwise_and(data >> 0x03, 0x01)
b5 = np.bitwise_and(data >> 0x02, 0x01)
b6 = np.bitwise_and(data >> 0x01, 0x01)
b7 = np.bitwise_and(data, 0x01)
return np.dstack([b0, b1, b2, b3, b4, b5, b6, b7]).flatten()
def unpack_2bit(data):
"""Unpack 2-bit data that has been read in as bytes.
Input:
data: array of unsigned 2-bit ints packed into
an array of bytes.
Output:
outdata: unpacked array. The size of this array will
be four times the size of the input data.
"""
piece0 = np.bitwise_and(data >> 0x06, 0x03)
piece1 = np.bitwise_and(data >> 0x04, 0x03)
piece2 = np.bitwise_and(data >> 0x02, 0x03)
piece3 = np.bitwise_and(data, 0x03)
return np.dstack([piece0, piece1, piece2, piece3]).flatten()
def unpack_4bit(data):
"""Unpack 4-bit data that has been read in as bytes.
Input:
data4bit: array of unsigned 4-bit ints packed into
an array of bytes.
Output:
outdata: unpacked array. The size of this array will
be twice the size of the input data.
"""
piece0 = np.bitwise_and(data >> 0x04, 0x0F)
piece1 = np.bitwise_and(data, 0x0F)
return np.dstack([piece0, piece1]).flatten()
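# Quick check of the unpackers (the values follow from the bit masks above):
#   unpack_2bit(np.array([0b11100100], dtype=np.uint8)) -> [3, 2, 1, 0]
#   unpack_4bit(np.array([0xAB], dtype=np.uint8))       -> [10, 11]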
class PsrfitsFile(object):
def __init__(self, psrfitsfn):
if not os.path.isfile(psrfitsfn):
raise ValueError("ERROR: File does not exist!\n\t(%s)" %
psrfitsfn)
self.filename = psrfitsfn
self.fits = pyfits.open(psrfitsfn, mode='readonly', memmap=True)
self.specinfo = SpectraInfo([psrfitsfn])
self.header = self.fits[0].header # Primary HDU
self.nbits = self.specinfo.bits_per_sample
self.nchan = self.specinfo.num_channels
self.npoln = self.specinfo.num_polns
self.poln_order = self.specinfo.poln_order
self.nsamp_per_subint = self.specinfo.spectra_per_subint
self.nsubints = self.specinfo.num_subint[0]
self.freqs = self.fits['SUBINT'].data[0]['DAT_FREQ']
self.frequencies = self.freqs # Alias
self.tsamp = self.specinfo.dt
self.nspec = self.specinfo.N
self.zero_off = self.specinfo.zero_off
def read_subint(self, isub, apply_weights=True, apply_scales=True,
apply_offsets=True, apply_zero_off=True,
total_intensity=True):
"""
Read a PSRFITS subint from a open pyfits file object.
Applys scales, weights, and offsets to the data.
Inputs:
isub: index of subint (first subint is 0)
apply_weights: If True, apply weights.
(Default: apply weights)
apply_scales: If True, apply scales.
(Default: apply scales)
apply_offsets: If True, apply offsets.
(Default: apply offsets)
apply_zero_off: If True, apply ZERO_OFF
(Default: apply ZERO_OFF)
total_intensity: If True, and data have 2+ polns, average them.
(Default: return just total intensity)
Output:
data: Subint data with scales, weights, and offsets
applied in float32 dtype with shape (nsamps,(npoln),nchan).
"""
sdata = self.fits['SUBINT'].data[isub]['DATA']
shp = sdata.squeeze().shape
if self.nbits < 8: # Unpack the bytes data
if (shp[0] != self.nsamp_per_subint) and \
(shp[1] != self.nchan * self.npoln * self.nbits // 8):
sdata = sdata.reshape(self.nsamp_per_subint,
self.nchan * self.npoln * self.nbits // 8)
if self.nbits == 4:
sdata = unpack_4bit(sdata)
elif self.nbits == 2:
sdata = unpack_2bit(sdata)
elif self.nbits == 1:
sdata = unpack_1bit(sdata)
data = np.asarray(sdata, dtype=np.float32)
if apply_zero_off:
data += self.zero_off
if apply_scales or apply_offsets:
data = data.reshape((self.nsamp_per_subint, self.npoln * self.nchan))
if apply_scales:
data *= self.get_scales(isub)
if apply_offsets:
data += self.get_offsets(isub)
if apply_weights:
data = data.reshape((self.nsamp_per_subint * self.npoln, self.nchan))
data *= self.get_weights(isub)
if self.npoln > 1:
data = data.reshape((self.nsamp_per_subint, self.npoln, self.nchan))
else:
data = data.reshape((self.nsamp_per_subint, self.nchan))
# Handle 4-poln coherence data or 2-poln data
if (total_intensity and len(shp)==3 and shp[1]==self.npoln and
(self.poln_order=="AABBCRCI" or self.poln_order=="AABB")):
warnings.warn("Polarization is %s, averaging AA and BB"%self.poln_order)
data = 0.5*(data[:,0,:] + data[:,1,:])
elif (total_intensity and len(shp)==3 and shp[1]==self.npoln and
self.poln_order=="IQUV"):
warnings.warn("Polarization is IQUV, just using Stokes I")
data = data[:,0,:]
return data
def get_weights(self, isub):
"""Return weights for a particular subint.
Inputs:
isub: index of subint (first subint is 0)
Output:
weights: Subint weights. (There is one value for each channel)
"""
return self.fits['SUBINT'].data[isub]['DAT_WTS']
def get_scales(self, isub):
"""Return scales for a particular subint.
Inputs:
isub: index of subint (first subint is 0)
Output:
scales: Subint scales. (There is one value for each channel * npoln)
"""
return self.fits['SUBINT'].data[isub]['DAT_SCL']
def get_offsets(self, isub):
"""Return offsets for a particular subint.
Inputs:
isub: index of subint (first subint is 0)
Output:
offsets: Subint offsets. (There is one value for each channel * npoln)
"""
return self.fits['SUBINT'].data[isub]['DAT_OFFS']
def get_spectra(self, startsamp, N):
"""Return 2D array of data from PSRFITS file.
Inputs:
startsamp, Starting sample
N: number of samples to read
Output:
data: 2D numpy array
"""
# Calculate starting subint and ending subint
startsub = int(startsamp/self.nsamp_per_subint)
skip = startsamp - (startsub*self.nsamp_per_subint)
endsub = int((startsamp+N)/self.nsamp_per_subint)
trunc = ((endsub+1)*self.nsamp_per_subint) - (startsamp+N)
# Read data
data = []
for isub in range(startsub, endsub+1):
data.append(self.read_subint(isub))
if len(data) > 1:
data = np.concatenate(data)
else:
data = np.array(data).squeeze()
data = np.transpose(data)
# Truncate data to desired interval
if trunc > 0:
data = data[:, skip:-trunc]
elif trunc == 0:
data = data[:, skip:]
else:
raise ValueError("Number of bins to truncate is negative: %d" % trunc)
if not self.specinfo.need_flipband:
# for psrfits module freqs go from low to high.
# spectra module expects high frequency first.
data = data[::-1,:]
freqs = self.freqs[::-1]
else:
freqs = self.freqs
return spectra.Spectra(freqs, self.tsamp, data,
starttime=self.tsamp*startsamp, dm=0)
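# Minimal usage sketch (the file name here is hypothetical):
#   pf = PsrfitsFile("obs_0001.fits")
#   spec = pf.get_spectra(0, 4096)   # first 4096 samples, scales/offsets/weights applied
#   print(pf.specinfo)               # summary built by the SpectraInfo class below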
class SpectraInfo(object):
def __init__(self, filenames):
self.filenames = filenames
self.num_files = len(filenames)
self.N = 0
self.user_poln = 0
self.default_poln = 0
# Initialise a few arrays
self.start_MJD = np.empty(self.num_files)
self.num_subint = np.empty(self.num_files)
self.start_subint = np.empty(self.num_files)
self.start_spec = np.empty(self.num_files)
self.num_pad = np.empty(self.num_files)
self.num_spec = np.empty(self.num_files)
# The following should default to False
self.need_scale = False
self.need_offset = False
self.need_weight = False
self.need_flipband = False
for ii, fn in enumerate(filenames):
if not is_PSRFITS(fn):
raise ValueError("File '%s' does not appear to be PSRFITS!" % fn)
# Open the PSRFITS file
hdus = pyfits.open(fn, mode='readonly', memmap=True)
if ii==0:
self.hdu_names = [hdu.name for hdu in hdus]
primary = hdus['PRIMARY'].header
if 'TELESCOP' not in list(primary.keys()):
telescope = ""
else:
telescope = primary['TELESCOP']
# Quick fix for MockSpec data...
if telescope == "ARECIBO 305m":
telescope = "Arecibo"
if ii == 0:
self.telescope = telescope
else:
                if telescope != self.telescope:
warnings.warn("'TELESCOP' values don't match for files 0 and %d!" % ii)
self.observer = primary['OBSERVER']
self.source = primary['SRC_NAME']
self.frontend = primary['FRONTEND']
self.backend = primary['BACKEND']
self.project_id = primary['PROJID']
self.date_obs = primary['DATE-OBS']
self.poln_type = primary['FD_POLN']
self.ra_str = primary['RA']
self.dec_str = primary['DEC']
self.fctr = primary['OBSFREQ']
self.orig_num_chan = primary['OBSNCHAN']
self.orig_df = primary['OBSBW']
self.beam_FWHM = primary['BMIN']
# CHAN_DM card is not in earlier versions of PSRFITS
if 'CHAN_DM' not in list(primary.keys()):
self.chan_dm = 0.0
else:
self.chan_dm = primary['CHAN_DM']
self.start_MJD[ii] = primary['STT_IMJD'] + (primary['STT_SMJD'] + \
primary['STT_OFFS'])/pc.SECPERDAY
# Are we tracking
track = (primary['TRK_MODE'] == "TRACK")
if ii==0:
self.tracking = track
else:
if track != self.tracking:
warnings.warn("'TRK_MODE' values don't match for files 0 and %d" % ii)
# Now switch to the subint HDU header
subint = hdus['SUBINT'].header
self.dt = subint['TBIN']
self.num_channels = subint['NCHAN']
self.num_polns = subint['NPOL']
# PRESTO's 'psrfits.c' has some settings based on environ variables
envval = os.getenv("PSRFITS_POLN")
if envval is not None:
ival = int(envval)
if ((ival > -1) and (ival < self.num_polns)):
print("Using polarisation %d (from 0-%d) from PSRFITS_POLN." % \
(ival, self.num_polns-1))
self.default_poln = ival
self.user_poln = 1
self.poln_order = subint['POL_TYPE']
if subint['NCHNOFFS'] > 0:
warnings.warn("first freq channel is not 0 in file %d" % ii)
self.spectra_per_subint = subint['NSBLK']
self.bits_per_sample = subint['NBITS']
self.num_subint[ii] = subint['NAXIS2']
self.start_subint[ii] = subint['NSUBOFFS']
self.time_per_subint = self.dt * self.spectra_per_subint
# ZERO_OFF is not in earlier versions of PSRFITS
if 'ZERO_OFF' not in list(subint.keys()):
self.zero_off = 0.0
else:
self.zero_off = np.fabs(subint['ZERO_OFF'])
# This is the MJD offset based on the starting subint number
MJDf = (self.time_per_subint * self.start_subint[ii])/pc.SECPERDAY
# The start_MJD values should always be correct
self.start_MJD[ii] += MJDf
# Compute the starting spectra from the times
MJDf = self.start_MJD[ii] - self.start_MJD[0]
if MJDf < 0.0:
raise ValueError("File %d seems to be from before file 0!" % ii)
self.start_spec[ii] = (MJDf * pc.SECPERDAY / self.dt + 0.5)
# Now pull stuff from the columns
subint_hdu = hdus['SUBINT']
first_subint = subint_hdu.data[0]
# Identify the OFFS_SUB column number
if 'OFFS_SUB' not in subint_hdu.columns.names:
warnings.warn("Can't find the 'OFFS_SUB' column!")
else:
colnum = subint_hdu.columns.names.index('OFFS_SUB')
if ii==0:
self.offs_sub_col = colnum
elif self.offs_sub_col != colnum:
warnings.warn("'OFFS_SUB' column changes between files 0 and %d!" % ii)
# Identify the data column and the data type
if 'DATA' not in subint_hdu.columns.names:
warnings.warn("Can't find the 'DATA' column!")
else:
colnum = subint_hdu.columns.names.index('DATA')
if ii==0:
self.data_col = colnum
self.FITS_typecode = subint_hdu.columns[self.data_col].format[-1]
elif self.data_col != colnum:
warnings.warn("'DATA' column changes between files 0 and %d!" % ii)
# Telescope azimuth
if 'TEL_AZ' not in subint_hdu.columns.names:
self.azimuth = 0.0
else:
colnum = subint_hdu.columns.names.index('TEL_AZ')
if ii==0:
self.tel_az_col = colnum
self.azimuth = first_subint['TEL_AZ']
# Telescope zenith angle
if 'TEL_ZEN' not in subint_hdu.columns.names:
self.zenith_ang = 0.0
else:
colnum = subint_hdu.columns.names.index('TEL_ZEN')
if ii==0:
self.tel_zen_col = colnum
self.zenith_ang = first_subint['TEL_ZEN']
# Observing frequencies
if 'DAT_FREQ' not in subint_hdu.columns.names:
warnings.warn("Can't find the channel freq column, 'DAT_FREQ'!")
else:
colnum = subint_hdu.columns.names.index('DAT_FREQ')
freqs = first_subint['DAT_FREQ']
if ii==0:
self.freqs_col = colnum
self.df = freqs[1]-freqs[0]
self.lo_freq = freqs[0]
self.hi_freq = freqs[-1]
# Now check that the channel spacing is the same throughout
ftmp = freqs[1:] - freqs[:-1]
                    if np.any(np.fabs(ftmp - self.df) > 1e-7):
warnings.warn("Channel spacing changes in file %d!" % ii)
else:
ftmp = np.abs(self.df - (freqs[1]-freqs[0]))
if ftmp > 1e-7:
warnings.warn("Channel spacing between files 0 and %d!" % ii)
ftmp = np.abs(self.lo_freq-freqs[0])
if ftmp > 1e-7:
warnings.warn("Low channel changes between files 0 and %d!" % ii)
ftmp = np.abs(self.hi_freq-freqs[-1])
if ftmp > 1e-7:
warnings.warn("High channel changes between files 0 and %d!" % ii)
# Data weights
if 'DAT_WTS' not in subint_hdu.columns.names:
warnings.warn("Can't find the channel weights column, 'DAT_WTS'!")
else:
colnum = subint_hdu.columns.names.index('DAT_WTS')
if ii==0:
self.dat_wts_col = colnum
elif self.dat_wts_col != colnum:
warnings.warn("'DAT_WTS column changes between files 0 and %d!" % ii)
if np.any(first_subint['DAT_WTS'] != 1.0):
self.need_weight = True
# Data offsets
if 'DAT_OFFS' not in subint_hdu.columns.names:
warnings.warn("Can't find the channel offsets column, 'DAT_OFFS'!")
else:
colnum = subint_hdu.columns.names.index('DAT_OFFS')
if ii==0:
self.dat_offs_col = colnum
elif self.dat_offs_col != colnum:
warnings.warn("'DAT_OFFS column changes between files 0 and %d!" % ii)
if np.any(first_subint['DAT_OFFS'] != 0.0):
self.need_offset = True
# Data scalings
if 'DAT_SCL' not in subint_hdu.columns.names:
warnings.warn("Can't find the channel scalings column, 'DAT_SCL'!")
else:
colnum = subint_hdu.columns.names.index('DAT_SCL')
if ii==0:
self.dat_scl_col = colnum
elif self.dat_scl_col != colnum:
warnings.warn("'DAT_SCL' column changes between files 0 and %d!" % ii)
if np.any(first_subint['DAT_SCL'] != 1.0):
self.need_scale = True
            # Compute the samples per file and the amount of padding
# that the _previous_ file has
self.num_pad[ii] = 0
self.num_spec[ii] = self.spectra_per_subint * self.num_subint[ii]
if ii>0:
if self.start_spec[ii] > self.N: # Need padding
self.num_pad[ii-1] = self.start_spec[ii] - self.N
self.N += self.num_pad[ii-1]
self.N += self.num_spec[ii]
# Finished looping through PSRFITS files. Finalise a few things.
# Convert the position strings into degrees
self.ra2000 = coordinates.Angle(self.ra_str,unit=units.hourangle).deg
self.dec2000 = coordinates.Angle(self.dec_str,unit=units.deg).deg
# Are the polarisations summed?
if self.poln_order in ["AA+BB", "INTEN", "IQUV"]:
self.summed_polns = True
else:
self.summed_polns = False
# Calculate some others
self.T = self.N * self.dt
self.orig_df /= float(self.orig_num_chan)
self.samples_per_spectra = self.num_polns * self.num_channels
# Note: the following is the number of bytes that will be in
# the returned array.
if self.bits_per_sample < 8:
self.bytes_per_spectra = self.samples_per_spectra
else:
self.bytes_per_spectra = (self.bits_per_sample * self.samples_per_spectra) // 8
self.samples_per_subint = self.samples_per_spectra * self.spectra_per_subint
self.bytes_per_subint = self.bytes_per_spectra * self.spectra_per_subint
# Flip the band?
if self.hi_freq < self.lo_freq:
tmp = self.hi_freq
self.hi_freq = self.lo_freq
self.lo_freq = tmp
self.df *= -1.0
self.need_flipband = True
# Compute the bandwidth
self.BW = self.num_channels * self.df
self.mjd = int(self.start_MJD[0])
self.secs = (self.start_MJD[0] % 1)*pc.SECPERDAY
def __str__(self):
"""Format spectra_info's information into a easy to
read string and return it.
"""
result = [] # list of strings. Will be concatenated with newlines (\n).
result.append("From the PSRFITS file '%s':" % self.filenames[0])
result.append(" HDUs = %s" % ', '.join(self.hdu_names))
result.append(" Telescope = %s" % self.telescope)
result.append(" Observer = %s" % self.observer)
result.append(" Source Name = %s" % self.source)
result.append(" Frontend = %s" % self.frontend)
result.append(" Backend = %s" % self.backend)
result.append(" Project ID = %s" % self.project_id)
# result.append(" Scan Number = %s" % self.scan_number)
result.append(" Obs Date String = %s" % self.date_obs)
imjd, fmjd = DATEOBS_to_MJD(self.date_obs)
mjdtmp = "%.14f" % fmjd
result.append(" MJD start time (DATE-OBS) = %5d.%14s" % (imjd, mjdtmp[2:]))
result.append(" MJD start time (STT_*) = %19.14f" % self.start_MJD[0])
result.append(" RA J2000 = %s" % self.ra_str)
result.append(" RA J2000 (deg) = %-17.15g" % self.ra2000)
result.append(" Dec J2000 = %s" % self.dec_str)
result.append(" Dec J2000 (deg) = %-17.15g" % self.dec2000)
result.append(" Tracking? = %s" % self.tracking)
result.append(" Azimuth (deg) = %-.7g" % self.azimuth)
result.append(" Zenith Ang (deg) = %-.7g" % self.zenith_ang)
result.append(" Polarisation type = %s" % self.poln_type)
if (self.num_polns>=2) and (not self.summed_polns):
numpolns = "%d" % self.num_polns
elif self.summed_polns:
numpolns = "2 (summed)"
else:
numpolns = "1"
result.append(" Number of polns = %s" % numpolns)
result.append(" Polarisation oder = %s" % self.poln_order)
result.append(" Sample time (us) = %-17.15g" % (self.dt * 1e6))
result.append(" Central freq (MHz) = %-17.15g" % self.fctr)
result.append(" Low channel (MHz) = %-17.15g" % self.lo_freq)
result.append(" High channel (MHz) = %-17.15g" % self.hi_freq)
result.append(" Channel width (MHz) = %-17.15g" % self.df)
result.append(" Number of channels = %d" % self.num_channels)
if self.chan_dm != 0.0:
result.append(" Orig Channel width (MHz) = %-17.15g" % self.orig_df)
result.append(" Orig Number of channels = %d" % self.orig_num_chan)
result.append(" DM used for chan dedisp = %-17.15g" % self.chan_dm)
result.append(" Total Bandwidth (MHz) = %-17.15g" % self.BW)
result.append(" Spectra per subint = %d" % self.spectra_per_subint)
result.append(" Starting subint = %d" % self.start_subint[0])
result.append(" Subints per file = %d" % self.num_subint[0])
result.append(" Spectra per file = %d" % self.num_spec[0])
result.append(" Time per file (sec) = %-.12g" % (self.num_spec[0]*self.dt))
result.append(" FITS typecode = %s" % self.FITS_typecode)
if debug:
result.append(" DATA column = %d" % self.data_col)
result.append(" bits per sample = %d" % self.bits_per_sample)
if self.bits_per_sample < 8:
spectmp = (self.bytes_per_spectra * self.bits_per_sample) // 8
subtmp = (self.bytes_per_subint * self.bits_per_sample) // 8
else:
spectmp = self.bytes_per_spectra
subtmp = self.bytes_per_subint
result.append(" bytes per spectra = %d" % spectmp)
result.append(" samples per spectra = %d" % self.samples_per_spectra)
result.append(" bytes per subint = %d" % subtmp)
result.append(" samples per subint = %d" % self.samples_per_subint)
result.append(" Need scaling? = %s" % self.need_scale)
result.append(" Need offsets? = %s" % self.need_offset)
result.append(" Need weights? = %s" % self.need_weight)
result.append(" Need band inverted? = %s" % self.need_flipband)
return '\n'.join(result)
def __getitem__(self, key):
return getattr(self, key)
def DATEOBS_to_MJD(dateobs):
"""Convert DATE-OBS string from PSRFITS primary HDU to a MJD.
Returns a 2-tuple:
(integer part of MJD, fractional part of MJD)
"""
# Parse string using regular expression defined at top of file
m = date_obs_re.match(dateobs)
mjd_fracday = (float(m.group("hour")) + (float(m.group("min")) + \
(float(m.group("sec")) / 60.0)) / 60.0) / 24.0
    mjd_day = aptime.Time("%04d-%02d-%02d" % (int(m.group("year")), \
                          int(m.group("month")), int(m.group("day"))), format="iso").mjd
return mjd_day, mjd_fracday
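# e.g. DATEOBS_to_MJD("2010-05-11T12:00:00.000") -> (55327.0, 0.5)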
def is_PSRFITS(filename):
"""Return True if filename appears to be PSRFITS format.
Return False otherwise.
"""
hdus = pyfits.open(filename, mode='readonly', memmap=True)
primary = hdus['PRIMARY'].header
try:
isPSRFITS = ((primary['FITSTYPE'] == "PSRFITS") and \
(primary['OBS_MODE'] == "SEARCH"))
except KeyError:
isPSRFITS = False
hdus.close()
return isPSRFITS
def debug_mode(mode=None):
"""Set debugging mode.
If 'mode' is None return current debug mode.
"""
global debug
if mode is None:
return debug
else:
debug = bool(mode)
def main():
specinf = SpectraInfo(args.files)
if args.output is not None:
print(args.output % specinf)
else:
if debug:
print("Reading '%s'" % args.files[0])
print(specinf)
if __name__=='__main__':
parser = argparse.ArgumentParser(description="Get info about PSRFITS files.")
parser.add_argument("files", metavar="FILE", nargs='+', \
type=str, help="PSRFITS files.")
parser.add_argument("-o", type=str, dest="output", default=None, \
help="String to interpolate with info about the file.")
args = parser.parse_args()
main()
| 27,914 | 41.359636 | 91 | py |
presto | presto-master/python/presto/filterbank.py | """
A module for reading filterbank files.
Patrick Lazarus, June 26, 2012
(Minor modification from file originally from June 6th, 2009)
"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import object
import sys
import os
import os.path
import numpy as np
from presto import sigproc
from presto import spectra
DEBUG = False
def create_filterbank_file(outfn, header, spectra=None, nbits=8, \
verbose=False, mode='append'):
"""Write filterbank header and spectra to file.
Input:
outfn: The outfile filterbank file's name.
header: A dictionary of header paramters and values.
spectra: Spectra to write to file. (Default: don't write
any spectra - i.e. write out header only)
nbits: The number of bits per sample of the filterbank file.
This value always overrides the value in the header dictionary.
(Default: 8 - i.e. each sample is an 8-bit integer)
verbose: If True, be verbose (Default: be quiet)
mode: Mode for writing (can be 'append' or 'write')
Output:
fbfile: The resulting FilterbankFile object opened
in read-write mode.
"""
dtype = get_dtype(nbits) # Get dtype. This will check to ensure
# 'nbits' is valid.
header['nbits'] = nbits
outfile = open(outfn, 'wb')
outfile.write(sigproc.addto_hdr("HEADER_START", None))
for paramname in list(header.keys()):
if paramname not in sigproc.header_params:
# Only add recognized parameters
continue
if verbose:
print("Writing header param (%s)" % paramname)
value = header[paramname]
outfile.write(sigproc.addto_hdr(paramname, value))
outfile.write(sigproc.addto_hdr("HEADER_END", None))
if spectra is not None:
spectra.flatten().astype(dtype).tofile(outfile)
outfile.close()
return FilterbankFile(outfn, mode=mode)
def is_float(nbits):
"""For a given number of bits per sample return
true if it corresponds to floating-point samples
in filterbank files.
Input:
nbits: Number of bits per sample, as recorded in the filterbank
file's header.
Output:
isfloat: True, if 'nbits' indicates the data in the file
are encoded as floats.
"""
check_nbits(nbits)
if nbits == 32:
return True
else:
return False
def check_nbits(nbits):
"""Given a number of bits per sample check to make
sure 'filterbank.py' can cope with it.
An exception is raise if 'filterbank.py' cannot cope.
Input:
nbits: Number of bits per sample, as recorded in the filterbank
file's header.
Output:
None
"""
if nbits not in [32, 16, 8]:
raise ValueError("'filterbank.py' only supports " \
"files with 8- or 16-bit " \
"integers, or 32-bit floats " \
"(nbits provided: %g)!" % nbits)
def get_dtype(nbits):
"""For a given number of bits per sample return
a numpy-recognized dtype.
Input:
nbits: Number of bits per sample, as recorded in the filterbank
file's header.
Output:
dtype: A numpy-recognized dtype string.
"""
check_nbits(nbits)
if is_float(nbits):
dtype = 'float%d' % nbits
else:
dtype = 'uint%d' % nbits
return dtype
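# e.g. get_dtype(32) -> 'float32' (floating-point samples), get_dtype(8) -> 'uint8'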
def read_header(filename, verbose=False):
"""Read the header of a filterbank file, and return
a dictionary of header paramters and the header's
size in bytes.
Inputs:
filename: Name of the filterbank file.
verbose: If True, be verbose. (Default: be quiet)
Outputs:
header: A dictionary of header paramters.
header_size: The size of the header in bytes.
"""
header = {}
filfile = open(filename, 'rb')
filfile.seek(0)
paramname = ""
while (paramname != 'HEADER_END'):
if verbose:
print("File location: %d" % filfile.tell())
paramname, val = sigproc.read_hdr_val(filfile, stdout=verbose)
if verbose:
print("Read param %s (value: %s)" % (paramname, val))
if paramname not in ["HEADER_START", "HEADER_END"]:
header[paramname] = val
header_size = filfile.tell()
filfile.close()
return header, header_size
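# Typical use (the file name here is hypothetical):
#   header, header_size = read_header("obs.fil")
#   nchans, tsamp = header['nchans'], header['tsamp']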
class FilterbankFile(object):
def __init__(self, filfn, mode='readonly'):
self.filename = filfn
self.filfile = None
if not os.path.isfile(filfn):
raise ValueError("ERROR: File does not exist!\n\t(%s)" % filfn)
self.header, self.header_size = read_header(self.filename)
self.frequencies = self.fch1 + self.foff*np.arange(self.nchans)
self.is_hifreq_first = (self.foff < 0)
self.bytes_per_spectrum = self.nchans*self.nbits // 8
data_size = os.path.getsize(self.filename)-self.header_size
self.nspec = data_size // self.bytes_per_spectrum
# Check if this file is a folded-filterbank file
if 'npuls' in self.header and 'period' in self.header and \
'nbins' in self.header and 'tsamp' not in self.header:
            # Folded file
self.isfold = True
self.dt = self.period/self.nbins
else:
self.isfold = False
self.dt = self.tsamp
# Get info about dtype
self.dtype = get_dtype(self.nbits)
if is_float(self.nbits):
tinfo = np.finfo(self.dtype)
else:
tinfo = np.iinfo(self.dtype)
self.dtype_min = tinfo.min
self.dtype_max = tinfo.max
if mode.lower() in ('read', 'readonly'):
self.filfile = open(self.filename, 'rb')
elif mode.lower() in ('write', 'readwrite'):
self.filfile = open(self.filename, 'r+b')
elif mode.lower() == 'append':
self.filfile = open(self.filename, 'a+b')
else:
raise ValueError("Unrecognized mode (%s)!" % mode)
@property
def freqs(self):
# Alias for frequencies
return self.frequencies
@property
def nchan(self):
# more aliases..
return self.nchans
def close(self):
if self.filfile is not None:
self.filfile.close()
def get_timeslice(self, start, stop):
startspec = int(np.round(start/self.tsamp))
stopspec = int(np.round(stop/self.tsamp))
return self.get_spectra(startspec, stopspec-startspec)
def get_spectra(self, start, nspec):
stop = min(start+nspec, self.nspec)
pos = self.header_size+start*self.bytes_per_spectrum
# Compute number of elements to read
nspec = int(stop) - int(start)
num_to_read = nspec*self.nchans
num_to_read = max(0, num_to_read)
self.filfile.seek(pos, os.SEEK_SET)
spectra_dat = np.fromfile(self.filfile, dtype=self.dtype,
count=num_to_read)
spectra_dat.shape = nspec, self.nchans
spec = spectra.Spectra(self.freqs, self.tsamp, spectra_dat.T,
starttime=start*self.tsamp, dm=0.0)
return spec
def append_spectra(self, spectra):
"""Append spectra to the file if is not read-only.
Input:
spectra: The spectra to append. The new spectra
must have the correct number of channels (ie
dimension of axis=1.
Outputs:
None
"""
if self.filfile.mode.lower() in ('r', 'rb'):
raise ValueError("FilterbankFile object for '%s' is read-only." % \
self.filename)
nspec, nchans = spectra.shape
if nchans != self.nchans:
raise ValueError("Cannot append spectra. Incorrect shape. " \
"Number of channels in file: %d; Number of " \
"channels in spectra to append: %d" % \
(self.nchans, nchans))
data = spectra.flatten()
np.clip(data, self.dtype_min, self.dtype_max, out=data)
# Move to end of file
self.filfile.seek(0, os.SEEK_END)
self.filfile.write(data.astype(self.dtype))
self.nspec += nspec
#self.filfile.flush()
#os.fsync(self.filfile)
def write_spectra(self, spectra, ispec):
"""Write spectra to the file if is writable.
Input:
spectra: The spectra to append. The new spectra
must have the correct number of channels (ie
dimension of axis=1.
ispec: The index of the spectrum of where to start writing.
Outputs:
None
"""
if 'r+' not in self.filfile.mode.lower():
raise ValueError("FilterbankFile object for '%s' is not writable." % \
self.filename)
nspec, nchans = spectra.shape
if nchans != self.nchans:
raise ValueError("Cannot write spectra. Incorrect shape. " \
"Number of channels in file: %d; Number of " \
"channels in spectra to write: %d" % \
(self.nchans, nchans))
if ispec > self.nspec:
raise ValueError("Cannot write past end of file! " \
"Present number of spectra: %d; " \
"Requested index of write: %d" % \
(self.nspec, ispec))
data = spectra.flatten()
np.clip(data, self.dtype_min, self.dtype_max, out=data)
# Move to requested position
pos = self.header_size + ispec*self.bytes_per_spectrum
self.filfile.seek(pos, os.SEEK_SET)
self.filfile.write(data.astype(self.dtype))
if nspec+ispec > self.nspec:
self.nspec = nspec+ispec
def __getattr__(self, name):
if name in self.header:
if DEBUG:
print("Fetching header param (%s)" % name)
val = self.header[name]
else:
raise ValueError("No FilterbankFile attribute called '%s'" % name)
return val
def print_header(self):
"""Print header parameters and values.
"""
for param in sorted(self.header.keys()):
if param in ("HEADER_START", "HEADER_END"):
continue
print("%s: %s" % (param, self.header[param]))
def main():
fil = FilterbankFile(sys.argv[1])
fil.print_header()
if __name__ == '__main__':
main()
| 10,901 | 33.830671 | 82 | py |
presto | presto-master/python/presto/sinc_interp.py | from __future__ import print_function
from __future__ import absolute_import
import numpy as Num
import numpy.fft as FFT
from scipy.special import i0  # zeroth-order modified Bessel function, used by kaiser_window
def kaiser_window(xs, halfwidth, alpha):
"""
kaiser_window(xs, halfwidth, alpha):
Return the kaiser window function for the values 'xs' when the
the half-width of the window should be 'haldwidth' with
the folloff parameter 'alpha'. The following values are
particularly interesting:
alpha
-----
0 Rectangular Window
5 Similar to Hamming window
6 Similar to Hanning window
8.6 Almost identical to the Blackman window
"""
    win = i0(alpha*Num.sqrt(1.0-(xs/halfwidth)**2.0))/i0(alpha)
return Num.where(Num.fabs(xs)<=halfwidth, win, 0.0)
def hanning_window(xs, halfwidth):
"""
hanning_window(xs, halfwidth):
Return the Hanning window of halfwidth 'halfwidth' evaluated at
the values 'xs'.
"""
win = 0.5 + 0.5*Num.cos(Num.pi*xs/halfwidth)
return Num.where(Num.fabs(xs)<=halfwidth, win, 0.0)
def hamming_window(xs, halfwidth):
"""
hamming_window(xs, halfwidth):
Return the Hamming window of halfwidth 'halfwidth' evaluated at
the values 'xs'.
"""
win = 0.54 + 0.46*Num.cos(Num.pi*xs/halfwidth)
return Num.where(Num.fabs(xs)<=halfwidth, win, 0.0)
def blackman_window(xs, halfwidth):
"""
blackman_window(xs, halfwidth):
Return the Blackman window of halfwidth 'halfwidth' evaluated at
the values 'xs'.
"""
rat = Num.pi*xs/halfwidth
win = 0.42 + 0.5*Num.cos(rat) + 0.08*Num.cos(2.0*rat)
return Num.where(Num.fabs(xs)<=halfwidth, win, 0.0)
def rectangular_window(xs, halfwidth):
"""
rectangular_window(xs, halfwidth):
Return a rectangular window of halfwidth 'halfwidth' evaluated at
the values 'xs'.
"""
return Num.where(Num.fabs(xs)<=halfwidth, 1.0, 0.0)
_window_function = {"rectangular": rectangular_window,
"none": rectangular_window,
"hanning": hanning_window,
"hamming": hamming_window,
"blackman": blackman_window,
"kaiser": kaiser_window}
def windowed_sinc_interp(data, newx, halfwidth=None,
window='hanning', alpha=6.0):
"""
windowed_sinc_interp(data, newx, halfwidth=None,
window='hanning', alpha=6.0):
Return a single windowed-sinc-interpolated point from the data.
"""
if Num.fabs(round(newx)-newx) < 1e-5:
return data[int(round(newx))]
num_pts = (int(Num.floor(newx)), len(data)-int(Num.ceil(newx))-1)
if halfwidth is None:
halfwidth = min(num_pts)
lo_pt = int(Num.floor(newx)) - halfwidth
if lo_pt < 0:
        lo_pt = 0
print("Warning: trying to access below the lowest index!")
hi_pt = lo_pt + 2*halfwidth
if hi_pt >= len(data):
hi_pt = len(data)-1
print("Warning: trying to access above the highest index!")
halfwidth = (hi_pt-lo_pt)//2
pts = Num.arange(2*halfwidth)+lo_pt
xs = newx - pts
    if window.lower() == "kaiser":
win = _window_function[window](xs, len(data)//2, alpha)
else:
win = _window_function[window](xs, len(data)//2)
return Num.add.reduce(Num.take(data, pts) * win * Num.sinc(xs))
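# Usage sketch (hypothetical data): interpolate a single value at a fractional
# index of a 1-D array, e.g. between samples 10 and 11.
#     >>> data = Num.sin(2.0*Num.pi*Num.arange(100)/100.0)
#     >>> windowed_sinc_interp(data, 10.37)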
def periodic_interp(data, zoomfact, window='hanning', alpha=6.0):
"""
periodic_interp(data, zoomfact, window='hanning', alpha=6.0):
Return a periodic, windowed, sinc-interpolation of the data which
is oversampled by a factor of 'zoomfact'.
"""
zoomfact = int(zoomfact)
if (zoomfact < 1):
print("zoomfact must be >= 1.")
return 0.0
elif zoomfact==1:
return data
newN = len(data)*zoomfact
# Space out the data
comb = Num.zeros((zoomfact, len(data)), dtype='d')
comb[0] += data
comb = Num.reshape(Num.transpose(comb), (newN,))
# Compute the offsets
xs = Num.zeros(newN, dtype='d')
xs[:newN//2+1] = Num.arange(newN//2+1, dtype='d')/zoomfact
xs[-newN//2:] = xs[::-1][newN//2-1:-1]
# Calculate the sinc times window for the kernel
if window.lower()=="kaiser":
win = _window_function[window](xs, len(data)//2, alpha)
else:
win = _window_function[window](xs, len(data)//2)
kernel = win * Num.sinc(xs)
if (0):
plotxy(Num.sinc(xs), color='yellow')
plotxy(win)
plotxy(kernel, color='red')
closeplot()
return FFT.irfft(FFT.rfft(kernel) * FFT.rfft(comb))
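# Usage sketch (hypothetical profile): oversample a periodic pulse profile by
# an integer factor; the input is treated as exactly one full period.
#     >>> prof = Num.cos(2.0*Num.pi*Num.arange(64)/64.0)
#     >>> fine = periodic_interp(prof, 8)    # len(fine) == 8*64 == 512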
if __name__=='__main__':
from presto.psr_utils import *
from presto.Pgplot import *
from numpy.random import normal
# from spline import *
fwhm = 0.01
ctr_phase = 0.505
noise_sigma = 0.2
# The theoretical profile with noise
Ntheo = 1000
theo = gaussian_profile(Ntheo, ctr_phase, fwhm) + normal(0.0, noise_sigma, Ntheo)
theo_phases = Num.arange(Ntheo, dtype='d')/Ntheo
# The "sampled" data
Ndata = 100
data = theo[::Ntheo//Ndata]
data_phases = theo_phases[::Ntheo//Ndata]
# The values to interpolate
Ncalc = 30
lo_calc = ctr_phase-0.05
hi_calc = ctr_phase+0.05
calc_phases = span(lo_calc, hi_calc, Ncalc)
plotxy(theo, theo_phases, rangex=[lo_calc-0.2, hi_calc+0.2])
plotxy(data, data_phases, line=None, symbol=3, color='green')
# Do the interpolation one point at a time
halfwidth = Ndata//2-5
calc_vals = []
for phs in calc_phases:
calc_vals.append(windowed_sinc_interp(data, phs*len(data), halfwidth))
plotxy(calc_vals, calc_phases, line=None, symbol=3, color='red')
# Interpolate the full profile using convolution
zoomfact = 10
newvals = periodic_interp(data, 10)
new_phases = Num.arange(Ndata*zoomfact, dtype='d')/(Ndata*zoomfact)
plotxy(newvals, new_phases, line=1, symbol=None, color='yellow')
# Interpolate using cubic splines
if (0):
        sdata = interpolate.splrep(data_phases, data, s=0)
        svals = interpolate.splev(new_phases, sdata, der=0)
plotxy(svals, new_phases, line=1, symbol=None, color='cyan')
elif (0):
sdata = Spline(data_phases, data)
svals = sdata(new_phases)
plotxy(svals, new_phases, line=1, symbol=None, color='cyan')
closeplot()
| 6,493 | 34.102703 | 85 | py |
presto | presto-master/python/presto/singlepulse/spcand.py | from __future__ import absolute_import
from builtins import object
import numpy as np
from . import bary_and_topo
def topo_timeshift(bary_start_time, time_shift, topo):
    # Round down to the nearest 10 s, the spacing of the topo/bary time grid
    ind = np.where(topo == float(int(bary_start_time)//10*10))[0]
return time_shift[ind]
def numsub(nchans, snr):
if not nchans==960 and np.log2(nchans).is_integer(): #Puppi L-wide and GBNCC
if snr < 10:
nsub = 32
elif snr >= 10 and snr < 15:
nsub = 64
else:
nsub = 128
elif nchans == 960: #PALFA
if snr < 10:
nsub = 32
elif snr >= 10 and snr < 15:
nsub = 64
else:
nsub = 96
else:
nsub = nchans
return nsub
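# Illustrative values implied by the logic above (not an exhaustive table):
#     numsub(4096, 8.0)  -> 32    numsub(4096, 12.0) -> 64    numsub(4096, 20.0) -> 128
#     numsub(960, 8.0)   -> 32    numsub(960, 12.0)  -> 64    numsub(960, 20.0)  -> 96
#     numsub(1000, 20.0) -> 1000  (neither PALFA-like nor a power-of-two channel count)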
class params(object):
"""
A class for input parameters for the spd plots.
"""
def __init__(self):
self.start_bin = 0
self.dmfac = 0.0
self.duration = 0.0
self.nbins = 0
self.nbinsextra = 0
self.zerodm = None
self.mask = False
self.bandpass_corr = True
self.nsub = 96
self.subdm = 0.0
self.dm = 0.0
self.sweep_dm = 0.0
self.downsamp = 1
self.scaleindep = None
self.width_bins = 1
self.binratio = 50
self.sigma = 0.0
self.pulse_width = 0.0
self.start = 0.0
self.sweep_duration = 0.0
self.bary_start_time = 0.0
self.topo_start_time = 0.0
self.sample_number = 0
def read_from_file(self, params, tsamp, N, lofreq, hifreq, rawdatafile, loc_pulse = 0.5, dedisp = False, \
scaleindep = None, zerodm = None, mask = None, barytime = True, \
nsub = None, bandpass_corr = False):
"""
Set up parameters based on input from the groups.txt file.
Input: params: list of parameters (DM, S/N, Time, Sample number, downfactor)
tsamp: sampling time (downsampled: 65.5 us for PALFA)
N : The total number of time samples in the observation
lofreq: lowest observation frequency
hifreq: highest observation frequency
rawdatafile: supply a PSRFITS file instance
optional arguments:
                dedisp: Do you want to dedisperse? (Type: Boolean)
                scaleindep: Do you want to scale each subband independently? (Type: Boolean)
                zerodm: Do you want to use zero-DM filtering? (Type: Boolean)
                mask: Do you want to use an rfifind mask? (Type: Boolean)
                barytime: Is the given time(s) barycentric?
                bandpass_corr: Would you like to remove the bandpass? (Type: Boolean)
"""
self.subdm = params[0]
self.sigma = params[1]
topo, bary = bary_and_topo.bary_to_topo(rawdatafile.filename, rawdatafile=rawdatafile)
time_shift = bary-topo
if barytime:
#### if the times in the groups.txt file are barycentric then get the corresponding
#### topocentric times.
self.bary_start_time = params[2]
self.topo_start_time = self.bary_start_time - topo_timeshift(self.bary_start_time, \
time_shift, topo)[0]
else:
#### Vice versa.
self.topo_start_time = params[2]
            self.bary_start_time = self.topo_start_time + topo_timeshift(self.topo_start_time, \
                                                                         time_shift, topo)[0]
self.sample_number = params[3]
self.width_bins = params[4]
sweep_posn = 0.0
self.scaleindep = scaleindep
self.downsamp = np.round((params[2]/self.sample_number/tsamp)).astype('int')
self.duration = self.binratio * self.width_bins * tsamp * self.downsamp
self.start = self.topo_start_time - (loc_pulse * self.duration)
if (self.start<0.0):
self.start = 0.0
self.start_bin = np.round(self.start/tsamp).astype('int')
self.pulse_width = self.width_bins*self.downsamp*tsamp
if nsub is not None:
self.nsub = nsub
else:
self.nsub = numsub(rawdatafile.nchan, self.sigma)
self.zerodm = zerodm
if dedisp:
self.dm = self.subdm
self.sweep_dm = None
self.sweep_duration = 0.0
self.dmfac = 4.15e3 * np.abs(1./lofreq**2 - 1./hifreq**2)
self.nbins = np.round(self.duration/tsamp).astype('int')
self.nbinsextra = np.round((self.duration + self.dmfac * self.dm)/tsamp).astype('int')
if (self.start_bin+self.nbinsextra) > N-1:
self.nbinsextra = N-1-self.start_bin
self.bandpass_corr = bandpass_corr
else:
self.dm = None
self.sweep_dm = self.subdm
self.sweep_duration = 4.15e3 * np.abs(1./lofreq**2-1./hifreq**2)*self.sweep_dm
self.start = self.start + (loc_pulse*self.duration)
self.start_bin = np.round(self.start/tsamp).astype('int')
self.nbins = np.round(self.sweep_duration/tsamp).astype('int')
self.nbinsextra = self.nbins
if (self.start_bin+self.nbinsextra) > N-1:
self.nbinsextra = N-1-self.start_bin
self.bandpass_corr = False
def manual_params(self, subdm, dm, sweep_dm, sigma, start_time, width_bins, downsamp, \
duration, nbins, nsub, tsamp, N, lofreq, hifreq, rawdatafile, loc_pulse=0.5, dedisp = False, \
scaleindep = None, zerodm = None, mask = False, barytime = True, \
bandpass_corr = False):
"""
        Set up parameters based on values supplied directly by the user (rather than a groups.txt file).
Input:
subdm: DM to use when subbanding.
dm: DM to use when dedispersing data for plot.
sweep_dm: Show the frequency sweep using this DM.
sigma: signal-to-noise of the pulse
start_time: start time of the data to be read in for waterfalling.
width_bins: Smooth each channel/subband with a boxcar width_bins wide.
downsamp: Factor to downsample in time by. Default: Don't downsample.
duration: duration of data to be waterfalled.
nbins: Number of time bins to plot. This option overrides
the duration argument.
nsub: Number of subbands to use. Must be a factor of number of channels.
tsamp: sampling time (downsampled: 65.5 us for PALFA)
N: total number of samples in an observations
lofreq: lowest observation frequency
hifreq: highest observation frequency
rawdatafile: supply a psrfits file instance
optional arguments:
                dedisp: Do you want to dedisperse? (Type: Boolean)
                scaleindep: Do you want to scale each subband independently? (Type: Boolean)
                zerodm: Do you want to use zero-DM filtering? (Type: Boolean)
                mask: Do you want to use an rfifind mask? (Type: Boolean)
                barytime: Is the given time(s) barycentric?
                bandpass_corr: Would you like to remove the bandpass? (Type: Boolean)
"""
self.subdm = subdm
self.mask = mask
self.sigma = sigma
topo, bary = bary_and_topo.bary_to_topo(rawdatafile.filename, rawdatafile=rawdatafile)
time_shift = bary-topo
if barytime:
#### if the time is barycentric then get the corresponding topocentric time.
self.bary_start_time = start_time
self.topo_start_time = self.bary_start_time - topo_timeshift(self.bary_start_time, \
time_shift, topo)[0]
else:
#### Vice versa.
self.topo_start_time = start_time
            self.bary_start_time = self.topo_start_time + topo_timeshift(self.topo_start_time, \
                                                                          time_shift, topo)[0]
self.sample_number = np.round(self.bary_start_time/tsamp).astype('int')
self.width_bins = width_bins
sweep_posn = 0.0
if scaleindep:
self.scaleindep = True
self.downsamp = downsamp
if nbins:
self.nbins = nbins
self.duration = self.nbins*tsamp
if duration:
self.duration = duration
self.nbins = np.round(self.duration/tsamp).astype('int')
self.start = self.topo_start_time - (loc_pulse * self.duration)
if (self.start<0.0):
self.start = 0.0
self.start_bin = np.round(self.start/tsamp).astype('int')
self.pulse_width = self.width_bins*self.downsamp*tsamp
self.nsub = nsub
self.zerodm = zerodm
if dedisp:
if dm:
self.dm = dm
else:
self.dm = self.subdm
self.sweep_dm = None
self.sweep_duration = 0.0
self.dmfac = 4.15e3 * np.abs(1./lofreq**2 - 1./hifreq**2)
self.nbinsextra = self.nbins + np.round((self.dmfac * self.dm)/tsamp).astype('int')
if (self.start_bin+self.nbinsextra) > N-1:
self.nbinsextra = N-1-self.start_bin
self.bandpass_corr = bandpass_corr
else:
self.dm = None
if not sweep_dm:
self.sweep_dm = self.subdm
else:
self.sweep_dm = sweep_dm
self.sweep_duration = 4.15e3 * np.abs(1./lofreq**2-1./hifreq**2)*self.sweep_dm
self.start = self.start + (loc_pulse*self.duration)
self.start_bin = np.round(self.start/tsamp).astype('int')
self.nbinsextra = self.nbins
if (self.start_bin+self.nbinsextra) > N-1:
self.nbinsextra = N-1-self.start_bin
self.bandpass_corr = False
| 10,190 | 43.697368 | 116 | py |
presto | presto-master/python/presto/singlepulse/bary_and_topo.py | #! /usr/bin/env python
"""
Original code found in presto. Written by Scott M. Ransom.
Modified by Chitrang Patel to read information from PSRFITs file.
Modified to return topocentric and corresponding barycentric
times.
"""
from __future__ import print_function
from presto.presto.prestoswig import *
import numpy as Num
from presto import psr_utils
from presto import psrfits
def read_inffile(filename):
"""
read_inffile(filename):
Return an infodata 'C' structure containing the data from the
'inf' file in 'filename'. 'filename' should not include the
'.inf' suffix.
"""
id = infodata()
print("Reading information from", "\""+filename+".inf\"")
readinf(id, filename)
return id
def bary_to_topo(infofilenm, rawdatafile=False, ephem="DE200"):
"""
       bary_to_topo(infofilenm, rawdatafile=False, ephem="DE200"):
          Returns the barycentric and topocentric times every 10 seconds.
The data for the observation must be found in the info file.
"""
if infofilenm[-4:]==".inf":
infofilenm = infofilenm[:-4]
filetype = 'inf'
elif infofilenm[-5:]==".fits":
infofilenm = infofilenm
filetype = 'PSRFITS'
else:
raise ValueError("file type not recognized. Must be .inf, or .fits")
if filetype=="inf":
obs = read_inffile(infofilenm)
T = obs.N * obs.dt
dt = 10.0
tto = obs.mjd_i + obs.mjd_f
tts = Num.arange(tto, tto + (T + dt) / psr_utils.SECPERDAY, dt / psr_utils.SECPERDAY)
nn = len(tts)
bts = Num.zeros(nn, 'd')
vel = Num.zeros(nn, 'd')
ra = psr_utils.coord_to_string(obs.ra_h, obs.ra_m, obs.ra_s)
dec = psr_utils.coord_to_string(obs.dec_d, obs.dec_m, obs.dec_s)
if (obs.telescope == 'Parkes'): tel = 'PK'
elif (obs.telescope == 'Effelsberg'): tel = 'EB'
elif (obs.telescope == 'Arecibo'): tel = 'AO'
elif (obs.telescope == 'MMT'): tel = 'MT'
elif (obs.telescope == 'GBT'): tel = 'GB'
else:
print("Telescope not recognized.")
return 0
elif filetype=="PSRFITS":
if not rawdatafile:
rawdatafile = psrfits.PsrfitsFile(infofilenm)
T = rawdatafile.specinfo.T
dt = 10.0
tto = rawdatafile.specinfo.start_MJD[0]
tts = Num.arange(tto, tto + (T + dt) / psr_utils.SECPERDAY, dt / psr_utils.SECPERDAY)
nn = len(tts)
bts = Num.zeros(nn, 'd')
vel = Num.zeros(nn, 'd')
ra = rawdatafile.specinfo.ra_str
dec = rawdatafile.specinfo.dec_str
if (rawdatafile.specinfo.telescope == 'Parkes'): tel = 'PK'
elif (rawdatafile.specinfo.telescope == 'Effelsberg'): tel = 'EB'
elif (rawdatafile.specinfo.telescope == 'Arecibo'): tel = 'AO'
elif (rawdatafile.specinfo.telescope == 'MMT'): tel = 'MT'
elif (rawdatafile.specinfo.telescope == 'GBT'): tel = 'GB'
else:
print("Telescope not recognized.")
return 0
barycenter(tts, bts, vel, nn, ra, dec, tel, ephem)
avgvel = Num.add.reduce(vel) / nn
tts = Num.arange(nn, dtype='d') * dt
bts = (bts - bts[0]) * psr_utils.SECPERDAY
return tts, bts
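# Usage sketch (hypothetical file name): both returned arrays are sampled every
# 10 s from the start of the observation, so the barycentric correction at any
# moment can be looked up or interpolated from them.
#     >>> topo, bary = bary_to_topo("myobs_rfifind.inf")
#     >>> shift = bary - topo    # seconds to add to a topocentric time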
| 3,158 | 35.310345 | 92 | py |
presto | presto-master/python/presto/singlepulse/spio.py | from __future__ import print_function
from builtins import range
import numpy as _np
import os
import fileinput as _fileinput
from scipy.special import erf
def get_textfile(txtfile):
""" Read in the groups.txt file.
Contains information about the DM, time, box car width, signal to noise, sample number and rank of groups.
Input: name of the text file.
Output: Array of strings in the file line by line.
"""
return _np.loadtxt(txtfile,dtype = 'str',delimiter='\n')
def group_info(rank, txtfile):
"""
Extracts out relevant information from the groups.txt file as strings.
Input: integer:rank of the groups to read.
string :name of the text file to read from.
Output:
list of single pulse parameters as strings.
"""
files = get_textfile(txtfile)
    lis=_np.where(files == '\tRank: %i.000000'%rank)[0]  # Checks for this condition and gives its indices where true.
# Extract the Max_ sigma value for the required parameters
parameters=[]
for i in range(len(lis)):
temp_list = files[lis[i]-1].split()
max_sigma = temp_list[2]
max_sigma = float(max_sigma)
max_sigma = '%.2f'%max_sigma
# Extract the number of pulses for this group
temp_list = files[lis[i]-6].split()
number_of_pulses = int(temp_list[2])
# Slice off a mini array to get the parameters from
temp_lines = files[(lis[i]+1):(lis[i]+number_of_pulses+1)]
# Get the parameters as strings containing the max_sigma
parameters.append(temp_lines[_np.array([max_sigma in line for line in temp_lines])])
return parameters
def split_parameters(rank, txtfile):
"""
Splits the string into individual parameters and converts them into floats/int.
Input: integer:rank of the groups to read.
string :name of the text file to read from.
Output:
list of single pulse parameters as floats and integers.
"""
parameters = group_info(rank, txtfile)
final_parameters=[]
for i in range(len(parameters)):
        # If there is a degeneracy in max_sigma values, pick the first one. (Could be updated to get the best pick.)
correct_values = parameters[i][0].split()
correct_values[0] = float(correct_values[0])
correct_values[1] = float(correct_values[1])
correct_values[1] = float('%.2f'%correct_values[1])
correct_values[2] = float(correct_values[2])
correct_values[3] = int(correct_values[3])
correct_values[4] = int(correct_values[4])
final_parameters.append(correct_values)
return final_parameters
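# Sketch of the returned layout (values are purely illustrative): one entry per
# group, each holding [dm, sigma, time, sample, downfact], e.g.
#     [[56.83, 12.47, 1203.581321, 18342719, 30]]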
def read_RRATrap_info(groupfile, group_to_read, rank):
"""
Generates DM, time, signal-to-noise and pulse width arrays for all the single pulse events
that belong to this group.
Inputs:
groups.txt file.
list of line numbers in the file that read something like : Rank: 6.000000
rank of the group according to the RRATrap classification.
Outputs:
dm_list, dm_arr : DMs of the single pulse events in this group
time_list : Times of the single pulse events in this group
sigma_arr : signal-to-noise of the single pulse events in this group
width_arr : box car widths of the single pulse events in this group
"""
    files = get_textfile(groupfile)
    # Initialise the outputs so they are defined even when there are no groups of this rank
    dm_list = []
    time_list = []
    dm_arr = _np.array([])
    sigma_arr = _np.array([])
    width_arr = _np.array([])
if files[rank-1] != "Number of rank %i groups: 0 "%rank: # checks whether there are %i \
# ranked groups in the file.
print(files[rank-1])
print("Making arrays for DM vs Signal to Noise...")
temp_list = files[group_to_read-6].split()
npulses = int(temp_list[2])
temp_lines = files[(group_to_read+3):(group_to_read+npulses+1)]
arr = _np.split(temp_lines, len(temp_lines))
dm_list = []
time_list = []
for i in range(len(arr)):
dm_val= float(arr[i][0].split()[0])
time_val = float(arr[i][0].split()[2])
dm_list.append(dm_val)
time_list.append(time_val)
arr_2 = _np.array([arr[i][0].split() for i in range(len(arr))], dtype = _np.float32)
dm_arr = _np.array([arr_2[i][0] for i in range(len(arr))], dtype = _np.float32)
sigma_arr = _np.array([arr_2[i][1] for i in range(len(arr))], dtype = _np.float32)
width_arr = _np.array([arr_2[i][4] for i in range(len(arr))], dtype = _np.int8)
else:
print("No Rank %i groups for this candidate."%rank)
return dm_list, time_list, dm_arr, sigma_arr, width_arr
def old_read_sp_files(sp_files):
"""*** OLD VERSION ***
Read all *.singlepulse files in the current directory.
Return 5 arrays (properties of all single pulses):
DM, sigma, time, sample, downfact.
"""
tmp_sp_params = _np.array(_np.empty((1,0)),
dtype=_np.dtype([('dm', 'float64'),
('sigma','float32'),
('time','float64'),
('sample','uint32'),
('downfact','uint8')]))
for file in sp_files:
if os.path.getsize(file):
curr = _np.atleast_2d(_np.loadtxt(file, dtype=_np.dtype([('dm', 'float64'),('sigma','float32'),('time','float64'),('sample','uint32'),('downfact','uint8')])))
tmp_sp_params = _np.concatenate([tmp_sp_params, curr], axis=1)
return tmp_sp_params
def read_sp_files(files):
"""Read all *.singlepulse files in the current directory in a DM range.
Return 5 arrays (properties of all single pulses):
DM, sigma, time, sample, downfact."""
finput = _fileinput.input(files)
data = _np.loadtxt(finput,
dtype=_np.dtype([('dm', 'float32'),
('sigma','float32'),
('time','float32'),
('sample','uint32'),
('downfact','uint8')]))
return _np.atleast_2d(data)
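# Usage sketch (hypothetical file names): the result is a structured array, so
# columns can be pulled out by field name.
#     >>> data = read_sp_files(["beam0_DM100.00.singlepulse"])[0]
#     >>> data['dm'], data['sigma'], data['time']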
def read_tarfile(filenames, names, tar):
"""Read in the .singlepulse.tgz file instead of individual .singlepulse files.
Return an array of (properties of all single pulses):
DM, sigma, time, sample, downfact.
Input: filenames: names of all the singlepulse files.
names: subset of filenames. Names of the singlepulse files to be
plotted in DM vs time.
tar: tar file (.singlepulse.tgz)."""
members = []
for name in names:
if name in filenames:
member = tar.getmember(name)
members.append(member)
else:
pass
fileinfo = []
filearr = []
for mem in members:
file = tar.extractfile(mem)
for line in file.readlines():
fileinfo.append(line)
filearr+=(fileinfo[1:]) #Removes the text labels ("DM", "sigma" etc) of the singlepulse properties. Only keeps the values.
fileinfo = []
temp_list = []
for i in range(len(filearr)):
temp_line = filearr[i].split()
temp_list.append(temp_line)
main_array = _np.asarray(temp_list)
main_array = _np.split(main_array, 5, axis=1)
main_array[0] = main_array[0].astype(_np.float16)
main_array[1] = main_array[1].astype(_np.float16)
main_array[2] = main_array[2].astype(_np.float16)
    main_array[3] = main_array[3].astype(int)
    main_array[4] = main_array[4].astype(int)
return main_array
def pick_DM_for_singlepulse_files(filenm):
"""
Extract DM values from filenames from .singlepulse files.
    Input: string: .singlepulse file name
Output: float: DM value of that file.
"""
return float(filenm[filenm.find('DM')+2:filenm.find('.singlepulse')])
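# Example implied by the slicing above:
#     pick_DM_for_singlepulse_files("beam0_DM123.45.singlepulse") -> 123.45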
def ddm_response(ddm, width_ms, lofreq, hifreq):
if _np.isscalar(ddm):
ddm = _np.array([ddm])
scal = True
    else:
        ddm = _np.asarray(ddm)
        scal = False
band_MHz = _np.array((lofreq, hifreq))
zeta = 6.91e-3 * ddm * _np.diff(band_MHz)[0] / (width_ms * (_np.mean(band_MHz)/1000.)**3)
result = _np.zeros_like(ddm)
where_nonzero = _np.where(zeta != 0)
result[where_nonzero] = 0.5*_np.sqrt(_np.pi)*erf(zeta[where_nonzero])/zeta[where_nonzero]
result[zeta == 0] = 1.
if scal: return result[0]
else: return result
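# The response implemented above is the usual DM-smearing sensitivity loss
# (see e.g. Cordes & McLaughlin 2003):
#     zeta = 6.91e-3 * dDM * BW_MHz / (W_ms * f_GHz**3)
#     S(dDM)/S(0) = sqrt(pi)/2 * erf(zeta)/zeta    (-> 1 as dDM -> 0)
# where W_ms is the pulse width in ms and f_GHz is the band-centre frequency in GHz.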
def theoritical_dmspan(maxsigma, minsigma, width_ms, lofreq, hifreq):
# since the sigma threshold = 5
sigma_limit = minsigma/maxsigma
    # search over trial DM offsets of up to 5000 (the returned span is 2*ddm)
    ddm = _np.linspace(0, 5000, 50001)
    # normalized S/N response as a function of DM offset
    sigma_range = ddm_response(ddm, width_ms, lofreq, hifreq)
    # Find the index where sigma_limit is closest to one of the values in sigma_range
ind = (_np.abs(sigma_range-sigma_limit)).argmin()
return 2*ddm[ind]
def gen_arrays(dm, sp_files, tar, threshold):
"""
Extract dms, times and signal to noise from each singlepulse file as 1D arrays.
Input:
dm: The dm array of the main pulse. Used to decide the DM range in the DM vs time plot and pick out singlepulse files with those DMs.
threshold: Min signal to noise of the single pulse event that is plotted.
sp_files: all the .singlepulse file names.
tar: Instead of the providing individual singlepulse files, you can provide the .singlepulse.tgz tarball.
Output:
Arrays: dms, times, sigmas of the singlepulse events and an array of dm_vs_times file names.
Options: Either a tarball of singlepulse files or individual singlepulse files can be supplied.
Faster when individual singlepulse files are supplied.
"""
max_dm = _np.ceil(_np.max(dm)).astype('int')
min_dm = _np.min(dm).astype('int')
diff_dm = max_dm-min_dm
ddm = min_dm-diff_dm
hidm = max_dm+diff_dm
if (ddm <= 0):
ddm = 0
name_DMs = _np.asarray([pick_DM_for_singlepulse_files(sp_files[x]) for x in range(len(sp_files))])
inds = name_DMs.argsort()
name_DMs = name_DMs[inds]
sp_files = _np.asarray(sp_files)[inds]
loidx = _np.argmin(_np.abs(name_DMs-ddm))
hiidx = _np.argmin(_np.abs(name_DMs-hidm))
print(loidx, hiidx)
singlepulsefiles = list(sp_files[loidx:hiidx])
if tar is not None:
data = read_tarfile(sp_files, singlepulsefiles, tar)
dms = _np.reshape(data[0],(len(data[0]),))
times = _np.reshape(data[2],(len(data[1]),))
sigmas = _np.reshape(data[1],(len(data[2]),))
widths = _np.reshape(data[4],(len(data[4]),))
else:
data = read_sp_files(singlepulsefiles)[0]
dms = data['dm']
times = data['time']
sigmas = data['sigma']
widths = data['downfact']
dms = _np.delete(dms, (0), axis = 0)
times = _np.delete(times, (0), axis = 0)
sigmas = _np.delete(sigmas, (0), axis = 0)
widths = _np.delete(widths, (0), axis = 0)
return dms, times, sigmas, widths, singlepulsefiles
def read_spd(spd_file, tar = None):
"""
Reads in all the .spd and the .singlepulse.tgz info that can reproduce the sp plots.
Inputs: spd_file: .spd file
.singlepulse.tgz: if not supplied, it will only output .spd info.
Default: not supplied.
Output: An object that has all the relevant information to remake the plot.
"""
sp = spd(spd_file)
if tar is not None:
dmVt_dms, dmVt_times, dmVt_sigmas, dmVt_widths, dmVt_files = gen_arrays(sp.dmVt_this_dms, sp.spfiles, tar, threshold=5)
sp.dmVt_dms = dmVt_dms
sp.dmVt_times = dmVt_times
sp.dmVt_sigmas = dmVt_sigmas
sp.dmVt_widths = dmVt_widths
return sp
else:
return sp
| 11,929 | 41.913669 | 169 | py |
presto | presto-master/python/presto/singlepulse/rrattrap.py | #!/usr/bin/env python
"""
rrattrap.py
Single pulse sifting code: performs grouping and ranking of single pulses identified by PRESTO's single_pulse_search.py.
Usage:
python rrattrap.py [OPTIONS] --inffile <.inf file> <.singlepulse files>
Output:
- groups.txt : a file listing all single pulse groups and their ranking.
- several colourized DM vs. time single-pulse plots, for different DM ranges, with colours corresponding to group ratings.
Chen Karako May 7, 2014
Updated by Chitrang Patel June 10, 2016.
"""
from __future__ import print_function
#from past.builtins import cmp
from time import strftime
from presto import infodata
import matplotlib.pyplot as plt
import numpy as np
from presto.Pgplot import *
import optparse
from presto.singlepulse import spio
FRACTIONAL_SIGMA = 0.9 # change to 0.8?
ALL_RANKS_ORDERED = [1,2,0,3,4,5,6]
DEBUG = True # if True, will be verbose
def dmthreshold(dm, use_dmplan, min_group=45):
if use_dmplan:
        from presto.singlepulse import rrattrap_config
dmt, min_group = rrattrap_config.use_dmplan(dm)
else:
dmt = 1
min_group = min_group
return dmt, min_group
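# Sketch of the behaviour above: without a DM plan the DM/time threshold scale
# factor is always 1 and min_group is passed through unchanged, e.g.
#     dmthreshold(100.0, False)     -> (1, 45)
#     dmthreshold(100.0, False, 30) -> (1, 30)
# With --use-DMplan both values instead come from rrattrap_config.use_dmplan(dm).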
class SinglePulseGroup(object): # Greg's modification
"""Define single pulse group
"""
__slots__ = ['min_dm', 'max_dm', 'max_sigma', 'center_time',
'min_time', 'max_time', 'duration',
'singlepulses', 'numpulses', 'rank'] # Greg's modification
def __init__(self, dm, sigma, time, sample, downfact):
"""SinglePulseGroup constructor.
Takes as input one single pulse (creates a group of one)
inputs DM,Sigma,Time,Sample,Downfact.
"""
self.min_dm = dm
self.max_dm = dm
self.max_sigma = sigma
self.center_time = time
if sample == 0:
dt = 0 # this will ignore events with sample=0.
# better would be to use the inf files
# to determine the dt for these events
else:
dt = time/sample
self.min_time = time-downfact/2.0*dt
self.max_time = time+downfact/2.0*dt
self.duration = self.max_time - self.min_time
self.singlepulses = [(dm,sigma,time,sample,downfact)]
self.numpulses = 1
self.rank = 0
# Python 3 no longer honors the __cmp__ special method, so another way of comparing is required
# see: https://portingguide.readthedocs.io/en/latest/comparisons.html
# def __cmp__(self, other):
# return cmp(ALL_RANKS_ORDERED.index(self.rank),
# ALL_RANKS_ORDERED.index(other.rank))
def timeisclose(self,other,use_dmplan,time_thresh=0.5):
"""Checks whether the overlap in time of self and other is within
time_thresh. Takes as input other, a SinglePulseGroup object,
as well as the optional input time_thresh (in s).
"""
if self.duration < other.duration:
narrow = self
wide = other
else:
narrow = other
wide = self
time_thresh = dmthreshold(self.min_dm, use_dmplan)[0]*time_thresh
dt = max(time_thresh, narrow.duration/2.0) # always group groups within time_thresh (or duration/2, if longer) of one another
timeisclose = (wide.max_time >= (narrow.center_time - dt)) and\
(wide.min_time <= (narrow.center_time + dt))
return timeisclose
def dmisclose(self,other,use_dmplan,dm_thresh=0.1):
"""Checks whether the DM of self and other is within dm_thresh of one
another. Takes as input other, a SinglePulseGroup object, as well as the optional input dm_thresh (in pc cm-3).
"""
dm_thresh = dmthreshold(self.min_dm, use_dmplan)[0]*dm_thresh
dmisclose = (other.max_dm >= (self.min_dm-dm_thresh)) and\
(other.min_dm <= (self.max_dm+dm_thresh))
return dmisclose
def combine(self,other):
"""combines self and other SinglePulseGroup objects.
takes as input other, a SinglePulseGroup object.
combines in place; nothing returned.
"""
self.min_dm = min(self.min_dm, other.min_dm)
self.max_dm = max(self.max_dm, other.max_dm)
self.min_time = min(self.min_time, other.min_time)
self.max_time = max(self.max_time, other.max_time)
self.max_sigma = max(self.max_sigma, other.max_sigma)
self.duration = self.max_time - self.min_time
self.center_time = (self.min_time + self.max_time)/2.0
self.numpulses = self.numpulses + other.numpulses
self.singlepulses.extend(other.singlepulses)
def __str__(self):
s = ["Group of %d single pulses: " % len(self.singlepulses), \
"\tMin DM (cm-3 pc): %f" % self.min_dm, \
"\tMax DM (cm-3 pc): %f" % self.max_dm, \
"\tCenter time (s): %f" % self.center_time, \
"\tDuration (s): %f" % self.duration, \
"\tMax sigma: %f" % self.max_sigma, \
"\tRank: %f" % self.rank]
return '\n'.join(s)
def create_groups(sps, inffile, min_nearby=1, time_thresh=0.5, \
dm_thresh=0.1, ignore_obs_end=0, use_dmplan=False):
"""Given a recarray of singlepulses return a list of
SinglePulseGroup objects.
Inputs:
sps: A recarray of single pulse info.
min_nearby: Minimum number of nearby single pulse events
to bother creating a group.
time_thresh: Time-range within which another event must be found
dm_thresh: DM-range within which another event must be found
ignore_obs_end: if non-zero, the time (in seconds) to ignore from
the end of the observation. Useful for beams on which zero-DMing
was applied and caused artifacts in the sp output at the end of
the obs.
*** NOTE: time_thresh and dm_thresh are used together
Outputs:
groups: A list of SinglePulseGroup objects.
"""
Tobs = get_obs_info(inffile)['T'] # duration of observation
if not (0 <= ignore_obs_end < Tobs):
print("Invalid ignore_obs_end value. Value must be: \
0 <= ignore_obs_end < Tobs. Setting ignore_obs_end to 0.")
ignore_obs_end = 0
Tignore = Tobs - ignore_obs_end # sps with t>=Tignore will be ignored
numsps = len(sps)
# Sort by time
sps.sort(order='time')
groups = []
for ii in range(numsps):
# Time and DM of current SP event
ctime = sps[ii]['time']
if ignore_obs_end and (ctime > Tignore):
continue
cdm = sps[ii]['dm']
ngood = 0 # number of good neighbours
time_thresh = dmthreshold(cdm, use_dmplan)[0]*time_thresh
dm_thresh = dmthreshold(cdm, use_dmplan)[0]*dm_thresh
jj = ii+1
while (ngood < min_nearby) and (jj < numsps) and \
(sps[jj]['time'] < (ctime+time_thresh)):
if abs(sps[jj]['dm'] - cdm) < dm_thresh:
ngood += 1
jj += 1
# Look backward as well
jj = ii-1
while (ngood < min_nearby) and (jj >= 0) and \
(sps[jj]['time'] > (ctime-time_thresh)):
if abs(sps[jj]['dm'] - cdm) < dm_thresh:
ngood += 1
jj -= 1
if ngood >= min_nearby:
# At least min_nearby nearby SP events
grp = SinglePulseGroup(*sps[ii])
groups.append(grp)
return groups
def grouping_sp_dmt(groups, use_dmplan=False, time_thresh=0.5, dm_thresh=0.1):
"""Groups SinglePulse objects based on proximity in time, DM.
Outputs list of Single Pulse Groups.
"""
didcombine = True
while didcombine:
didcombine = False
groups.sort(key=lambda group: group.min_time) #Sort by time
for i, grp1 in enumerate(groups):
j=i+1
while (j<len(groups) and groups[i].center_time+0.2 > groups[j].center_time): #Only look at groups that are close in time
if grp1.dmisclose(groups[j], use_dmplan, dm_thresh):
if grp1.timeisclose(groups[j], use_dmplan, time_thresh):
grp1.combine(groups.pop(j))
didcombine = True
j=j+1
def grouping_rfi(groups, use_dmplan=False, time_thresh=0.5, dm_thresh=0.1):
"""
Groups together close groups of RFI, and considers as RFI other groups
that are close to RFI.
"""
didcombine = True
while didcombine:
didcombine = False
# If a group is very close to a group of rfi, set it as rfi
for i in reversed(list(range(len(groups)))):
grp1 = groups[i]
for j in reversed(list(range(len(groups)))):
if j <= i:
continue
grp2 = groups[j]
if (grp1.rank != 2) and (grp2.rank != 2):
continue
if grp1.dmisclose(grp2,use_dmplan,10) and grp1.timeisclose(grp2, use_dmplan, time_thresh): # use bigger time thresh?
grp1.combine(groups.pop(j))
# FIXME: Should we set as RFI without checking
# sigma behaviour (ie re-check rank) for group?
grp1.rank = 2 # Set as rfi
didcombine = True
def grouping_sp_t(groups, use_dmplan=False, time_thresh=0.5, dm_thresh=0.1):
"""Groups SinglePulse objects based on proximity in time, assuming
the DM difference is no more than DMDIFF=10.
Inputs:
groups: A list of SinglePulseGroup objects.
Outputs:
groups: A list of SinglePulseGroup objects.
"""
DMDIFF = 10 #max allowed DM difference between groups that will be grouped in time
didcombine = True
while didcombine:
didcombine = False
for i, grp1 in enumerate(groups):
for j in range(len(groups)-1,i,-1):
if grp1.timeisclose(groups[j], use_dmplan, time_thresh) and \
grp1.dmisclose(groups[j],use_dmplan,DMDIFF): # We check if two events
# have similar time and
# a DM difference < DMDIFF
grp1.combine(groups.pop(j)) # Note group rank is not
# updated when combine groups,
# need to re-run ranking after.
didcombine = True
return groups
def flag_noise(groups, use_dmplan=False, min_group=45):
"""Flag groups as noise based on group size.
If the number of sp events in a group is < min_group,
this group is marked as noise.
Inputs:
groups: A list of SinglePulseGroup objects.
min_group: The minimum group size that a group must have
in order not to be considered as noise. The
default min_group is MIN_GROUP.
Outputs:
None
"""
for grp in groups:
min_group = dmthreshold(grp.min_dm, use_dmplan, min_group)[1]
if grp.numpulses < min_group:
grp.rank = 1
return groups
def flag_rfi(groups, close_dm = 2.0):
"""Flag groups as RFI based on sigma behavior.
Takes as input list of Single Pulse Groups.
The ranks of the groups are updated in-place.
Inputs:
groups: A list of SinglePulseGroup objects.
Outputs:
None
"""
for grp in groups:
if (grp.rank != 2) and (grp.min_dm <= close_dm): # if grp has not
# yet been marked RFI
for sp in grp.singlepulses:
if (sp[0] <= close_dm) and \
(sp[1] >= (FRACTIONAL_SIGMA*grp.max_sigma)):
# if any sp in the group has low dm, and its sigma is >= frac sigma*grp.max_sigma, call that grp rfi
grp.rank = 2
break
def rank_groups(groups, use_dmplan=False, min_group=45, min_sigma=8.0):
"""Rank groups based on their sigma vs. DM behaviour.
Takes as input list of Single Pulse Groups.
The ranks of the groups are updated in-place.
Inputs:
groups: A list of SinglePulseGroup objects.
Outputs:
None
"""
# divide groups into 5 parts (based on number events) to examine sigma behaviour
for grp in groups:
min_group = dmthreshold(grp.min_dm, use_dmplan, min_group)[1]
if len(grp.singlepulses) < min_group:
grp.rank = 1
elif grp.rank != 2: # don't overwrite ranks of rfi groups
numsps = len(grp.singlepulses)
# sort list by increasing DM
idmsort = np.argsort([sp[0] for sp in grp.singlepulses])
sigmas = np.ma.zeros(int(np.ceil(numsps/5.0)*5))
sigmas[-numsps:] = np.asarray([sp[1] for sp in grp.singlepulses])[idmsort]
# Mask sigma=0. These are elements added to pad size of array
# to have multiple of 5 elements
# (there should never be actual SPs with sigma=0)
sigmas = np.ma.masked_equal(sigmas, 0.0)
sigmas.shape = (5, int(np.ceil(numsps/5.0)))
maxsigmas = sigmas.max(axis=1)
avgsigmas = sigmas.mean(axis=1)
# standard deviation in signal to noise values in the group.
stdsigmas = sigmas.std(axis=1)
# The largest std deviation
maxstd = np.ma.max(stdsigmas)
# The smallest std deviation
minstd = np.ma.min(stdsigmas)
# The largest maxsigma
maxsigma = np.ma.max(maxsigmas)
# The smallest maxsigma
minsigma = np.ma.min(maxsigmas)
# The largest avgsigma
maxavgsigma = np.ma.max(avgsigmas)
# The smallest avgsigma
minavgsigma = np.ma.min(avgsigmas)
#if maxavgsigma<1.05*minavgsigma:
if all(stdsigma < 0.1 for stdsigma in stdsigmas):
# Sigmas pretty much constant. Group is RFI
grp.rank = 2
if maxsigmas[2] > maxsigmas[1]:
if maxsigmas[2] > maxsigmas[3]:
# nearest neighbour subgroups both have smaller sigma
grp.rank = 3
if (maxsigmas[3] > maxsigmas[4]) and (maxsigmas[1] > maxsigmas[0]):
#next-nearest subgps have sigma < nearest neighbours
grp.rank = 4
if maxsigmas[2] > min_sigma:
# We want the largest maxsigma to be at least
# 1.15 times bigger than the smallest
grp.rank = 5
if (avgsigmas[2] > avgsigmas[0]) and \
(avgsigmas[2] > avgsigmas[4]) and \
maxsigma>1.15*minsigma:
grp.rank = 6
else: #ie. maxsigmas[2] <= maxsigmas[3], allowing for asymmetry:
if maxsigmas[1] > maxsigmas[0]:
grp.rank = 3
if maxsigmas[3] > maxsigmas[4]:
grp.rank = 4
if maxsigmas[3] > min_sigma:
grp.rank = 5
if (avgsigmas[3] > avgsigmas[0]) and \
(avgsigmas[3] > avgsigmas[4]) and \
maxsigma>1.15*minsigma:
grp.rank = 6
else: #ie. maxsigma2 >= maxsigma3, allowing for asymmetry:
if (maxsigmas[1] > maxsigmas[0]) and (maxsigmas[2] > maxsigmas[3]):
grp.rank = 3
if maxsigmas[3] > maxsigmas[4]:
grp.rank = 4
if maxsigmas[1] > min_sigma:
grp.rank = 5
if (avgsigmas[1] >= avgsigmas[0]) and \
(avgsigmas[1] > avgsigmas[4]) and \
maxsigma>1.15*minsigma:
grp.rank = 6
if any(stdsigma < 0.1 for stdsigma in stdsigmas) and (grp.max_sigma < 5.5): # if max sigma of the group is less than 5.5 and the sigma distribution is mostly flat, then it is not likely to be astrophysical.
grp.rank = 0
if grp.rank == 0:
pass
def check_dmspan(groups, dt, lofreq, hifreq):
"""Read in groups and check whether each group's DM span exceeds the threshold.
"""
for grp in groups:
for sp in grp.singlepulses:
if sp[1] == grp.max_sigma:
downsamp = (sp[2]/dt)/sp[3]
width_ms = 1000.0*sp[4]*dt*downsamp
break
if (grp.max_dm-grp.min_dm > 5*spio.theoritical_dmspan(grp.max_sigma, 5.0, width_ms, lofreq, hifreq)):
            # checks if the DM span is more than 5 times the theoretical DM span.
if not ((grp.rank == 5) or (grp.rank == 6)): #if group is not good or excellent
grp.rank = 2 # then its most likely RFI.
def get_obs_info(inffile):
"""Read in an .inf file to extract observation information.
Return observation RA, Dec, duration, and source name.
"""
inf = infodata.infodata(inffile)
T = inf.dt * inf.N # total observation time (s)
RA = inf.RA
dec = inf.DEC
src = inf.object
MJD = inf.epoch
telescope = inf.telescope
freq = (inf.numchan/2-0.5)*inf.chan_width+inf.lofreq # center freq
return {'T': T, 'RA': RA, 'dec': dec, 'src': src, 'MJD': MJD, 'telescope': telescope, 'freq': freq}
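# Usage sketch (hypothetical file name):
#     >>> obs = get_obs_info("myobs_rfifind.inf")
#     >>> obs['T'], obs['src'], obs['freq']   # duration (s), source name, centre freq (MHz)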
def plot_sp_rated_all(groups, ranks, inffile, ylow=0, yhigh=100, xlow=0, xhigh=120):
"""Take in dict of Single Pulse Group lists and
plot the DM vs. t for all, with the plotted
colour corresponding to group rank.
The DM range to plot can also be specified.
"""
rank_to_color = {2:'darkgrey', 0:'k', 3:'c', 4:'royalblue', 5:'b', 6:'m'}
# Prepare data to plot
dm = []
time = []
size = []
colors = []
for grp in groups:
if grp.rank not in ranks:
continue
if grp.min_dm < yhigh and grp.max_dm > ylow:
for sp in grp.singlepulses:
dm.append(sp[0])
size.append(np.clip(3*sp[1]-14,0,50))
time.append(sp[2])
colors.append(rank_to_color[grp.rank])
# Plot
plt.axes()
if len(time): # check if there are points to plot
plt.scatter(time, dm, c=colors, marker='o', s=size, edgecolor='none')
plt.xlabel('Time (s)')
plt.ylabel('DM (pc cm$^{-3}$)')
# if inf file exists, will override xlow and xhigh
# specified when function is called
if get_obs_info(inffile) is not None: # if inf files exist, can get obs info
obsinfo = get_obs_info(inffile)
plt.title('Single Pulse Results for %s\nRA: %s Dec: %s' %
(obsinfo['src'], obsinfo['RA'], obsinfo['dec']))
xhigh = obsinfo['T'] # set xhigh to observation duration
plt.xlim((xlow, xhigh))
plt.ylim((ylow, yhigh))
print_debug("Saving figure...")
plt.savefig('grouped_sps_DMs%s-%s.png' % (ylow, yhigh), dpi=300)
def plot_sp_rated_pgplot(groups, ranks, inffile, ylow=0, yhigh=100, xlow=0, xhigh=120):
"""Plot groups according to their ranks. Uses pgplot rather
than matplotlib for faster, more memory-efficient plotting.
Inputs:
groups: A list of SinglePulseGroup objects.
ylow (optional): lower y limit to plot. Default: ylow=0.
yhigh (optional): higher y limit to plot. Default: yhigh=100.
xlow (optional): lower x limit to plot. Default: xlow=0.
xhigh (optional): higher x limit to plot. Default: use inf file
to find observation duration, or if inf file unavailable,
use xhigh=120s.
Outputs:
None; saves a colorized sp plot.
"""
if get_obs_info(inffile) is not None: # if inf files exist, can get obs info
obsinfo = get_obs_info(inffile)
#plt.title('Single Pulse Results for %s\nRA: %s Dec: %s' %
#(obsinfo['src'], obsinfo['RA'], obsinfo['dec']))
xhigh = obsinfo['T'] # set xhigh to observation duration
ppgplot.pgopen('grouped_pgplot_DMs%s-%s.ps/vcps' % (ylow, yhigh))
# copying from single_pulse_search.py plotting
ppgplot.pgpap(9, 0.7) # Width in inches, aspect
# ppgplot.pgsvp(0.06, 0.97, 0.08, 0.52) # single_pulse_search.py
ppgplot.pgsvp(0.87, 0.97, 0.80, 0.97)
ppgplot.pgsch(0.9)
ppgplot.pgsci(15)
ppgplot.pgpt(np.array([0.01]), np.array([0.12]), 9)
ppgplot.pgptxt(0.1, 0.1, 0.0, 0.0, '2: RFI')
ppgplot.pgsci(1)
ppgplot.pgpt(np.array([0.01]), np.array([0.27]), 9)
ppgplot.pgptxt(0.1, 0.25, 0.0, 0.0, '0: Noise')
ppgplot.pgsci(5)
ppgplot.pgpt(np.array([0.01]), np.array([0.42]), 9)
ppgplot.pgptxt(0.1, 0.4, 0.0, 0.0, '3: Ok')
ppgplot.pgsci(11)
ppgplot.pgpt(np.array([0.01]), np.array([0.57]), 9)
ppgplot.pgptxt(0.1, 0.55, 0.0, 0.0, '4: Good')
ppgplot.pgsci(4)
ppgplot.pgpt(np.array([0.01]), np.array([0.72]), 9)
    ppgplot.pgptxt(0.1, 0.7, 0.0, 0.0, '5: Very Good')
ppgplot.pgsci(6)
ppgplot.pgpt(np.array([0.01]), np.array([0.87]), 9)
ppgplot.pgptxt(0.1, 0.85, 0.0, 0.0, '6: Excellent')
ppgplot.pgsci(1)
ppgplot.pgsvp(0.06, 0.97, 0.08, 0.80)
#ppgplot.pgenv(xlow, xhigh, ylow, yhigh, 0, 1) #not sure if need 0,1
ppgplot.pgswin(xlow, xhigh, ylow, yhigh)
ppgplot.pgsch(0.8)
ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0) # redundant with pgenv
ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time (s)")
ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "DM (pc cm\\u-3\\d)")
ppgplot.pgsch(1.0)
ppgplot.pgmtxt('T', 2.5, 0.3, 0.0,
"Single Pulse Results for %s" % obsinfo['src'])
ppgplot.pgmtxt('T', 1.5, 0.3, 0.0, 'RA: %s Dec: %s' \
% (obsinfo['RA'], obsinfo['dec']))
ppgplot.pgmtxt('T', 0.5, 0.2, 0.0, 'Telescope: %s MJD: %s Freq: %.1f MHz'\
% (obsinfo['telescope'], obsinfo['MJD'], obsinfo['freq']))
ppgplot.pgsch(0.8)
rank_to_color = {2:15, # grey
0:1, # black
3:5, # cyan
4:11, # dim blue
5:4, # dark blue
6:6} # magenta
# Plotting scheme taken from single_pulse_search.py
# Circles are symbols 20-26 in increasing order
snr_range = 12.0
spthresh = 5.0 # 5 for gbncc (5.5 plotting), 6 for palfa...
for grp in groups:
cand_symbols = []
dm = []
time = []
if grp.rank not in ranks:
continue
if grp.min_dm < yhigh and grp.max_dm > ylow:
ppgplot.pgsci(rank_to_color[grp.rank])
for sp in grp.singlepulses:
dm.append(sp[0])
time.append(sp[2])
cand_SNR = sp[1]
# DEBUG: UNCOMMENT first line, then remove next 2 lines
if np.isfinite(cand_SNR):
cand_symbol = int((cand_SNR-spthresh)/snr_range * 6.0 + 20.5)
else:
cand_symbol = 26
cand_symbols.append(min(cand_symbol, 26)) # biggest circle is 26
cand_symbols = np.array(cand_symbols)
dm = np.array(dm)
time = np.array(time)
for ii in [26, 25, 24, 23, 22, 21, 20]:
inds = np.nonzero(cand_symbols==ii)[0]
ppgplot.pgpt(time[inds], dm[inds], ii)
ppgplot.pgclos()
def print_debug(msg):
if DEBUG:
print(msg)
# print h.heap()
def pop_by_rank(groups, rank):
"""Remove groups with specified rank from a list of groups.
Removes the groups in place; nothing returned.
Inputs:
groups: A list of SinglePulseGroup objects.
rank: The rank of groups to be removed.
Outputs:
None
"""
for j in reversed(list(range(len(groups)))):
if groups[j].rank == rank:
del groups[j]
def rank_occur(groups):
"""Return a dict of the number of groups of each rank in the groups list.
Inputs:
groups: A list of SinglePulseGroup objects.
Outputs:
rank_occur: A dict of ranks and the number of their occurrences
in the groups list.
"""
rank_occur = {}
for grp in groups:
nn = rank_occur.setdefault(grp.rank, 0)
rank_occur[grp.rank] = nn+1
return rank_occur
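# Usage sketch: summarise how many groups ended up in each rank, e.g.
#     >>> rank_occur(groups)
#     {0: 12, 2: 85, 3: 4, 5: 1}    # illustrative counts only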
#@profile
def main():
parser = optparse.OptionParser(prog="rrattrap.py", \
version="Chen Karako, updated by Chitrang Patel(June 23, 2015)",\
usage="%prog --inffile <.inf file> [options] *.singlepulse",\
description="Group single pulse events and rank them based on the sigma behavior. \
Plot DM vs time with different colours for different ranks as follows:\
                                             \t\tRank 0 (Noise) : Black\
                                             \t\tRank 2 (RFI)   : Grey\
\t\tRank 3 (ok) : Cyan\
\t\tRank 4 (good) : dim blue\
\t\tRank 5 (very good) : dark blue\
\t\tRank 6 (excellent) : Magenta")
parser.add_option('--CLOSE-DM', dest='close_dm', type='float', \
                       help="DM below which the signal is considered RFI (Default: 2)", \
default=2.0)
parser.add_option('--use-configfile', dest='use_configfile', action='store_true', \
help="If this flag is set - import the config file for selecting grouping" \
"parameters.(Default: do not use a config file.)", default=False)
parser.add_option('--use-DMplan', dest='use_DMplan', action='store_true', \
help="If this flag is set - Use the ddplan for selecting grouping" \
"parameters. Make sure that you have a corresponding config file containing" \
"the DDplan. (Default: do not use ddplan)", default=False)
parser.add_option('--min-group', dest='min_group', type='int', \
                       help="minimum number of events in a group to not be considered noise." \
"(Default: 45)", \
default=45)
parser.add_option('--dm-thresh', dest='dm_thresh', type='float', \
                       help="DM threshold to use for nearest neighbour. Suggest a value greater " \
                             " than the DM step size (Default: 0.5 pc/cm^3 - will not work if DM " \
                             "step size is greater than 0.5)", default=0.5)
parser.add_option('--time-thresh', dest='time_thresh', type='float', \
help="Time threshold to use for nearest neighbour. Suggest a value that " \
" is a few times the max pulse width(Default: 0.1 s)", default=0.1)
parser.add_option('--min-sigma', dest='min_sigma', type='float', \
                       help="minimum signal-to-noise above which the group is strongly considered " \
                             "to be astrophysical. (Default: 8.0)", \
default=8.0)
parser.add_option('--no-plot', dest='plot', action='store_false', \
help="Do not plot the groups in the DM time plot." \
"(Default: Make a plot)", default=True)
parser.add_option('--plottype', dest='plottype', type = 'string',\
help="Make a plot using : 'matplotlib' or 'pgplot'."\
, default='pgplot')
parser.add_option('--min-rank-to-plot', dest='min_ranktoplot', type = 'int',\
                       help="Only groups with rank greater than this will be plotted. (default: plot \
all except rank 1)", default=0)
parser.add_option('--min-rank-to-write', dest='min_ranktowrite', type = 'int',\
                       help="Only info of the groups with rank greater than this will be written. " \
"(default: write all except rank 1)", default=0)
parser.add_option('--inffile', dest='inffile', type = 'string',\
help="A .inf file. I suggest a .rfifind.inf file."\
, default=None)
parser.add_option('-o', dest='outbasenm', type = 'string',\
help="outfile base name. .groups.txt will be added to the given name."\
, default='')
options, args = parser.parse_args()
if not hasattr(options, 'inffile'):
raise ValueError("You must supply a .inf file. I suggest .rfifind.inf")
if not options.inffile.endswith(".inf"):
raise ValueError("Cannot recognize file type from extension. "
" Only '.inf' types are supported.")
if options.use_DMplan or options.use_configfile:
        from presto.singlepulse import rrattrap_config
RANKS = np.asarray([2,0,3,4,5,6])
if options.use_configfile:
CLOSE_DM = rrattrap_config.CLOSE_DM
MIN_GROUP = rrattrap_config.MIN_GROUP
TIME_THRESH = rrattrap_config.TIME_THRESH
DM_THRESH = rrattrap_config.DM_THRESH
MIN_SIGMA = rrattrap_config.MIN_SIGMA
PLOT = rrattrap_config.PLOT
PLOTTYPE = rrattrap_config.PLOTTYPE
RANKS_TO_WRITE = rrattrap_config.RANKS_TO_WRITE
RANKS_TO_PLOT = rrattrap_config.RANKS_TO_PLOT
else:
CLOSE_DM = options.close_dm
MIN_GROUP = options.min_group
TIME_THRESH = options.time_thresh
DM_THRESH = options.dm_thresh
MIN_SIGMA = options.min_sigma
PLOT = options.plot
PLOTTYPE = options.plottype
RANKS_TO_WRITE = list(RANKS[RANKS>options.min_ranktowrite])
RANKS_TO_PLOT = list(RANKS[RANKS>options.min_ranktoplot])
inffile = options.inffile
inf = infodata.infodata(inffile)
print_debug("Beginning read_sp_files... "+strftime("%Y-%m-%d %H:%M:%S"))
groups = spio.read_sp_files(args[1:])[0]
print_debug("Finished read_sp_files, beginning create_groups... " +
strftime("%Y-%m-%d %H:%M:%S"))
print_debug("Number of single pulse events: %d " % len(groups))
groups = create_groups(groups, inffile, min_nearby=1, ignore_obs_end=10, time_thresh=TIME_THRESH, dm_thresh=DM_THRESH, use_dmplan=options.use_DMplan) # ignore the last 10 seconds of the obs, for palfa
print_debug("Number of groups: %d " % len(groups))
print_debug("Finished create_groups, beginning grouping_sp_dmt... " +
strftime("%Y-%m-%d %H:%M:%S"))
grouping_sp_dmt(groups, use_dmplan=options.use_DMplan, time_thresh=TIME_THRESH, dm_thresh=DM_THRESH)
print_debug("Number of groups (after initial grouping): %d " % len(groups))
print_debug("Finished grouping_sp_dmt, beginning flag_noise... " +
strftime("%Y-%m-%d %H:%M:%S"))
flag_noise(groups, use_dmplan=options.use_DMplan, min_group=MIN_GROUP) # do an initial coarse noise flagging and removal
pop_by_rank(groups, 1)
print_debug("Number of groups (after removed noise gps w <10 sps): %d " % len(groups))
print_debug("Beginning grouping_sp_t... " +
strftime("%Y-%m-%d %H:%M:%S"))
# Regroup good groups based on proximity in time only (compensate for missing middles):
groups = grouping_sp_t(groups, use_dmplan=options.use_DMplan, time_thresh=TIME_THRESH, dm_thresh=DM_THRESH)
print_debug("Finished grouping_sp_t. " + strftime("%Y-%m-%d %H:%M:%S"))
# Flag RFI groups, noise
flag_rfi(groups, close_dm=CLOSE_DM)
# Rank groups and identify noise (<45/40/35/30 sp events) groups
print_debug("Ranking groups...")
rank_groups(groups, use_dmplan=options.use_DMplan, min_group=MIN_GROUP, min_sigma=MIN_SIGMA)
# Remove noise groups
print_debug("Before removing noise, len(groups): %s" % len(groups))
pop_by_rank(groups, 1)
print_debug("After removing noise, len(groups): %s" % len(groups))
# Group rfi with very close groups
print_debug("len(groups) before grouping_rfi: %s" % len(groups))
print_debug("Beginning grouping_rfi... " + strftime("%Y-%m-%d %H:%M:%S"))
grouping_rfi(groups, use_dmplan=options.use_DMplan, time_thresh=TIME_THRESH, dm_thresh=DM_THRESH)
print_debug("Finished grouping_rfi. " +
strftime("%Y-%m-%d %H:%M:%S"))
# Rank groups
print_debug("Finished rank_groups, beginning DM span check... " +
strftime("%Y-%m-%d %H:%M:%S"))
# Remove groups that are likely RFI, based on their large span in DM
print_debug("Beginning DM span check...")
check_dmspan(groups, inf.dt, inf.lofreq, inf.lofreq+inf.BW)
print_debug("Finished DM span check, beginning writing to outfile... " +
strftime("%Y-%m-%d %H:%M:%S"))
outfile = open(options.outbasenm+'groups.txt', 'w')
summaryfile = open(options.outbasenm+'spsummary.txt', 'w')
rank_dict = rank_occur(groups)
for rank in sorted(ALL_RANKS_ORDERED):
if rank != 1:
outfile.write("Number of rank %d groups: %d \n" %
(rank, rank_dict.get(rank, 0)))
summaryfile.write("Number of rank %d groups: %d \n" %
(rank, rank_dict.get(rank, 0)))
outfile.write("\n")
summaryfile.close()
# Reverse sort lists so good groups are written at the top of the file
groups.sort(key=lambda x: ALL_RANKS_ORDERED.index(x.rank), reverse=True)
# write list of events in each group
for grp in groups:
if grp.rank in RANKS_TO_WRITE:
outfile.write(str(grp) + '\n') #print group summary
outfile.write('\n')
outfile.write("# DM Sigma Time (s) Sample Downfact \n")
for sp in grp.singlepulses:
outfile.write("%7.2f %7.2f %13.6f %10d %3d \n" % sp)
outfile.write('\n')
outfile.close()
print_debug("Finished writing to outfile, now plotting... " +
strftime("%Y-%m-%d %H:%M:%S"))
if PLOT:
ranks = RANKS_TO_PLOT
# Sort groups so better-ranked groups are plotted on top of worse groups
groups.sort(key=lambda x: ALL_RANKS_ORDERED.index(x.rank))
# create several DM vs t plots, splitting up DM in overlapping intervals
        # DMs 0-30, 20-110, 100-310, 300-1000, 1000-10000
if PLOTTYPE.lower() == 'pgplot':
# Use PGPLOT to plot
plot_sp_rated_pgplot(groups, ranks, inffile, 0, 30)
print_debug("Finished PGplotting DMs0-30 "+strftime("%Y-%m-%d %H:%M:%S"))
plot_sp_rated_pgplot(groups, ranks, inffile, 20, 110)
print_debug("Finished PGplotting DMs20-110 "+strftime("%Y-%m-%d %H:%M:%S"))
plot_sp_rated_pgplot(groups, ranks, inffile, 100, 310)
print_debug("Finished PGplotting DMs100-310 "+strftime("%Y-%m-%d %H:%M:%S"))
            plot_sp_rated_pgplot(groups, ranks, inffile, 300, 1000)
            print_debug("Finished PGplotting DMs300-1000 "+strftime("%Y-%m-%d %H:%M:%S"))
            plot_sp_rated_pgplot(groups, ranks, inffile, 1000, 10000)
            print_debug("Finished PGplotting DMs1000-10000 "+strftime("%Y-%m-%d %H:%M:%S"))
elif PLOTTYPE.lower() == 'matplotlib':
# Use matplotlib to plot
plot_sp_rated_all(groups, ranks, inffile, 0, 30)
print_debug("Finished plotting DMs0-30 "+strftime("%Y-%m-%d %H:%M:%S"))
plot_sp_rated_all(groups, ranks, inffile, 20, 110)
print_debug("Finished plotting DMs20-110 "+strftime("%Y-%m-%d %H:%M:%S"))
plot_sp_rated_all(groups, ranks, inffile, 100, 310)
print_debug("Finished plotting DMs100-310 "+strftime("%Y-%m-%d %H:%M:%S"))
plot_sp_rated_all(groups, ranks, inffile, 300, 1000)
print_debug("Finished plotting DMs300-1000 "+strftime("%Y-%m-%d %H:%M:%S"))
plot_sp_rated_all(groups, ranks, inffile, 1000, 10000)
print_debug("Finished plotting DMs1000-10000 "+strftime("%Y-%m-%d %H:%M:%S"))
else:
print("Plot type must be one of 'matplotlib' or 'pgplot'. Not plotting.")
if __name__ == '__main__':
main()
| 36,982 | 43.773608 | 218 | py |
presto | presto-master/python/presto/singlepulse/sp_pgplot.py | """
Routine for easy to use 1-D and 2-D plotting using 'PGPLOT'
and the Python 'PPGPLOT' package
Written by Scott M. Ransom ([email protected])
last revision: 01 Jul 2000
'PGPLOT' was writtten by Tim Pearson <[email protected]>,
and can be found at http://astro.caltech.edu/~tjp/pgplot/
'PPGPLOT' was written by Nick Patavalis <[email protected]>,
and can be found at http://ariadne.di.uoa.gr/ppgplot/
_or_ an updated version is available in the same directory
where this file was found: ftp://cfa-ftp.harvard.edu/pub/ransom
"""
from __future__ import print_function
from builtins import range
from builtins import object
import numpy as Num
from presto import ppgplot
# True if we have an /XWIN or /XSERVE device open yet
ppgplot_dev_open_ = 0
# True if we have already scaled and/or prepped the current page
ppgplot_dev_prep_ = 0
# Default plotting device
ppgplot_device_ = ""
# Default font to use
ppgplot_font_ = 1
# Default font size to use
ppgplot_font_size_ = 1.0
# Default line style to use
ppgplot_linestyle_ = 1
# Default line width to use
ppgplot_linewidth_ = 2
# Default symbol to plot
ppgplot_symbol_ = None
# Default label color for contour lines
ppgplot_labels_ = None
# Default label interval for labeling contour lines
ppgplot_labelint_ = 20
# Default minimum label interval for labeling contour lines
ppgplot_labelmin_ = 20
# Default line colors to use
ppgplot_color_ = 'white'
# Default color palette for IMAG routines
ppgplot_palette_ = 'rainbow'
# The set of colors for PGPLOT
ppgplot_colors_ = { \
'black':0, 'Black':0, 'BLACK':0, \
'white':1, 'White':1, 'WHITE':1, \
'red':2, 'Red':2, 'RED':2, \
'green':3, 'Green':3, 'GREEN':3, \
'blue':4, 'Blue':4, 'BLUE':4, \
'cyan':5, 'Cyan':5, 'CYAN':5, \
'magenta':6, 'Magenta':6, 'MAGENTA':6, \
'yellow':7, 'Yellow':7, 'YELLOW':7, \
'orange':8, 'Orange':8, 'ORANGE':8, \
'green2':9, 'Green2':9, 'GREEN2':9, \
'green3':10, 'Green3':10, 'GREEN3':10, \
    'blue2':11, 'Blue2':11, 'BLUE2':11, \
'purple':12, 'Purple':12, 'PURPLE':12, \
'pink':13, 'Pink':13, 'PINK':13, \
'darkgray':14, 'DarkGray':14, 'DARKGRAY':14, \
'dark gray':14, 'Dark Gray':14, 'DARK GRAY':14, \
'lightgray':15, 'LightGray':15, 'LIGHTGRAY':15, \
'light gray':15, 'Light Gray':15, 'LIGHT GRAY':15 \
}
# Show a 2D color intensity plot with optional arguments and keywords
def plot_waterfall(z, x=None, y=None, title=None, rangex=None, rangey=None, \
rangez=None, labx='', laby='', rangex2=None, rangey2=None, \
labx2='', laby2='', image=ppgplot_palette_, contours=None, \
logx=0, logy=0, logx2=0, logy2=0, \
line=ppgplot_linestyle_, width=ppgplot_linewidth_, \
color=ppgplot_color_, labels=ppgplot_labels_, \
labelint=ppgplot_labelint_, labelmin=ppgplot_labelmin_, \
font=ppgplot_font_, id=0, noscale=0, aspect=1, \
fontsize=ppgplot_font_size_, ticks='out', panels=[1,1], \
device=ppgplot_device_):
"""
    plot_waterfall(z, ...)
    An interface to make a 2D grey-scale (waterfall) plot using PGPLOT.
'z' is the 2D Numpy array to be plotted.
The optional entries are:
x: x values (default = 0, 1, ...)
y: y values (default = 0, 1, ...)
title: graph title (default = None)
rangex: range for the x-axis (default = automatic)
rangey: range for the y-axis (default = automatic)
rangez: range for the z-axis (default = automatic)
labx: label for the x-axis (default = None)
laby: label for the y-axis (default = None)
rangex2: range for 2nd x-axis (default = None)
rangey2: range for 2nd y-axis (default = None)
labx2: label for the 2nd x-axis (default = None)
laby2: label for the 2nd y-axis (default = None)
logx: make the 1st x-axis log (default = 0 (no))
logy: make the 1st y-axis log (default = 0 (no))
logx2: make the 2nd x-axis log (default = 0 (no))
logy2: make the 2nd y-axis log (default = 0 (no))
image: color palette for image (default = 'rainbow')
contours: list of contour values (default = None)
line: contour line style (default = 1 (solid))
width: contour line width (default = 1 (thin))
color: contour line color (default = 'white')
labels: color of contour labels (default = None)
labelint: contour label spacing (default = 20)
labelmin: min contour label spacing (default = 20)
font: PGPLOT font to use (default = 1 (normal))
fontsize: PGPLOT font size to use (default = 1.0 (normal))
id: show ID line on plot (default = 0 (no))
noscale: turn off auto scaling (default = 0 (no))
aspect: Aspect ratio (default = 1 (square))
ticks: Ticks point in or out (default = 'out')
panels: Number of subpanels [r,c] (default = [1,1])
device: PGPLOT device to use (default = '')
Note: Many default values are defined in global variables
with names like ppgplot_font_ or ppgplot_device_.
"""
# Make sure the input data is a 2D array
    z = Num.asarray(z)
if not len(z.shape)==2:
print('Input data array must be 2 dimensional.')
return
# Announce the global variables we will be using
global ppgplot_dev_open_, ppgplot_dev_prep_, pgpalette
# Define the X and Y axis limits if needed
if x is None: x=Num.arange(z.shape[1], dtype='f')
else: x = Num.asarray(x)
if y is None: y=Num.arange(z.shape[0], dtype='f')
else: y = Num.asarray(y)
# Determine the scaling to use for the axes
if rangex is None:
dx = x[-1]-x[-2]
rangex=[x[0], x[-1]+dx]
if rangey is None:
dy = y[-1]-y[-2]
rangey=[y[0], y[-1]+dy]
if rangez is None: rangez=[Num.minimum.reduce(Num.ravel(z)), \
Num.maximum.reduce(Num.ravel(z))]
if image is not None:
# Set the color indices and the color table
lo_col_ind, hi_col_ind = ppgplot.pgqcol()
lo_col_ind = lo_col_ind + 2
ppgplot.pgscir(lo_col_ind, hi_col_ind)
pgpalette.setpalette(image)
ppgplot.pgctab(pgpalette.l,pgpalette.r,pgpalette.g,pgpalette.b)
# Construct the image
ppgplot.pggray_s(z, 0.0, 0.0, rangex[0], rangey[0], \
rangex[1], rangey[1])
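
# Illustrative sketch (not in the original PRESTO source): a minimal call to
# plot_waterfall() on synthetic data.  It assumes a PGPLOT device has already
# been opened (e.g. with ppgplot.pgopen); the axis values and the 'heat'
# palette are arbitrary choices for demonstration only.
def _example_plot_waterfall():
    """Hypothetical demo of plot_waterfall() on a random 2D array."""
    z = Num.random.standard_normal((64, 256))      # freq x time
    t = Num.arange(z.shape[1]) * 0.01              # seconds
    f = Num.linspace(1200.0, 1500.0, z.shape[0])   # MHz
    plot_waterfall(z, x=t, y=f, labx='Time (s)', laby='Frequency (MHz)',
                   image='heat')
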
def dm_time_plot(dms, times, sigmas, dm_arr, sigma_arr, time_arr, Total_observed_time, xwin):
"""
Plot DM vs Time subplot for the spd plots.
Input:
dms: list of dms of single pulse events to be plotted.
times: list of times of single pulse events to be plotted.
sigmas: list of sigmas of single pulse events to be plotted.
dm_arr: array of dms of the main single pulse group (plotted in black).
sigma_arr: array of sigmas of the main single pulse group (plotted in black).
time_arr: array of times of single pulse group (plotted in black).
Total_observed_time: float : Total observation time
        xwin: True or False. Plot to an XWIN device or a VCPS (PostScript) device.
"""
min_dm = Num.min(dms)
max_dm = Num.max(dms)
ppgplot.pgswin(0, Total_observed_time, min_dm, max_dm)
ppgplot.pgsch(0.8)
ppgplot.pgslw(3)
ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
ppgplot.pgslw(3)
ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time (s)")
ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "DM (pc cm\\u-3\\d)")
snr_range = 12.0
cand_symbols = []
cand_symbols_group = []
for i in range(len(sigmas)):
if sigmas[i] > 20.00:
sigmas[i] = 20.0
cand_symbol = int((sigmas[i] - 5.0)/snr_range * 6.0 + 20.5)
cand_symbols.append(min(cand_symbol, 26))
cand_symbols = Num.array(cand_symbols)
for i in range(len(dm_arr)):
cand_symbol = int((sigma_arr[i] - 5.0)/snr_range * 6.0 + 20.5)
cand_symbols_group.append(min(cand_symbol, 26))
cand_symbols_group = Num.array(cand_symbols_group)
dms = Num.array(dms)
times = Num.array(times)
dm_arr = Num.array(dm_arr)
time_arr = Num.array(time_arr)
for ii in [26, 25, 24, 23, 22, 21, 20]:
inds = Num.nonzero(cand_symbols == ii)[0]
ppgplot.pgshls(1, 0.0, 0.5, 0.0)
ppgplot.pgpt(times[inds], dms[inds], ii)
for ii in [26, 25, 24, 23, 22, 21, 20]:
inds_1 = Num.nonzero(cand_symbols_group == ii)[0]
if xwin:
ppgplot.pgshls(1, 0.0, 0.8, 0.0)
else:
ppgplot.pgshls(1, 0.0, 0.0, 0.0)
ppgplot.pgpt(time_arr[inds_1], dm_arr[inds_1], ii)
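
# Illustrative sketch (not in the original PRESTO source): the sigma-to-marker
# mapping used in dm_time_plot() above, pulled out as a standalone helper.
# Sigmas are clipped at 20, scaled over snr_range = 12 above a 5-sigma floor,
# and the resulting PGPLOT symbol code is capped at 26.
def _example_sigma_to_symbol(sigma, snr_range=12.0):
    """Hypothetical helper mirroring the marker-size logic in dm_time_plot()."""
    sigma = min(sigma, 20.0)
    return min(int((sigma - 5.0) / snr_range * 6.0 + 20.5), 26)
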
#########################################################################
class Palette(object):
# Set the color palette
def setpalette(self, palette):
"""
setpalette(self, palette):
Set the color palette for imag-style routines
"""
if (palette == 'rainbow'):
self.l = Num.array([0.0, 0.015, 0.225, 0.4, 0.59,
0.6, 0.775, 0.955, 0.965, 1.0])
self.r = Num.array([1.0, 1.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.947, 1.0, 1.0])
self.g = Num.array([0.0, 0.0, 1.0, 1.0, 1.0,
0.946, 0.0, 0.8, 0.844, 1.0])
self.b = Num.array([0.0, 0.0, 0.0, 0.0, 0.95,
1.0, 1.0, 1.0, 1.0, 1.0])
elif (palette == 'antirainbow'):
self.l = Num.array([0.0, 0.035, 0.045, 0.225, 0.4,
0.41, 0.6, 0.775, 0.985, 1.0])
self.r = Num.array([1.0, 1.0, 0.947, 0.0, 0.0,
0.0, 0.0, 1.0, 1.0, 1.0])
self.g = Num.array([1.0, 0.844, 0.8, 0.0, 0.946,
1.0, 1.0, 1.0, 0.0, 0.0])
self.b = Num.array([1.0, 1.0, 1.0, 1.0, 1.0,
0.95, 0.0, 0.0, 0.0, 0.0])
elif (palette == 'astro'):
self.l = Num.array([0.0, 0.167, 0.333, 0.5,
0.667, 0.833, 1.0])
self.r = Num.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0])
self.g = Num.array([0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0])
self.b = Num.array([0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0])
elif (palette == 'hue'):
self.l = Num.array([0.0, 0.167, 0.333, 0.5,
0.667, 0.833, 1.0])
self.r = Num.array([1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0])
self.g = Num.array([0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
self.b = Num.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0])
elif (palette == 'heat'):
self.l = Num.array([0.0, 0.48, 0.7, 0.75, 1.0])
self.r = Num.array([0.0, 1.0, 1.0, 1.0, 1.0])
self.g = Num.array([0.0, 0.0, 0.423, 0.519, 1.0])
self.b = Num.array([0.0, 0.0, 0.0, 0.0, 1.0])
elif (palette == 'gamma'):
self.l = Num.array([0.0, 0.33, 0.66, 1.0])
self.r = Num.array([0.3, 1.0, 0.0, 0.0])
self.g = Num.array([0.0, 0.3, 1.0, 0.0])
self.b = Num.array([0.0, 0.0, 0.3, 1.0])
elif (palette == 'antigray' or palette == 'antigrey'):
self.l = Num.array([0.0, 1.0])
self.r = Num.array([1.0, 0.0])
self.g = Num.array([1.0, 0.0])
self.b = Num.array([1.0, 0.0])
elif (palette == 'apjgray' or palette == 'apjgrey'):
self.l = Num.array([0.0, 1.0])
self.r = Num.array([1.0, 0.25])
self.g = Num.array([1.0, 0.25])
self.b = Num.array([1.0, 0.25])
else:
self.l = Num.array([0.0, 1.0])
self.r = Num.array([0.0, 1.0])
self.g = Num.array([0.0, 1.0])
self.b = Num.array([0.0, 1.0])
pgpalette = Palette()
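
# Illustrative sketch (not in the original PRESTO source): loading one of the
# palettes defined above into the PGPLOT color table, mirroring what
# plot_waterfall() does internally.  Assumes a PGPLOT device is already open.
def _example_load_palette(name='antirainbow'):
    """Hypothetical demo of Palette.setpalette() followed by pgctab."""
    lo_col_ind, hi_col_ind = ppgplot.pgqcol()
    ppgplot.pgscir(lo_col_ind + 2, hi_col_ind)
    pgpalette.setpalette(name)
    ppgplot.pgctab(pgpalette.l, pgpalette.r, pgpalette.g, pgpalette.b)
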
| 12,414 | 42.25784 | 93 | py |
presto | presto-master/python/presto/singlepulse/__init__.py | from presto.singlepulse import spio
from presto.singlepulse import bary_and_topo
from presto.singlepulse import read_spd
from presto.singlepulse import spcand
from presto.singlepulse import sp_pgplot
from presto.singlepulse import rrattrap_config
from presto.singlepulse import rrattrap
#from presto.singlepulse import make_spd
from presto.singlepulse import plot_spd
| 368 | 35.9 | 46 | py |
presto | presto-master/python/presto/singlepulse/plot_spd.py | #! /usr/bin/env python
"""
plot_spd.py
Generate spd plots either using information from the .spd files that are generated by make_spd.py.
Usage: plot_spd.py [OPTIONS] <.spd file> <.singlepulse files (optional:
if not provided, will leave DM vs Time window blank).>
Chitrang Patel - June 10, 2016.
"""
from __future__ import print_function
from builtins import map
import numpy as np
import optparse
import tarfile
from subprocess import Popen, PIPE
from presto.singlepulse import sp_pgplot
from presto.singlepulse import read_spd
from presto.singlepulse import spio
def plot(spdfile, singlepulsefiles=None, spec_width=1.5, loc_pulse=0.5, xwin=False, outfile="spdplot",
just_waterfall=True, integrate_spec=True, integrate_ts=True, disp_pulse=True, tar=None):
"""
Generates spd plots which include the following subplots:
De-dispersed Zero-DM filtered Waterfall plot
De-dispersed Waterfall plot
optional subplots:
Dispersed Zero-DM filtered Waterfall plot (Inset of the corresponding dedispersed plot).
Dispersed Waterfall plot ((Inset of the corresponding dedispersed plot).).
Dedispersed zero-DM filtered time series for the corresponding waterfall plot.
Dedispersed time series for the corresponding waterfall plot.
Spectra of the de-dispersed pulse for each of the above waterfalled plots.
SNR vs DM
DM vs. Time
Inputs:
spdfile: A .spd file.
Optional Inputs:
spec_width: Twice this number times the pulse_width around the pulse to consider for the spectrum
loc_pulse: Fraction of the window length where the pulse is located.(eg. 0.25 = 1/4th of the way in.
0.5 = middle of the plot)
singlepulsefiles: list of .singlepulse files
xwin: plot in an xwin window?
outfile: name of the output file you want.
just_waterfall: Do you only want to display the waterfall plots?
integrate_spec: Do you want to show the pulse spectrum?
integrate_ts: Do you want to show the time series?
disp_pulse: Do you want to show the inset dispersed pulse?
tar: Supply the tarball of the singlepulse files instead of individual files.
"""
if not spdfile.endswith(".spd"):
raise ValueError("The first file must be a .spd file")
# npzfile = np.load(spdfile)
spdobj = read_spd.spd(spdfile)
##### Read in the header information and other required variables for the plots. ######
# text_array = npzfile['text_array']
man_params = spdobj.man_params
fn = spdobj.filename
telescope = spdobj.telescope
RA = spdobj.ra
dec = spdobj.dec
MJD = spdobj.mjd
mjd = Popen(["mjd2cal", "%f" % MJD], stdout=PIPE, stderr=PIPE)
    date, err = mjd.communicate()
    date = date.decode().split()[2:5]
rank = spdobj.rank
nsub = spdobj.waterfall_nsubs
nbins = spdobj.nsamp
subdm = dm = sweep_dm = spdobj.best_dm
sigma = spdobj.sigma
sample_number = spdobj.pulse_peak_sample
duration = spdobj.waterfall_duration
width_bins = spdobj.pulsewidth_bins
pulse_width = spdobj.pulsewidth_seconds
tsamp = spdobj.tsamp
Total_observed_time = spdobj.total_obs_time
topo_start = spdobj.pulse_peak_time
start = topo_start - loc_pulse * duration
datastart = spdobj.waterfall_start_time
datasamp = spdobj.waterfall_tsamp
datanumspectra = spdobj.waterfall_prededisp_nbins
min_freq = spdobj.min_freq
max_freq = spdobj.max_freq
sweep_duration = spdobj.sweep_duration
sweeped_start = spdobj.sweep_start_time
bary_start = spdobj.bary_pulse_peak_time
downsamp = datasamp / tsamp
if xwin:
pgplot_device = "/XWIN"
else:
pgplot_device = ""
if pgplot_device:
sp_pgplot.ppgplot.pgopen(pgplot_device)
else:
if (outfile == "spdplot"): # default filename
if rank:
sp_pgplot.ppgplot.pgopen(
fn[:-5] + '_DM%.1f_%.1fs_rank_%i.spd.ps/VPS' % (subdm, (start + loc_pulse * duration), rank))
else:
sp_pgplot.ppgplot.pgopen(fn[:-5] + '_DM%.1f_%.1fs.spd.ps/VPS' % (subdm, (start + loc_pulse * duration)))
else:
if rank:
sp_pgplot.ppgplot.pgopen(
outfile + '_DM%.1f_%.1fs_rank_%i.spd.ps/VPS' % (subdm, (start + loc_pulse * duration), rank))
else:
sp_pgplot.ppgplot.pgopen(outfile + '_DM%.1f_%.1fs.spd.ps/VPS' % (subdm, (start + loc_pulse * duration)))
if (just_waterfall == False):
sp_pgplot.ppgplot.pgpap(10.25, 8.5 / 11.0)
# Dedispersed waterfall plot - zerodm - OFF
array = spdobj.data_nozerodm_dedisp.astype(np.float64)
sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.50, 0.80)
sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + datanumspectra * datasamp, min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCNST", 0, 0)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)")
if not integrate_spec:
sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - Off")
sp_pgplot.plot_waterfall(array, rangex=[datastart - start, datastart - start + datanumspectra * datasamp],
rangey=[min_freq, max_freq], image='apjgrey')
#### Plot Dedispersed Time series - Zerodm filter - Off
Dedisp_ts = array[::-1].sum(axis=0)
times = np.arange(datanumspectra) * datasamp
if integrate_ts:
sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.80, 0.90)
sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + duration, np.min(Dedisp_ts),
1.05 * np.max(Dedisp_ts))
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
sp_pgplot.ppgplot.pgsci(1)
sp_pgplot.ppgplot.pgline(times, Dedisp_ts)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgsci(1)
errx1 = np.array([0.60 * (datastart - start + duration)])
erry1 = np.array([0.60 * np.max(Dedisp_ts)])
erry2 = np.array([np.std(Dedisp_ts)])
errx2 = np.array([pulse_width])
sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0)
sp_pgplot.ppgplot.pgpt(errx1, erry1, -1)
#### Plot Spectrum - Zerodm filter - Off
if integrate_spec:
spectrum_window = spec_width * pulse_width
window_width = int(spectrum_window / datasamp)
# burst_bin = int(datanumspectra*loc_pulse/downsamp)
burst_bin = int(nbins * loc_pulse / downsamp)
on_spec = array[..., burst_bin - window_width:burst_bin + window_width]
Dedisp_spec = on_spec.sum(axis=1)
freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec))
sp_pgplot.ppgplot.pgsvp(0.4, 0.47, 0.5, 0.8)
sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05 * np.max(Dedisp_spec), min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
sp_pgplot.ppgplot.pgsci(1)
sp_pgplot.ppgplot.pgline(Dedisp_spec, freqs)
sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - Off")
sp_pgplot.ppgplot.pgsch(0.7)
sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum")
sp_pgplot.ppgplot.pgsch(0.8)
# Dedispersed waterfall plot - Zerodm ON
sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.1, 0.40)
sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + datanumspectra * datasamp, min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time - %.2f s" % datastart)
sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)")
if not integrate_spec:
sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - On")
array = spdobj.data_zerodm_dedisp.astype(np.float64)
sp_pgplot.plot_waterfall(array, rangex=[datastart - start, datastart - start + datanumspectra * datasamp],
rangey=[min_freq, max_freq], image='apjgrey')
#### Plot Dedispersed Time series - Zerodm filter - On
dedisp_ts = array[::-1].sum(axis=0)
times = np.arange(datanumspectra) * datasamp
if integrate_ts:
sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.40, 0.50)
sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + duration, np.min(dedisp_ts),
1.05 * np.max(dedisp_ts))
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
sp_pgplot.ppgplot.pgsci(1)
sp_pgplot.ppgplot.pgline(times, dedisp_ts)
errx1 = np.array([0.60 * (datastart - start + duration)])
erry1 = np.array([0.60 * np.max(dedisp_ts)])
erry2 = np.array([np.std(dedisp_ts)])
errx2 = np.array([pulse_width])
sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0)
sp_pgplot.ppgplot.pgpt(errx1, erry1, -1)
#### Plot Spectrum - Zerodm filter - On
if integrate_spec:
spectrum_window = spec_width * pulse_width
window_width = int(spectrum_window / datasamp)
# burst_bin = int(datanumspectra*loc_pulse/downsamp)
burst_bin = int(nbins * loc_pulse / downsamp)
on_spec = array[..., burst_bin - window_width:burst_bin + window_width]
Dedisp_spec = on_spec.sum(axis=1)
freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec))
sp_pgplot.ppgplot.pgsvp(0.4, 0.47, 0.1, 0.4)
sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05 * np.max(Dedisp_spec), min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
sp_pgplot.ppgplot.pgsci(1)
sp_pgplot.ppgplot.pgline(Dedisp_spec, freqs)
sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - On")
sp_pgplot.ppgplot.pgsch(0.7)
sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum")
sp_pgplot.ppgplot.pgsch(0.8)
if disp_pulse:
# Sweeped waterfall plot Zerodm - OFF
array = spdobj.data_nozerodm.astype(np.float64)
sp_pgplot.ppgplot.pgsvp(0.20, 0.40, 0.50, 0.70)
sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start + sweep_duration, min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(4)
sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0)
sp_pgplot.ppgplot.pgsch(3)
sp_pgplot.plot_waterfall(array, rangex=[sweeped_start, sweeped_start + sweep_duration],
rangey=[min_freq, max_freq], image='apjgrey')
delays = spdobj.dmsweep_delays
freqs = spdobj.dmsweep_freqs
sp_pgplot.ppgplot.pgslw(5)
sweepstart = sweeped_start - 0.2 * sweep_duration
sp_pgplot.ppgplot.pgsci(0)
sp_pgplot.ppgplot.pgline(delays + sweepstart, freqs)
sp_pgplot.ppgplot.pgsci(1)
sp_pgplot.ppgplot.pgslw(3)
# Sweeped waterfall plot Zerodm - ON
array = spdobj.data_zerodm.astype(np.float64)
sp_pgplot.ppgplot.pgsvp(0.20, 0.40, 0.1, 0.3)
sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start + sweep_duration, min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(4)
sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0)
sp_pgplot.ppgplot.pgsch(3)
sp_pgplot.plot_waterfall(array, rangex=[sweeped_start, sweeped_start + sweep_duration],
rangey=[min_freq, max_freq], image='apjgrey')
sp_pgplot.ppgplot.pgslw(5)
sweepstart = sweeped_start - 0.2 * sweep_duration
sp_pgplot.ppgplot.pgsci(0)
sp_pgplot.ppgplot.pgline(delays + sweepstart, freqs)
sp_pgplot.ppgplot.pgsci(1)
#### Figure texts
if integrate_spec:
sp_pgplot.ppgplot.pgsvp(0.81, 0.97, 0.64, 0.909)
sp_pgplot.ppgplot.pgsch(0.62)
else:
sp_pgplot.ppgplot.pgsvp(0.745, 0.97, 0.64, 0.909)
sp_pgplot.ppgplot.pgsch(0.7)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.01, 0.0, "RA: %s" % RA)
sp_pgplot.ppgplot.pgmtxt('T', -2.6, 0.01, 0.0, "DEC: %s" % dec)
sp_pgplot.ppgplot.pgmtxt('T', -4.1, 0.01, 0.0, "MJD: %f" % MJD)
sp_pgplot.ppgplot.pgmtxt('T', -5.6, 0.01, 0.0, "Obs date: %s %s %s" % (date[0], date[1], date[2]))
sp_pgplot.ppgplot.pgmtxt('T', -7.1, 0.01, 0.0, "Telescope: %s" % telescope)
sp_pgplot.ppgplot.pgmtxt('T', -8.6, 0.01, 0.0, "DM: %.2f pc cm\\u-3\\d" % dm)
if sigma:
sp_pgplot.ppgplot.pgmtxt('T', -10.1, 0.01, 0.0, "S/N\\dMAX\\u: %.2f" % sigma)
else:
sp_pgplot.ppgplot.pgmtxt('T', -10.1, 0.01, 0.0, "S/N\\dMAX\\u: N/A")
sp_pgplot.ppgplot.pgmtxt('T', -11.6, 0.01, 0.0, "Number of samples: %i" % nbins)
sp_pgplot.ppgplot.pgmtxt('T', -13.1, 0.01, 0.0, "Number of subbands: %i" % nsub)
sp_pgplot.ppgplot.pgmtxt('T', -14.6, 0.01, 0.0, "Pulse width: %.2f ms" % (pulse_width * 1e3))
sp_pgplot.ppgplot.pgmtxt('T', -16.1, 0.01, 0.0, "Sampling time: %.3f \\gms" % (tsamp * 1e6))
sp_pgplot.ppgplot.pgmtxt('T', -17.6, 0.0, 0.0, "Bary pulse peak time: %.2f s" % (bary_start))
sp_pgplot.ppgplot.pgsvp(0.07, 0.7, 0.01, 0.05)
sp_pgplot.ppgplot.pgmtxt('T', -2.1, 0.01, 0.0, "%s" % fn)
# DM vs SNR
if not man_params:
dm_arr = np.float32(spdobj.dmVt_this_dms)
sigma_arr = np.float32(spdobj.dmVt_this_sigmas)
time_arr = np.float32(spdobj.dmVt_this_times)
if integrate_spec:
sp_pgplot.ppgplot.pgsvp(0.55, 0.80, 0.65, 0.90)
else:
sp_pgplot.ppgplot.pgsvp(0.48, 0.73, 0.65, 0.90)
sp_pgplot.ppgplot.pgswin(np.min(dm_arr), np.max(dm_arr), 0.95 * np.min(sigma_arr), 1.05 * np.max(sigma_arr))
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\\u-3\\d)")
sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Signal-to-noise")
sp_pgplot.ppgplot.pgpt(dm_arr, sigma_arr, 20)
else:
dm_arr = np.array([])
sigma_arr = np.array([])
time_arr = np.array([])
if integrate_spec:
sp_pgplot.ppgplot.pgsvp(0.55, 0.80, 0.65, 0.90)
else:
sp_pgplot.ppgplot.pgsvp(0.48, 0.73, 0.65, 0.90)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\\u-3\\d)")
sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Signal-to-noise")
# DM vs Time
print("Making arrays for DM vs time plot")
spfiles = singlepulsefiles
threshold = 5.0
if len(spfiles) > 2:
dm_list = list(map(np.float32, list(dm_arr)))
time_list = list(map(np.float32, list(time_arr)))
if integrate_spec:
sp_pgplot.ppgplot.pgsvp(0.55, 0.97, 0.1, 0.54)
else:
sp_pgplot.ppgplot.pgsvp(0.48, 0.97, 0.1, 0.54)
dms, times, sigmas, widths, filelist = spio.gen_arrays(dm_arr, spfiles, tar, threshold)
sp_pgplot.dm_time_plot(dms, times, sigmas, dm_list, sigma_arr, time_list, Total_observed_time, xwin)
else:
print("You need a .singlepulse.tgz file to plot DM vs Time plot.")
if integrate_spec:
sp_pgplot.ppgplot.pgsvp(0.55, 0.97, 0.1, 0.54)
else:
sp_pgplot.ppgplot.pgsvp(0.48, 0.97, 0.1, 0.54)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time (s)")
sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "DM (pc cm\\u-3\\d)")
else:
# sp_pgplot.ppgplot.pgpap(10.25, 10.0/5.0)
sp_pgplot.ppgplot.pgpap(8.0, 1.5)
# Dedispersed waterfall plot - zerodm - OFF
array = spdobj.data_nozerodm_dedisp.astype(np.float64)
sp_pgplot.ppgplot.pgsvp(0.1, 0.70, 0.44, 0.75)
sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + datanumspectra * datasamp, min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCNST", 0, 0)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)")
sp_pgplot.plot_waterfall(array, rangex=[datastart - start, datastart - start + datanumspectra * datasamp],
rangey=[min_freq, max_freq], image='apjgrey')
#### Plot Dedispersed Time series - Zerodm filter - Off
Dedisp_ts = array[::-1].sum(axis=0)
times = np.arange(datanumspectra) * datasamp
if integrate_ts:
sp_pgplot.ppgplot.pgsvp(0.1, 0.70, 0.75, 0.83)
sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + duration, np.min(Dedisp_ts),
1.05 * np.max(Dedisp_ts))
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
sp_pgplot.ppgplot.pgsci(1)
sp_pgplot.ppgplot.pgline(times, Dedisp_ts)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgsci(1)
errx1 = np.array([0.60 * (datastart - start + duration)])
erry1 = np.array([0.60 * np.max(Dedisp_ts)])
erry2 = np.array([np.std(Dedisp_ts)])
errx2 = np.array([pulse_width])
sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0)
sp_pgplot.ppgplot.pgpt(errx1, erry1, -1)
#### Plot Spectrum - Zerodm filter - Off
if integrate_spec:
spectrum_window = spec_width * pulse_width
window_width = int(spectrum_window / datasamp)
# burst_bin = int(datanumspectra*loc_pulse/downsamp)
burst_bin = int(nbins * loc_pulse / downsamp)
on_spec = array[..., burst_bin - window_width:burst_bin + window_width]
Dedisp_spec = on_spec.sum(axis=1)
freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec))
sp_pgplot.ppgplot.pgsvp(0.7, 0.9, 0.44, 0.75)
sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05 * np.max(Dedisp_spec), min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
sp_pgplot.ppgplot.pgsci(1)
sp_pgplot.ppgplot.pgline(Dedisp_spec, freqs)
sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - Off")
sp_pgplot.ppgplot.pgsch(0.7)
sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum")
sp_pgplot.ppgplot.pgsch(0.8)
# Dedispersed waterfall plot - Zerodm ON
array = spdobj.data_zerodm_dedisp.astype(np.float64)
sp_pgplot.ppgplot.pgsvp(0.1, 0.70, 0.05, 0.36)
sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + datanumspectra * datasamp, min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time - %.2f s" % datastart)
sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)")
sp_pgplot.plot_waterfall(array, rangex=[datastart - start, datastart - start + datanumspectra * datasamp],
rangey=[min_freq, max_freq], image='apjgrey')
#### Plot Dedispersed Time series - Zerodm filter - On
dedisp_ts = array[::-1].sum(axis=0)
times = np.arange(datanumspectra) * datasamp
if integrate_ts:
sp_pgplot.ppgplot.pgsvp(0.1, 0.7, 0.36, 0.44)
sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + duration, np.min(dedisp_ts),
1.05 * np.max(dedisp_ts))
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
sp_pgplot.ppgplot.pgsci(1)
sp_pgplot.ppgplot.pgline(times, dedisp_ts)
errx1 = np.array([0.60 * (datastart - start + duration)])
erry1 = np.array([0.60 * np.max(dedisp_ts)])
erry2 = np.array([np.std(dedisp_ts)])
errx2 = np.array([pulse_width])
sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0)
sp_pgplot.ppgplot.pgpt(errx1, erry1, -1)
#### Plot Spectrum - Zerodm filter - On
if integrate_spec:
spectrum_window = spec_width * pulse_width
window_width = int(spectrum_window / datasamp)
# burst_bin = int(datanumspectra*loc_pulse/downsamp)
burst_bin = int(nbins * loc_pulse / downsamp)
on_spec = array[..., burst_bin - window_width:burst_bin + window_width]
Dedisp_spec = on_spec.sum(axis=1)
freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec))
sp_pgplot.ppgplot.pgsvp(0.70, 0.90, 0.05, 0.36)
sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05 * np.max(Dedisp_spec), min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
sp_pgplot.ppgplot.pgsci(1)
sp_pgplot.ppgplot.pgline(Dedisp_spec, freqs)
sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - On")
sp_pgplot.ppgplot.pgsch(0.7)
sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum")
sp_pgplot.ppgplot.pgsch(0.8)
if disp_pulse:
# Sweeped waterfall plot Zerodm - OFF
array = spdobj.data_nozerodm.astype(np.float64)
sp_pgplot.ppgplot.pgsvp(0.3, 0.70, 0.44, 0.65)
sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start + sweep_duration, min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(4)
sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0)
sp_pgplot.ppgplot.pgsch(3)
sp_pgplot.plot_waterfall(array, rangex=[sweeped_start, sweeped_start + sweep_duration],
rangey=[min_freq, max_freq], image='apjgrey')
delays = spdobj.dmsweep_delays
freqs = spdobj.dmsweep_freqs
sp_pgplot.ppgplot.pgslw(5)
sweepstart = sweeped_start - 0.2 * sweep_duration
sp_pgplot.ppgplot.pgsci(0)
sp_pgplot.ppgplot.pgline(delays + sweepstart, freqs)
sp_pgplot.ppgplot.pgsci(1)
sp_pgplot.ppgplot.pgslw(3)
# Sweeped waterfall plot Zerodm - ON
array = spdobj.data_zerodm.astype(np.float64)
sp_pgplot.ppgplot.pgsvp(0.3, 0.70, 0.05, 0.25)
sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start + sweep_duration, min_freq, max_freq)
sp_pgplot.ppgplot.pgsch(0.8)
sp_pgplot.ppgplot.pgslw(4)
sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0)
sp_pgplot.ppgplot.pgsch(3)
sp_pgplot.plot_waterfall(array, rangex=[sweeped_start, sweeped_start + sweep_duration],
rangey=[min_freq, max_freq], image='apjgrey')
sp_pgplot.ppgplot.pgslw(5)
sweepstart = sweeped_start - 0.2 * sweep_duration
sp_pgplot.ppgplot.pgsci(0)
sp_pgplot.ppgplot.pgline(delays + sweepstart, freqs)
sp_pgplot.ppgplot.pgsci(1)
#### Figure texts
sp_pgplot.ppgplot.pgsvp(0.05, 0.95, 0.8, 0.9)
sp_pgplot.ppgplot.pgsch(0.65)
sp_pgplot.ppgplot.pgslw(3)
sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.01, 0.0, "RA: %s" % RA)
sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.01, 0.0, "DEC: %s" % dec)
sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.01, 0.0, "MJD: %f" % MJD)
sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.01, 0.0, "Obs date: %s %s %s" % (date[0], date[1], date[2]))
sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.35, 0.0, "Telescope: %s" % telescope)
sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.35, 0.0, "DM: %.2f pc cm\\u-3\\d" % dm)
if sigma:
sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.35, 0.0, "S/N\\dMAX\\u: %.2f" % sigma)
else:
sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.35, 0.0, "S/N\\dMAX\\u: N/A")
sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.35, 0.0, "Number of samples: %i" % nbins)
sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.65, 0.0, "Number of subbands: %i" % nsub)
sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.65, 0.0, "Pulse width: %.2f ms" % (pulse_width * 1e3))
        sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.65, 0.0, "Sampling time: %.3f \\gms" % (tsamp * 1e6))
sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.65, 0.0, "Bary pulse peak time: %.2f s" % (bary_start))
sp_pgplot.ppgplot.pgiden()
sp_pgplot.ppgplot.pgclos()
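
# Illustrative sketch (not in the original PRESTO source): calling plot()
# directly from Python rather than via the command line.  The file names
# below are placeholders; a real call needs an existing .spd file and,
# optionally, the matching *.singlepulse files (or tarball) for the
# DM-vs-time panel.  The mjd2cal executable must also be on the PATH.
def _example_plot_spd():
    """Hypothetical programmatic use of plot()."""
    spdfile = "obs_DM100.0_10.0s_rank_5.spd"      # placeholder file name
    spfiles = ["obs_DM100.00.singlepulse"]        # placeholder file names
    plot(spdfile, spfiles, xwin=False, outfile="example_spdplot",
         just_waterfall=False, integrate_spec=True, integrate_ts=True,
         disp_pulse=False)
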
def main():
parser = optparse.OptionParser(prog="plot_spd.py",
usage="%prog [OPTIONS] INFILE (.spd file) INFILES (.singlepulse files)")
parser.add_option("-x", "--xwin", action="store_true", dest="xwin",
default=False, help="Don't make a postscript plot, just use an X-window")
parser.add_option("-o", dest="outfile", type="string", default="spdplot",
help="give a base name to the saved plot. DM, time and"
"rank values will be added automatically")
parser.add_option("--spec-width", dest="spec_width", type="float", help="Twice this number times the pulse width"
"is the window around the pulse considered for the spectrum. (Default: 1.5)",
default=1.5)
parser.add_option("--loc", dest="loc_pulse", type="float",
help="Fraction of the window length where the pulse is located."
"(Default: 0.5 half way in.)",
default=0.5)
parser.add_option("--just-waterfall", action="store_true", dest="just_waterfall",
default=False, help="Just produce the waterfall plots.")
parser.add_option("--show-spec", action="store_true", dest="integrate_spec",
default=False, help="Show spectrum.(Default: Show spectrum)")
parser.add_option("--show-ts", action="store_true", dest="integrate_ts",
default=False, help="Show time series.(Default: Don't show time series)")
parser.add_option("--show-sweep", action="store_true", dest="disp_pulse",
default=False, help="Show dispersed pulse.(Default: Don't show dispersed pulse)")
(options, args) = parser.parse_args()
if len(args) == 0:
raise ValueError("need a .spd file and .singlepulse files in that order.")
if not args[0].endswith(".spd"):
raise ValueError("the first file must be a .spd file")
if len(args) == 2:
tar = tarfile.open(args[1], "r:gz") # read in the tarball
filenames = tar.getnames() # get the filenames
plot(args[0], filenames, options.spec_width, options.loc_pulse, options.xwin, options.outfile,
options.just_waterfall,
options.integrate_spec, options.integrate_ts, options.disp_pulse, tar) # make the sp plots
tar.close()
else:
plot(args[0], args[1:], options.spec_width, options.loc_pulse, options.xwin, options.outfile,
options.just_waterfall,
options.integrate_spec, options.integrate_ts, options.disp_pulse, tar=None) # make the sp plots
if __name__ == '__main__':
main()
| 29,248 | 52.083485 | 153 | py |
presto | presto-master/python/presto/singlepulse/rrattrap_config.py | #! /usr/bin/env python
from builtins import range
CLOSE_DM = 2 # pc cm-3
# MIN_GROUP, DM_THRESH, TIME_THRESH will change later on depending on the DDplan.
MIN_GROUP = 50 #minimum group size that is not considered noise
TIME_THRESH = 0.1
DM_THRESH = 0.5
MIN_SIGMA = 8.0
PLOT = True
PLOTTYPE = 'pgplot' # 'pgplot' or 'matplotlib'
RANKS_TO_WRITE = [2,0,3,4,5,6]
RANKS_TO_PLOT = [2,3,4,5,6]
########This is specific to PALFA. You can make a similar plan depending
########on the survey's DDplan. The values are the lodm, factor that multiplies the
######## dm_thresh and time_thresh (I recommend choosing the downsampling factor) and
######## the minimum group size.
######### lodm, dmt(factor), min_group
DMPLAN = [(0.0, 1, 45),
(212.8, 2, 40),
(443.2, 3, 35),
(534.4, 5, 30),
(876.4, 6, 30),
(990.4, 10, 30)]
def use_dmplan(dm):
""" Sets a factor which multiplies the DMthreshold and time_threshold. The factor is
the downsampling rate.
This makes the DM_THRESH and TIME_THRESH depend on the DM instead of having fixed
values throughout. This helps at higher DMs where the DM step size is > 0.5 pc cm-3.
"""
for i in range(len(DMPLAN)-1):
if dm<=DMPLAN[i+1][0] and dm>DMPLAN[i][0]:
dmt, min_group = DMPLAN[i][1], DMPLAN[i][2]
if dm > DMPLAN[i+1][0]:
dmt, min_group = DMPLAN[i+1][1], DMPLAN[i+1][2]
return dmt, min_group
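
# Illustrative sketch (not in the original PRESTO source): how use_dmplan() is
# typically applied to scale the grouping thresholds at a given trial DM.  The
# 600 pc cm^-3 value below is an arbitrary example.
def _example_scaled_thresholds(dm=600.0):
    """Hypothetical demo: scale DM_THRESH/TIME_THRESH with use_dmplan()."""
    dmt, min_group = use_dmplan(dm)
    return DM_THRESH * dmt, TIME_THRESH * dmt, min_group
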
| 1,473 | 35.85 | 93 | py |
presto | presto-master/python/presto/singlepulse/read_spd.py | from builtins import object
import numpy as _np
from scipy.special import erf as _erf
import sys
class spd(object):
"""
A class for reading in single pulse files.
A quick description of each item in the class:
data_zerodm
A 2D freq-vs-time array around the pulse, not dedispersed (zero-DM'd data)
data_zerodm_dedisp
A 2D freq-vs-time array around the pulse, dedispersed (zero-DM'd data)
data_nozerodm
A 2D freq-vs-time array around the pulse, not dedispersed (non-zero-DM'd data)
data_nozerodm_dedisp
A 2D freq-vs-time array around the pulse, dedispersed (non-zero-DM'd data)
spfiles
PRESTO single pulse files for the DM-vs-time scatterplot
dmVt_this_dms
DM values for this pulse in the DM-vs-time scatterplot
dmVt_this_times
Time values for this pulse in the DM-vs-time scatterplot
dmVt_this_sigmas
Sigma values (determining point size) for this pulse in the DM-vs-time scatterplot
dmVt_this_widths
box car values for this pulse.
dmsweep_delays
Delays corresponding to frequencies for drawn-in dispersion sweep
dmsweep_freqs
Frequencies corresponding to delays for drawn-in dispersion sweep
filename
Name of the observation file that was analyzed
telescope
Which telescope was used
ra
Right ascension as hh:mm:ss.s string
dec
Declination as dd:mm:ss.s string
ra_deg
Right ascension in degrees
dec_deg
Declination in degrees
mjd
Observation MJD
total_obs_time
Total duration of the observation this pulse was found in, in seconds
rank
Single pulse sifting rank
tsamp
Sampling time of raw data in seconds
best_dm
Best determined dispersion measure for this event
sigma
Significance of this event
pulse_peak_sample
The sample number in the full dedispersed time series at which this event peaked
pulse_peak_time
The time in seconds in the full dedispersed time series at which this event peaked
pulsewidth_bins
The width of the boxcar filter used to optimally detect this event, in number of bins
pulsewidth_seconds
The width of the boxcar filter used to optimally detect this event, in seconds
nsamp
The number of original time series samples included in the (possibly downsampled) waterfall plot
waterfall_duration
The total duration of the dedispersed waterfall plot
waterfall_start_time
The time (in seconds) in the full dedispersed time series at which the waterfall plot begins
waterfall_tsamp
Sampling time of the waterfall plot in seconds
waterfall_nbins
The number of samples across the dedispersed waterfall plot
waterfall_nsubs
The number of frequency bins across the waterfall plot
waterfall_prededisp_nbins
The number of samples prior to dedispersing and cutting off the ends of the waterfall plot
min_freq
The lowest frequency plotted
max_freq
The highest frequency plotted
sweep_duration
The total duration of the dispersed pulse across the band
sweep_start_time
The time at which to start plotting the dispersed reference line
"""
def __init__(self, spd_file):
dd = dict(_np.load(spd_file))
self.data_zerodm = dd['Data_zerodm']
self.data_zerodm_dedisp = dd['Data_dedisp_zerodm']
self.data_nozerodm = dd['Data_nozerodm']
self.data_nozerodm_dedisp = dd['Data_dedisp_nozerodm']
self.dmsweep_delays = dd['delays_nozerodm']
self.dmsweep_freqs = dd['freqs_nozerodm']
ll = dd['text_array']
self.filename = ll[0]
self.telescope = ll[1]
self.ra = ll[2]
self.dec = ll[3]
self.ra_deg = _np.sum(_np.array(self.ra.split(":"), dtype=float) * _np.array([15., 15./60., 15./3600.]))
dec_arr = _np.array(self.dec.split(":"), dtype=float)
self.dec_deg = _np.sum(_np.abs(dec_arr) * _np.sign(dec_arr[0]) * _np.array([1., 1./60., 1./3600.]))
self.mjd = float(ll[4])
self.total_obs_time = float(ll[15])
if ll[5]:
self.rank = int(ll[5])
else:
self.rank=None
self.tsamp = float(ll[14])
self.best_dm = float(ll[8])
if ll[9]:
self.sigma = float(ll[9])
else:
self.sigma = None
self.pulse_peak_sample = int(ll[10])
self.pulse_peak_time = float(ll[16])
self.pulsewidth_bins = int(ll[12])
self.pulsewidth_seconds = float(ll[13])
self.nsamp = int(ll[7])
self.waterfall_duration = float(ll[11])
self.waterfall_start_time = float(ll[17])
self.waterfall_tsamp = float(ll[18])
self.waterfall_nbins = self.data_zerodm_dedisp.shape[1]
self.waterfall_nsubs = int(ll[6])
self.waterfall_prededisp_nbins = int(ll[19])
self.waterfall_downsamp = int(_np.round(self.waterfall_tsamp/self.tsamp))
self.min_freq = float(ll[20])
self.max_freq = float(ll[21])
self.sweep_duration = float(ll[22])
self.sweep_start_time = float(ll[23])
self.bary_pulse_peak_time = float(ll[24])
self.man_params = ll[25]
#self.spfiles = dd['singlepulse_files']
if not self.man_params:
self.dmVt_this_dms = dd['dm_arr']
self.dmVt_this_times = _np.array(dd['time_list'])
self.dmVt_this_sigmas = dd['sigma_arr']
self.dmVt_this_widths = dd['width_arr']
# Get variance from the half of the waterfall plot that definitely should not contain the pulse
# (which is 1/4 of the way into the plot)
        self.varprof = _np.var(self.data_zerodm_dedisp.sum(axis=0)[(self.waterfall_nbins//2):])
#### Print the spd information ####
sys.stdout.write("\n")
sys.stdout.write("\t\tReading Header information: \n")
sys.stdout.write("\n")
sys.stdout.write(" filename = %s\n"%self.filename)
sys.stdout.write(" telescope = %s\n"%self.telescope)
sys.stdout.write(" RA = %s\n"%self.ra)
sys.stdout.write(" DEC = %s\n"%self.dec)
sys.stdout.write(" min_freq = %0.2f\n"%self.min_freq)
sys.stdout.write(" max_freq = %0.2f\n"%self.max_freq)
sys.stdout.write(" tsamp = %f\n"%self.tsamp)
sys.stdout.write(" Total_obs_time = %f\n"%self.total_obs_time)
sys.stdout.write(" MJD = %f\n"%self.mjd)
sys.stdout.write(" DM = %.2f\n"%self.best_dm)
if self.sigma:
sys.stdout.write(" sigma = %.2f\n"%self.sigma)
if self.rank:
sys.stdout.write(" rank = %i\n"%self.rank)
sys.stdout.write(" pulse_peak_sample = %i\n"%self.pulse_peak_sample)
sys.stdout.write(" pulse_peak_time = %0.2f\n"%self.pulse_peak_time)
sys.stdout.write(" pulse_width_bins = %i\n"%self.pulsewidth_bins)
sys.stdout.write(" pulse_width_secs = %0.2f\n"%self.pulsewidth_seconds)
sys.stdout.write(" nsamp = %i\n"%self.nsamp)
sys.stdout.write(" waterfall_duration = %0.2f\n"%self.waterfall_duration)
sys.stdout.write(" waterfall_start_time = %0.2f\n"%self.waterfall_start_time)
sys.stdout.write(" waterfall_tsamp = %0.2f\n"%self.waterfall_tsamp)
sys.stdout.write(" waterfall_nsubs = %i\n"%self.waterfall_nsubs)
sys.stdout.write(" waterfall_prededisp_nbins = %i\n"%self.waterfall_prededisp_nbins)
sys.stdout.write(" sweep_duration = %0.2f\n"%self.sweep_duration)
sys.stdout.write(" sweep_start_time = %0.2f\n"%self.sweep_start_time)
sys.stdout.write(" bary_pulse_peak_time = %0.2f\n"%self.bary_pulse_peak_time)
def waterfall_time_axis(self, use_timeseries_time=False):
"""
Generate a time axis for the waterfall plot in seconds, either beginning
at zero or at the duration into the time series at which the plot actually
begins.
"""
self.waterfall_tsamp
self.waterfall_start_time
self.waterfall_nbins
time_axis = _np.arange(0, self.waterfall_duration, self.waterfall_tsamp)[:self.waterfall_nbins]
if use_timeseries_time: return time_axis + self.waterfall_start_time
else: return time_axis
def waterfall_freq_axis(self):
"""
Generate a frequency axis for the waterfall plot.
"""
return _np.linspace(self.min_freq, self.max_freq, self.waterfall_nsubs, endpoint=False)
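
# Illustrative sketch (not part of the original class): reading a .spd file
# and reconstructing the axes of its dedispersed waterfall plot.  The file
# name below is a placeholder.
def _example_read_spd(spd_file="cand_DM100.0_10.0s.spd"):
    """Hypothetical demo of the spd class and its axis helpers."""
    cand = spd(spd_file)
    times = cand.waterfall_time_axis(use_timeseries_time=True)  # seconds
    freqs = cand.waterfall_freq_axis()                          # MHz
    return cand.data_nozerodm_dedisp, times, freqs
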
def ddm_response(ddm, width_ms, band_MHz=(1214., 1537.)):
"""
Returns the factor by which the S/N of a pulse of a given width observed
in a particular radio band should decrease given an error in dispersion
measure.
ddm
Difference from optimal dispersion measure in pc/cm^3
width_ms
Pulse width in milliseconds
band
The bottom and top of the observing band in MHz
(default: the Arecibo Mock band)
"""
if _np.isscalar(ddm):
ddm = _np.array([ddm])
scal = True
else:
ddm = _np.array(ddm)
scal = False
band_MHz = _np.array(band_MHz)
zeta = 6.91e-3 * ddm * _np.diff(band_MHz)[0] / (width_ms * (_np.mean(band_MHz)/1000.)**3)
result = _np.zeros_like(ddm)
where_nonzero = _np.where(zeta != 0)
result[where_nonzero] = 0.5*_np.sqrt(_np.pi)*_erf(zeta[where_nonzero])/zeta[where_nonzero]
result[zeta == 0] = 1.
if scal: return result[0]
else: return result
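
# Illustrative sketch (not in the original PRESTO source): the fractional S/N
# retained, as predicted by ddm_response(), for a 1 ms pulse detected
# 1 pc cm^-3 away from its true DM in the default (Arecibo Mock) band.
def _example_ddm_loss():
    """Hypothetical demo of ddm_response() for a single (ddm, width) pair."""
    frac = ddm_response(1.0, 1.0)   # ddm = 1 pc cm^-3, width = 1 ms
    return frac                     # a value between 0 and 1
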
| 9,961 | 41.033755 | 112 | py |
presto | presto-master/python/presto/singlepulse/make_spd.py | #!/usr/bin/env python
"""
make_spd.py
Make single pulse plots which include the waterfall plots and dedispersed time series with Zero-DM On/Off.
Also includes Signal-to-noise vs DM and DM vs Time subplots.
Usage on the command line:
python make_spd.py [OPTIONS] <psrfits file> <singlepulse files>
Chitrang Patel - May. 21, 2015 -- Updated on June 10 2016
"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import map
from builtins import range
from time import strftime
import numpy as np
import optparse
from presto import waterfaller
from presto import psr_utils
from presto.singlepulse import plot_spd
from presto.singlepulse import spcand
from presto.singlepulse import spio
from presto import psrfits
from presto import filterbank
#import filterbank need to implement in PRESTO
DEBUG = True
def print_debug(msg):
if DEBUG:
print(msg)
def waterfall_array(rawdatafile, start, duration, dm, nbins, nsub, subdm, zerodm,
downsamp, scaleindep, width_bins, mask, maskfn, bandpass_corr):
"""
Runs the waterfaller. If dedispersing, there will be extra bins added to the 2D plot.
Inputs:
Inputs required for the waterfaller. dm, nbins, etc.
Outputs:
data: 2D array as an "object"
array: 2D array ready to be plotted by sp_pgplot.plot_waterfall(array).
"""
data, bins, nbins, start = waterfaller.waterfall(rawdatafile, start, duration, dm=dm, nbins=nbins,
nsub=nsub, subdm=subdm, zerodm=zerodm,
downsamp=downsamp, scaleindep=scaleindep,
width_bins=width_bins, mask=mask,
maskfn=maskfn, bandpass_corr=bandpass_corr)
array = np.array(data.data)
if dm is not None: # If dedispersing the data, extra bins will be added. We need to cut off the extra bins to get back the appropriate window size.
ragfac = float(nbins)/bins
dmrange, trange = array.shape
        nbinlim = int(trange * ragfac)
else:
nbinlim = nbins
array = array[..., :nbinlim]
array = (array[::-1]).astype(np.float16)
return data, array
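
# Illustrative sketch (not in the original PRESTO source): a direct call to
# waterfall_array() for a 0.1 s window of data.  The PSRFITS file name and
# all plot parameters below are placeholders chosen for demonstration only.
def _example_waterfall_array(fits_name="observation.fits"):
    """Hypothetical demo of waterfall_array() on a PSRFITS file."""
    rawdatafile = psrfits.PsrfitsFile(fits_name)
    data, array = waterfall_array(rawdatafile, start=10.0, duration=0.1,
                                  dm=100.0, nbins=128, nsub=64, subdm=100.0,
                                  zerodm=False, downsamp=1, scaleindep=False,
                                  width_bins=1, mask=False, maskfn=None,
                                  bandpass_corr=False)
    return data, array
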
def make_spd_from_file(spdcand, rawdatafile,
txtfile, maskfile,
min_rank, group_rank,
plot, just_waterfall,
integrate_ts, integrate_spec, disp_pulse,
loc_pulse, nsub,
maxnumcands,
basename,
mask=False, bandpass_corr=True, barytime=True,
man_params=None):
"""
Makes spd files from output files of rratrap.
Inputs:
spdcand: spcand parameters instance (read in spcand.params)
rawdatafile: psrfits file instance
txtfile: rratrap output file (groups.txt file)
maskfile: rfifind mask file. need this file if you want to remove the bandpass
or use rfifind mask information.
min_rank: plot all groups with rank more than this. min 1, max 6
group_rank: plot groups ranked whatever you specify
plot: do you want to produce the plots as well?
just_waterfall: Do you just want to make the waterfall plots.
integrate_ts: Do you want to display the dedispersed time series in the plot?
integrate_spec: Do you want to display the pulse spectrum in the plot?
disp_pulse: Do you want to see the inset dispersed pulse in the plot?
loc_pulse: Fraction of the window length where the pulse is located.(eg. 0.25 = 1/4th of the way in.
0.5 = middle of the plot)
maxnumcands: What is the maximum number of candidates you would like to generate?
basename: output basename of the file. Appended with _DM_TIME(s)_RANK.spd
Optional arguments:
mask: Do you want to mask out rfi contaminated channels?
bandpass_corr: Do you want to remove the bandpass?
barytime: Is the given time(s) barycentric?
man_params: Do you want to specify the parameters for waterfalling
manually? If yes, I suggest using the function make_spd_from_man_params().
(I suggest giving it the rratrap output file)
Outputs:
Binary npz file containing the necessary arrays and header information to generate the spd plots.
"""
numcands=0 # counter for max number of candidates
loop_must_break = False # dont break the loop unless num of cands >100.
files = spio.get_textfile(options.txtfile)
if group_rank:
groups=[group_rank-1]
else:
groups = [i for i in range(6) if(i>=min_rank)][::-1]
for group in groups:
rank = group+1
if files[group] != "Number of rank %i groups: 0 "%rank:
values = spio.split_parameters(rank, txtfile)
lis = np.where(files == '\tRank: %i.000000'%rank)[0]
for ii in range(len(values)):
#### Arrays for Plotting DM vs SNR
dm_list, time_list, dm_arr, sigma_arr, width_arr = spio.read_RRATrap_info(txtfile, lis[ii], rank)
# Array for Plotting Dedispersed waterfall plot - zerodm - OFF
spdcand.read_from_file(values[ii], rawdatafile.tsamp, rawdatafile.specinfo.N,
rawdatafile.frequencies[0], rawdatafile.frequencies[-1],
rawdatafile, loc_pulse=loc_pulse, dedisp = True,
scaleindep = None, zerodm = None, mask = mask,
barytime=barytime,
nsub = nsub, bandpass_corr = bandpass_corr)
#make an array to store header information for the spd files
temp_filename = basename+"_DM%.1f_%.1fs_rank_%i"%(spdcand.subdm,
spdcand.topo_start_time, rank)
print_debug("Running waterfaller with Zero-DM OFF...")
# Add additional information to the header information array
data, Data_dedisp_nozerodm = waterfall_array(rawdatafile, spdcand.start,
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub,
spdcand.subdm, spdcand.zerodm, spdcand.downsamp,
spdcand.scaleindep, spdcand.width_bins,
spdcand.mask, maskfile, spdcand.bandpass_corr)
text_array = np.array([args[0], rawdatafile.specinfo.telescope,
rawdatafile.specinfo.ra_str, rawdatafile.specinfo.dec_str,
rawdatafile.specinfo.start_MJD[0],
rank, spdcand.nsub, spdcand.nbins, spdcand.subdm,
spdcand.sigma, spdcand.sample_number, spdcand.duration,
spdcand.width_bins, spdcand.pulse_width, rawdatafile.tsamp,
rawdatafile.specinfo.T, spdcand.topo_start_time, data.starttime,
data.dt,data.numspectra, data.freqs.min(), data.freqs.max()])
#### Array for plotting Dedispersed waterfall plot zerodm - ON
print_debug("Running Waterfaller with Zero-DM ON...")
zerodm=True
data, Data_dedisp_zerodm = waterfall_array(rawdatafile, spdcand.start,
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub,
spdcand.subdm, zerodm, spdcand.downsamp,
spdcand.scaleindep, spdcand.width_bins,
spdcand.mask, maskfile, spdcand.bandpass_corr)
####Sweeped without zerodm
spdcand.read_from_file(values[ii], rawdatafile.tsamp, rawdatafile.specinfo.N,
rawdatafile.frequencies[0], rawdatafile.frequencies[-1],
rawdatafile, loc_pulse=loc_pulse, dedisp = None,
scaleindep = None, zerodm = None, mask = mask,
barytime=barytime,
nsub = nsub, bandpass_corr = bandpass_corr)
data, Data_nozerodm = waterfall_array(rawdatafile, spdcand.start,
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub,
spdcand.subdm, spdcand.zerodm, spdcand.downsamp,
spdcand.scaleindep, spdcand.width_bins,
spdcand.mask, maskfile, spdcand.bandpass_corr)
text_array = np.append(text_array, spdcand.sweep_duration)
text_array = np.append(text_array, data.starttime)
text_array = np.append(text_array, spdcand.bary_start_time)
text_array = np.append(text_array, man_params)
# Array to Construct the sweep
if spdcand.sweep_dm is not None:
ddm = spdcand.sweep_dm-data.dm
delays = psr_utils.delay_from_DM(ddm, data.freqs)
delays -= delays.min()
delays_nozerodm = delays
freqs_nozerodm = data.freqs
# Sweeped with zerodm-on
zerodm = True
#downsamp_temp = 1
data, Data_zerodm = waterfall_array(rawdatafile, spdcand.start,
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub,
spdcand.subdm, zerodm, spdcand.downsamp,
spdcand.scaleindep, spdcand.width_bins,
spdcand.mask, maskfile, spdcand.bandpass_corr)
# Saving the arrays into the .spd file.
with open(temp_filename+".spd", 'wb') as f:
np.savez_compressed(f,
Data_dedisp_nozerodm = Data_dedisp_nozerodm.astype(np.float16),
Data_dedisp_zerodm = Data_dedisp_zerodm.astype(np.float16),
Data_nozerodm = Data_nozerodm.astype(np.float16),
delays_nozerodm = delays_nozerodm,
freqs_nozerodm = freqs_nozerodm,
Data_zerodm = Data_zerodm.astype(np.float16),
dm_arr= list(map(np.float16, dm_arr)),
sigma_arr = list(map(np.float16, sigma_arr)),
width_arr =list(map(np.uint8, width_arr)),
dm_list= list(map(np.float16, dm_list)),
time_list = list(map(np.float16, time_list)),
text_array = text_array)
#### Arrays for Plotting DM vs Time is in plot_spd.plot(...)
if plot:
print_debug("Now plotting...")
plot_spd.plot(temp_filename+".spd", args[1:],
spec_width=1.5, loc_pulse=loc_pulse,
xwin=False, outfile=basename,
just_waterfall=just_waterfall,
integrate_spec=integrate_spec,
integrate_ts=integrate_ts,
disp_pulse=disp_pulse, tar = None)
print_debug("Finished plot %i " %ii+strftime("%Y-%m-%d %H:%M:%S"))
numcands+= 1
print_debug('Finished sp_candidate : %i'%numcands)
if numcands >= maxnumcands: # Max number of candidates to plot 100.
loop_must_break = True
break
if loop_must_break:
break
print_debug("Finished group %i... "%rank+strftime("%Y-%m-%d %H:%M:%S"))
print_debug("Finished running waterfaller... "+strftime("%Y-%m-%d %H:%M:%S"))
def make_spd_from_man_params(spdcand, rawdatafile,
txtfile, maskfile,
plot, just_waterfall,
subdm, dm, sweep_dm,
sigma,
start_time, duration,
width_bins, nbins, downsamp,
nsub,
scaleindep,
spec_width, loc_pulse,
integrate_ts, integrate_spec, disp_pulse,
basename,
mask, bandpass_corr, barytime, man_params):
"""
Makes spd files from output files of rratrap.
Inputs:
spdcand: spcand parameters instance (read in spcand.params)
rawdatafile: psrfits file instance
txtfile: rratrap output file (groups.txt file)
maskfile: rfifind mask file. need this file if you want to remove the bandpass
or use rfifind mask information.
plot: do you want to produce the plots as well?
just_waterfall: Do you just want to make the waterfall plots.
subdm: DM to use when subbanding.
dm: DM to use when dedispersing data for plot.
sweep_dm: Show the frequency sweep using this DM.
sigma: signal-to-noise of the pulse
start_time: start time of the data to be read in for waterfalling.
duration: duration of data to be waterfalled.
width_bins: Smooth each channel/subband with a boxcar width_bins wide.
nbins: Number of time bins to plot. This option overrides
the duration argument.
downsamp: Factor to downsample in time by. Default: Don't downsample.
nsub: Number of subbands to use. Must be a factor of number of channels.
scaleindep:Do you want to scale each subband independently?(Type: Boolean)
spec_width: Twice this number times the pulse_width around the pulse to consider for the spectrum
loc_pulse: Fraction of the window length where the pulse is located.(eg. 0.25 = 1/4th of the way in.
0.5 = middle of the plot)
integrate_ts: Do you want to display the dedispersed time series in the plot?
integrate_spec: Do you want to display the pulse spectrum in the plot?
disp_pulse: Do you want to see the inset dispersed pulse in the plot?
basename: output basename of the file. Appended with _DM_TIME(s)_RANK.spd
mask: Do you want to mask out rfi contaminated channels?
bandpass_corr: Do you want to remove the bandpass?
barytime: Is the given time(s) barycentric?
man_params: Do you want to specify the parameters for waterfalling
manually? If yes, I suggest using the function make_spd_from_man_params().
(I suggest giving it the rratrap output file)
Outputs:
Binary npz file containing the necessary arrays and header information to generate the spd plots.
"""
rank = None
if not nsub:
nsub = rawdatafile.nchan
# Array for Plotting Dedispersed waterfall plot - zerodm - OFF
spdcand.manual_params(subdm, dm, sweep_dm, sigma, start_time,
width_bins, downsamp, duration, nbins, nsub, rawdatafile.tsamp,
rawdatafile.specinfo.N,
rawdatafile.frequencies[0], rawdatafile.frequencies[-1], rawdatafile,
loc_pulse=loc_pulse, dedisp=True, scaleindep=False, zerodm=False,
mask=mask, barytime=barytime, bandpass_corr=bandpass_corr)
#make an array to store header information for the spd files
temp_filename = basename+"_DM%.1f_%.1fs"%(spdcand.subdm, spdcand.topo_start_time)
print_debug("Running waterfaller with Zero-DM OFF...")
data, Data_dedisp_nozerodm = waterfall_array(rawdatafile, spdcand.start,
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub,
spdcand.subdm, spdcand.zerodm, spdcand.downsamp,
spdcand.scaleindep, spdcand.width_bins,
spdcand.mask, maskfile, spdcand.bandpass_corr)
# Add additional information to the header information array
text_array = np.array([args[0], rawdatafile.specinfo.telescope,
rawdatafile.specinfo.ra_str, rawdatafile.specinfo.dec_str,
rawdatafile.specinfo.start_MJD[0], rank,
spdcand.nsub, spdcand.nbins,
spdcand.subdm, spdcand.sigma, spdcand.sample_number,
spdcand.duration, spdcand.width_bins, spdcand.pulse_width,
rawdatafile.tsamp, rawdatafile.specinfo.T, spdcand.topo_start_time,
data.starttime, data.dt,data.numspectra, data.freqs.min(),
data.freqs.max()])
#### Array for plotting Dedispersed waterfall plot zerodm - ON
print_debug("Running Waterfaller with Zero-DM ON...")
zerodm=True
data, Data_dedisp_zerodm = waterfall_array(rawdatafile, spdcand.start,
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub,
spdcand.subdm, zerodm, spdcand.downsamp,
spdcand.scaleindep, spdcand.width_bins,
spdcand.mask, maskfile, spdcand.bandpass_corr)
####Sweeped without zerodm
spdcand.manual_params(subdm, dm, sweep_dm, sigma, start_time,
width_bins, downsamp, duration, nbins, nsub, rawdatafile.tsamp,
rawdatafile.specinfo.N,
rawdatafile.frequencies[0], rawdatafile.frequencies[-1], rawdatafile,
loc_pulse=loc_pulse, dedisp=None, scaleindep=None, zerodm=None, mask=mask,
barytime=barytime, bandpass_corr=bandpass_corr)
data, Data_nozerodm = waterfall_array(rawdatafile, spdcand.start,
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub,
spdcand.subdm, spdcand.zerodm, spdcand.downsamp,
spdcand.scaleindep, spdcand.width_bins,
spdcand.mask, maskfile, spdcand.bandpass_corr)
text_array = np.append(text_array, spdcand.sweep_duration)
text_array = np.append(text_array, data.starttime)
text_array = np.append(text_array, spdcand.bary_start_time)
text_array = np.append(text_array, man_params)
# Array to Construct the sweep
if spdcand.sweep_dm is not None:
ddm = spdcand.sweep_dm-data.dm
delays = psr_utils.delay_from_DM(ddm, data.freqs)
delays -= delays.min()
delays_nozerodm = delays
freqs_nozerodm = data.freqs
# Sweeped with zerodm-on
zerodm = True
#downsamp_temp = 1
data, Data_zerodm = waterfall_array(rawdatafile, spdcand.start,
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub,
spdcand.subdm, zerodm, spdcand.downsamp,
spdcand.scaleindep, spdcand.width_bins,
spdcand.mask, maskfile, spdcand.bandpass_corr)
with open(temp_filename+".spd", 'wb') as f:
np.savez_compressed(f,
Data_dedisp_nozerodm = Data_dedisp_nozerodm.astype(np.float16),
Data_dedisp_zerodm = Data_dedisp_zerodm.astype(np.float16),
Data_nozerodm = Data_nozerodm.astype(np.float16),
delays_nozerodm = delays_nozerodm,
freqs_nozerodm = freqs_nozerodm,
Data_zerodm = Data_zerodm.astype(np.float16),
text_array = text_array)
#### Arrays for Plotting DM vs Time is in plot_spd.plot(...)
if plot:
print_debug("Now plotting...")
plot_spd.plot(temp_filename+".spd", args[1:],
spec_width=spec_width, loc_pulse=loc_pulse, xwin=False,
outfile = basename, just_waterfall=just_waterfall,
integrate_spec=integrate_spec, integrate_ts=integrate_ts,
disp_pulse=disp_pulse, tar = None)
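
# Illustrative sketch (not in the original PRESTO source): the dispersion-sweep
# construction used in both functions above, as a standalone helper.
# Frequencies are in MHz and the returned delays (in seconds) are referenced
# to the highest frequency; the default band is an arbitrary example.
def _example_sweep_delays(sweep_dm=100.0, plot_dm=0.0,
                          freqs_MHz=np.linspace(1214.0, 1537.0, 64)):
    """Hypothetical demo of the delay curve drawn over the dispersed pulse."""
    delays = psr_utils.delay_from_DM(sweep_dm - plot_dm, freqs_MHz)
    return delays - delays.min()
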
def main():
fn = args[0]
if fn.endswith(".fil"):
# Filterbank file
filetype = "filterbank"
print_debug("Reading filterbank file..")
rawdatafile = filterbank.filterbank(fn)
basename = fn[:-4]
if fn.endswith(".fits"):
# PSRFITS file
filetype = "psrfits"
print_debug("Reading PSRFITS file..")
rawdatafile = psrfits.PsrfitsFile(fn)
basename = fn[:-5]
else:
raise ValueError("Cannot recognize data file type from "
"extension. (Only '.fits' and '.fil' "
"are supported.)")
if options.outbasenm:
basename=options.outbasenm
spdcand = spcand.params()
if not options.man_params:
print_debug('Maximum number of candidates to plot: %i'%options.maxnumcands)
make_spd_from_file(spdcand, rawdatafile,
options.txtfile, options.maskfile,
options.min_rank, options.group_rank,
options.plot, options.just_waterfall,
options.integrate_ts, options.integrate_spec, options.disp_pulse,
options.loc_pulse, options.nsub,
options.maxnumcands,
basename,
mask=options.mask, barytime=options.barytime,
bandpass_corr=options.bandpass_corr)
else:
print_debug("Making spd files based on mannual parameters. I suggest"
"reading in parameters from the groups.txt file.")
make_spd_from_man_params(spdcand, rawdatafile,
options.txtfile, options.maskfile,
options.plot, options.just_waterfall,
options.subdm, options.dm, options.sweep_dms,
options.sigma,
options.start, options.duration,
options.width_bins, options.nbins, options.downsamp,
options.nsub,
options.scaleindep,
options.spec_width, options.loc_pulse,
options.integrate_ts, options.integrate_spec, options.disp_pulse,
basename,
options.mask, options.bandpass_corr, options.barytime,
options.man_params)
if __name__=='__main__':
parser = optparse.OptionParser(prog="sp_pipeline..py",
version=" Chitrang Patel (May. 12, 2015)",
usage="%prog INFILE(PsrFits FILE, SINGLEPULSE FILES)",
description="Create single pulse plots to show the "
"frequency sweeps of a single pulse, "
"DM vs time, and SNR vs DM,"
"in psrFits data.")
parser.add_option('--groupsfile', dest='txtfile', type='string',
help="Give the groups.txt file to read in the groups information.",
default=None)
parser.add_option('--maskfile', dest='maskfile', type='string',
help="Mask file produced by rfifind. Used for "
"masking and bandpass correction.",
default=None)
parser.add_option('--mask', dest='mask', action="store_true",
help="Mask data using rfifind mask (Default: Don't mask).",
default=False)
parser.add_option('--numcands', dest='maxnumcands', type='int',
help="Maximum number of candidates to plot. (Default: 100).",
default=100)
parser.add_option('--subdm', dest='subdm', type='float',
help="DM to use when subbanding. (Default: "
"same as --dm)", default=None)
parser.add_option('-s', '--nsub', dest='nsub', type='int',
help="Number of subbands to use. Must be a factor "
"of number of channels. (Default: "
"number of channels)", default=None)
parser.add_option('--sigma', dest='sigma', type='float',
help="Signal-to-Noise of the pulse."
"(Default: Do not specify. In this case you must specify the "
"number of subbands.)",
default=None)
parser.add_option('-d', '--dm', dest='dm', type='float',
help="DM to use when dedispersing data for plot. "
"(Default: 0 pc/cm^3)", default=0.0)
parser.add_option('--show-ts', dest='integrate_ts', action='store_true',
help="Plot the time series. "
"(Default: Dont show the time series)", default=False)
parser.add_option('--show-spec', dest='integrate_spec', action='store_true',
help="Plot the spectrum. "
"(Default: Do not show the spectrum)", default=False)
parser.add_option("--spec-width", dest="spec_width", type="float", help="Twice "
"this number times the pulse width is the window around the "
"pulse considered for the spectrum. (Default: 1.5)",
default=1.5)
parser.add_option("--loc", dest="loc_pulse", type="float", help="Fraction of "
"the window length where the pulse is located."
"(Default: 0.5: half way in.)",
default=0.5)
parser.add_option('--show-sweep', dest='disp_pulse', action='store_true',
help="Plot the inset dispersed pulse. "
"(Default: Do not show the dispersed pulse)", default=False)
parser.add_option('--bandpass', dest='bandpass_corr', action='store_true',
help="Correct for the bandpass. Requires an rfifind "
"mask provided by --mask option."
"(Default: Do not remove bandpass)", default=False)
parser.add_option('-T', '--start-time', dest='start', type='float',
help="Time into observation (in seconds) at which "
"to start plot.")
parser.add_option('--notopo', dest='barytime', action='store_false',
help="Do not topocenter the given time. Use this option "
"only if the given time is topocentric."
"(Default: topocenter the given barycentric time)", default=True)
parser.add_option('-t', '--duration', dest='duration', type='float',
help="Duration (in seconds) of plot.")
parser.add_option('-n', '--nbins', dest='nbins', type='int',
help="Number of time bins to plot. This option takes "
"precedence over -t/--duration if both are "
"provided.")
parser.add_option('--width-bins', dest='width_bins', type='int',
help="Smooth each channel/subband with a boxcar "
"this many bins wide. (Default: Don't smooth)",
default=1)
parser.add_option('--sweep-dm', dest='sweep_dms', type='float',
action='append',
help="Show the frequency sweep using this DM. "
"(Default: Don't show sweep)", default=[])
parser.add_option('--sweep-posn', dest='sweep_posns', type='float',
action='append',
help="Show the frequency sweep at this position. "
"The position refers to the high-frequency "
"edge of the plot. Also, the position should "
"be a number between 0 and 1, where 0 is the "
"left edge of the plot. "
"(Default: 0)", default=None)
parser.add_option('--downsamp', dest='downsamp', type='int',
help="Factor to downsample data by. (Default: 1).",
default=1)
parser.add_option('--scaleindep', dest='scaleindep', action='store_true',
help="If this flag is set scale each channel "
"independently. (Default: Scale using "
"global maximum.)",
default=False)
parser.add_option('--min-rank', dest='min_rank', type='int',
help="Min rank you want to make spd files for. (Default: 3)"
" Rank 1: noise,"
" Rank 2: RFI,"
" Rank 3: maybe astrophysical, very low S/N,"
" Rank 4: probably astrophysical but weak, low S/N,"
" Rank 5: Very high chance of being astrophysical. S/N>8.0,"
" Rank 6: Almost guranteed to be astrophysical. S/N>9.2,",
default=3)
parser.add_option('--group-rank', dest='group_rank', type='int',
help="Min rank you want to make spd files for. (Default: None)"
" Rank 1: noise,"
" Rank 2: RFI,"
" Rank 3: maybe astrophysical, very low S/N,"
" Rank 4: probably astrophysical but weak, low S/N,"
" Rank 5: Very high chance of being astrophysical. S/N>8.0,"
" Rank 6: Almost guranteed to be astrophysical. S/N>9.2,",
default=None)
parser.add_option('--use_manual_params', dest='man_params', action='store_true',
help="If this flag is not set it will use the parameters "
"from the RRATrap groups.txt file. "
"(Default: Not use this flag. When using "
"parameters from the output of rratrap. Just input"
"groups.txt file, mask file, the PSRFITs file"
"and the .singlepulse files as input. No need to specify any of"
" the other arguments.)",
default=False)
parser.add_option('-o', dest='outbasenm', type='string',
help="basename of the output spd file.",
default=None)
parser.add_option('--noplot', dest='plot', action='store_false',
help="Do not generate spd plots.",
default=True)
parser.add_option('--just-waterfall', dest='just_waterfall', action='store_true',
help="Only produce the waterfall plots (frequency vs Time).",
default=False)
options, args = parser.parse_args()
if not args[0].endswith("fits") or args[0].endswith("fil"):
raise ValueError("The first file must be a psrFits or a filterbank file! ")
    if options.bandpass_corr and (options.maskfile is None):
        raise ValueError("For bandpass correction you need to supply a mask file.")
    if not options.man_params:
        if options.txtfile is None:
            raise ValueError("The groups.txt file must be given on the command line! ")
    else:
        if options.start is None:
            raise ValueError("Start time (-T/--start-time) "
                             "must be given on command line!")
        if (options.duration is None) and (options.nbins is None):
            raise ValueError("One of duration (-t/--duration) "
                             "and num bins (-n/--nbins) "
                             "must be given on command line!")
if options.subdm is None:
options.subdm = options.dm
main()
| 33,809 | 58.211909 | 165 | py |
presto | presto-master/python/fftfit_src/test_fftfit.py | from __future__ import print_function
#>>> print fftfit.__doc__
#This module 'fftfit' is auto-generated with f2py (version:2.13.175-1250).
#Functions:
# zbrent = zbrent(x1,x2,f1,f2,tol,tmp,pha,nsum)
# dchisqr = dchisqr(tau,tmp,r,nsum)
# cprof(y,c,amp,pha,nmax=len(y),nh=(len(c)-1))
# fccf(amp,pha,shift)
# ffft(d,npts,isign,ireal)
# fftfit(prof,s,phi,nmax,shift,eshift,snr,esnr,b,errb,ngood)
import numpy as np
from presto.psr_utils import gaussian_profile
from presto.fftfit import cprof, fftfit
template = gaussian_profile(64, 0.5, 0.1)
c,amp,pha = cprof(template)
#pha.savespace()
pha1 = pha[0]
pha = np.fmod(pha-np.arange(1,len(pha)+1)*pha1,2.0*np.pi)
for phs in [0.1, 0.3, 0.7]:
prof = gaussian_profile(64, phs, 0.1)+np.random.standard_normal(64)
shift,eshift,snr,esnr,b,errb,ngood = fftfit(prof,amp,pha)
print("True phs = %f, measured phs = %f +/- %f" % (phs, shift/len(prof),eshift/len(prof)))
| 921 | 35.88 | 94 | py |
presto | presto-master/python/wrappers/presto.py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.1.0
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
import _presto
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
class fcomplex(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
r = property(_presto.fcomplex_r_get, _presto.fcomplex_r_set)
i = property(_presto.fcomplex_i_get, _presto.fcomplex_i_set)
def __init__(self):
_presto.fcomplex_swiginit(self, _presto.new_fcomplex())
__swig_destroy__ = _presto.delete_fcomplex
# Register fcomplex in _presto:
_presto.fcomplex_swigregister(fcomplex)
SQRT2 = _presto.SQRT2
PI = _presto.PI
TWOPI = _presto.TWOPI
DEGTORAD = _presto.DEGTORAD
RADTODEG = _presto.RADTODEG
PIBYTWO = _presto.PIBYTWO
SOL = _presto.SOL
SECPERJULYR = _presto.SECPERJULYR
SECPERDAY = _presto.SECPERDAY
ARCSEC2RAD = _presto.ARCSEC2RAD
SEC2RAD = _presto.SEC2RAD
LOWACC = _presto.LOWACC
HIGHACC = _presto.HIGHACC
INTERBIN = _presto.INTERBIN
INTERPOLATE = _presto.INTERPOLATE
NO_CHECK_ALIASED = _presto.NO_CHECK_ALIASED
CHECK_ALIASED = _presto.CHECK_ALIASED
CONV = _presto.CONV
CORR = _presto.CORR
INPLACE_CONV = _presto.INPLACE_CONV
INPLACE_CORR = _presto.INPLACE_CORR
FFTDK = _presto.FFTDK
FFTD = _presto.FFTD
FFTK = _presto.FFTK
NOFFTS = _presto.NOFFTS
RAW = _presto.RAW
PREPPED = _presto.PREPPED
FFT = _presto.FFT
SAME = _presto.SAME
def read_wisdom() -> "void":
return _presto.read_wisdom()
def good_factor(nn: "long long") -> "long long":
return _presto.good_factor(nn)
def fftwcall(indata: "fcomplex", isign: "int") -> "void":
return _presto.fftwcall(indata, isign)
def tablesixstepfft(indata: "fcomplex", isign: "int") -> "void":
return _presto.tablesixstepfft(indata, isign)
def realfft(data: "float *", isign: "int") -> "void":
return _presto.realfft(data, isign)
class infodata(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
ra_s = property(_presto.infodata_ra_s_get, _presto.infodata_ra_s_set)
dec_s = property(_presto.infodata_dec_s_get, _presto.infodata_dec_s_set)
N = property(_presto.infodata_N_get, _presto.infodata_N_set)
dt = property(_presto.infodata_dt_get, _presto.infodata_dt_set)
fov = property(_presto.infodata_fov_get, _presto.infodata_fov_set)
mjd_f = property(_presto.infodata_mjd_f_get, _presto.infodata_mjd_f_set)
dm = property(_presto.infodata_dm_get, _presto.infodata_dm_set)
freq = property(_presto.infodata_freq_get, _presto.infodata_freq_set)
freqband = property(_presto.infodata_freqband_get, _presto.infodata_freqband_set)
chan_wid = property(_presto.infodata_chan_wid_get, _presto.infodata_chan_wid_set)
wavelen = property(_presto.infodata_wavelen_get, _presto.infodata_wavelen_set)
waveband = property(_presto.infodata_waveband_get, _presto.infodata_waveband_set)
energy = property(_presto.infodata_energy_get, _presto.infodata_energy_set)
energyband = property(_presto.infodata_energyband_get, _presto.infodata_energyband_set)
num_chan = property(_presto.infodata_num_chan_get, _presto.infodata_num_chan_set)
mjd_i = property(_presto.infodata_mjd_i_get, _presto.infodata_mjd_i_set)
ra_h = property(_presto.infodata_ra_h_get, _presto.infodata_ra_h_set)
ra_m = property(_presto.infodata_ra_m_get, _presto.infodata_ra_m_set)
dec_d = property(_presto.infodata_dec_d_get, _presto.infodata_dec_d_set)
dec_m = property(_presto.infodata_dec_m_get, _presto.infodata_dec_m_set)
bary = property(_presto.infodata_bary_get, _presto.infodata_bary_set)
numonoff = property(_presto.infodata_numonoff_get, _presto.infodata_numonoff_set)
notes = property(_presto.infodata_notes_get, _presto.infodata_notes_set)
name = property(_presto.infodata_name_get, _presto.infodata_name_set)
object = property(_presto.infodata_object_get, _presto.infodata_object_set)
instrument = property(_presto.infodata_instrument_get, _presto.infodata_instrument_set)
observer = property(_presto.infodata_observer_get, _presto.infodata_observer_set)
analyzer = property(_presto.infodata_analyzer_get, _presto.infodata_analyzer_set)
telescope = property(_presto.infodata_telescope_get, _presto.infodata_telescope_set)
band = property(_presto.infodata_band_get, _presto.infodata_band_set)
filt = property(_presto.infodata_filt_get, _presto.infodata_filt_set)
def __init__(self):
_presto.infodata_swiginit(self, _presto.new_infodata())
__swig_destroy__ = _presto.delete_infodata
# Register infodata in _presto:
_presto.infodata_swigregister(infodata)
def readinf(data: "infodata", filenm: "char *") -> "void":
return _presto.readinf(data, filenm)
def writeinf(data: "infodata") -> "void":
return _presto.writeinf(data)
class orbitparams(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
p = property(_presto.orbitparams_p_get, _presto.orbitparams_p_set)
e = property(_presto.orbitparams_e_get, _presto.orbitparams_e_set)
x = property(_presto.orbitparams_x_get, _presto.orbitparams_x_set)
w = property(_presto.orbitparams_w_get, _presto.orbitparams_w_set)
t = property(_presto.orbitparams_t_get, _presto.orbitparams_t_set)
pd = property(_presto.orbitparams_pd_get, _presto.orbitparams_pd_set)
wd = property(_presto.orbitparams_wd_get, _presto.orbitparams_wd_set)
def __init__(self):
_presto.orbitparams_swiginit(self, _presto.new_orbitparams())
__swig_destroy__ = _presto.delete_orbitparams
# Register orbitparams in _presto:
_presto.orbitparams_swigregister(orbitparams)
class psrparams(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
jname = property(_presto.psrparams_jname_get, _presto.psrparams_jname_set)
bname = property(_presto.psrparams_bname_get, _presto.psrparams_bname_set)
alias = property(_presto.psrparams_alias_get, _presto.psrparams_alias_set)
ra2000 = property(_presto.psrparams_ra2000_get, _presto.psrparams_ra2000_set)
dec2000 = property(_presto.psrparams_dec2000_get, _presto.psrparams_dec2000_set)
dm = property(_presto.psrparams_dm_get, _presto.psrparams_dm_set)
timepoch = property(_presto.psrparams_timepoch_get, _presto.psrparams_timepoch_set)
p = property(_presto.psrparams_p_get, _presto.psrparams_p_set)
pd = property(_presto.psrparams_pd_get, _presto.psrparams_pd_set)
pdd = property(_presto.psrparams_pdd_get, _presto.psrparams_pdd_set)
f = property(_presto.psrparams_f_get, _presto.psrparams_f_set)
fd = property(_presto.psrparams_fd_get, _presto.psrparams_fd_set)
fdd = property(_presto.psrparams_fdd_get, _presto.psrparams_fdd_set)
orb = property(_presto.psrparams_orb_get, _presto.psrparams_orb_set)
def __init__(self):
_presto.psrparams_swiginit(self, _presto.new_psrparams())
__swig_destroy__ = _presto.delete_psrparams
# Register psrparams in _presto:
_presto.psrparams_swigregister(psrparams)
def get_psr_at_epoch(psrname: "char *", epoch: "double", psr: "psrparams") -> "int":
return _presto.get_psr_at_epoch(psrname, epoch, psr)
def get_psr_from_parfile(parfilenm: "char *", epoch: "double", psr: "psrparams") -> "int":
return _presto.get_psr_from_parfile(parfilenm, epoch, psr)
def mjd_to_datestr(mjd: "double", datestr: "char *") -> "void":
return _presto.mjd_to_datestr(mjd, datestr)
def fresnl(xxa: "double") -> "double *, double *":
return _presto.fresnl(xxa)
class rderivs(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
pow = property(_presto.rderivs_pow_get, _presto.rderivs_pow_set)
phs = property(_presto.rderivs_phs_get, _presto.rderivs_phs_set)
dpow = property(_presto.rderivs_dpow_get, _presto.rderivs_dpow_set)
dphs = property(_presto.rderivs_dphs_get, _presto.rderivs_dphs_set)
d2pow = property(_presto.rderivs_d2pow_get, _presto.rderivs_d2pow_set)
d2phs = property(_presto.rderivs_d2phs_get, _presto.rderivs_d2phs_set)
locpow = property(_presto.rderivs_locpow_get, _presto.rderivs_locpow_set)
def __init__(self):
_presto.rderivs_swiginit(self, _presto.new_rderivs())
__swig_destroy__ = _presto.delete_rderivs
# Register rderivs in _presto:
_presto.rderivs_swigregister(rderivs)
class fourierprops(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
r = property(_presto.fourierprops_r_get, _presto.fourierprops_r_set)
rerr = property(_presto.fourierprops_rerr_get, _presto.fourierprops_rerr_set)
z = property(_presto.fourierprops_z_get, _presto.fourierprops_z_set)
zerr = property(_presto.fourierprops_zerr_get, _presto.fourierprops_zerr_set)
w = property(_presto.fourierprops_w_get, _presto.fourierprops_w_set)
werr = property(_presto.fourierprops_werr_get, _presto.fourierprops_werr_set)
pow = property(_presto.fourierprops_pow_get, _presto.fourierprops_pow_set)
powerr = property(_presto.fourierprops_powerr_get, _presto.fourierprops_powerr_set)
sig = property(_presto.fourierprops_sig_get, _presto.fourierprops_sig_set)
rawpow = property(_presto.fourierprops_rawpow_get, _presto.fourierprops_rawpow_set)
phs = property(_presto.fourierprops_phs_get, _presto.fourierprops_phs_set)
phserr = property(_presto.fourierprops_phserr_get, _presto.fourierprops_phserr_set)
cen = property(_presto.fourierprops_cen_get, _presto.fourierprops_cen_set)
cenerr = property(_presto.fourierprops_cenerr_get, _presto.fourierprops_cenerr_set)
pur = property(_presto.fourierprops_pur_get, _presto.fourierprops_pur_set)
purerr = property(_presto.fourierprops_purerr_get, _presto.fourierprops_purerr_set)
locpow = property(_presto.fourierprops_locpow_get, _presto.fourierprops_locpow_set)
def __init__(self):
_presto.fourierprops_swiginit(self, _presto.new_fourierprops())
__swig_destroy__ = _presto.delete_fourierprops
# Register fourierprops in _presto:
_presto.fourierprops_swigregister(fourierprops)
class foldstats(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
numdata = property(_presto.foldstats_numdata_get, _presto.foldstats_numdata_set)
data_avg = property(_presto.foldstats_data_avg_get, _presto.foldstats_data_avg_set)
data_var = property(_presto.foldstats_data_var_get, _presto.foldstats_data_var_set)
numprof = property(_presto.foldstats_numprof_get, _presto.foldstats_numprof_set)
prof_avg = property(_presto.foldstats_prof_avg_get, _presto.foldstats_prof_avg_set)
prof_var = property(_presto.foldstats_prof_var_get, _presto.foldstats_prof_var_set)
redchi = property(_presto.foldstats_redchi_get, _presto.foldstats_redchi_set)
def __init__(self):
_presto.foldstats_swiginit(self, _presto.new_foldstats())
__swig_destroy__ = _presto.delete_foldstats
# Register foldstats in _presto:
_presto.foldstats_swigregister(foldstats)
def gen_fvect(nl: "long") -> "long *":
return _presto.gen_fvect(nl)
def gen_cvect(nl: "long") -> "long *":
return _presto.gen_cvect(nl)
def power_arr(dft: "fcomplex") -> "long *":
return _presto.power_arr(dft)
def phase_arr(dft: "fcomplex") -> "long *":
return _presto.phase_arr(dft)
def frotate(data: "float *", bins_to_left: "float") -> "void":
return _presto.frotate(data, bins_to_left)
def drotate(data: "double *", bins_to_left: "double") -> "void":
return _presto.drotate(data, bins_to_left)
def keplers_eqn(t: "double", p_orb: "double", e: "double", Eacc: "double") -> "double":
return _presto.keplers_eqn(t, p_orb, e, Eacc)
def E_to_phib(E: "double *", orb: "orbitparams") -> "void":
return _presto.E_to_phib(E, orb)
def E_to_v(E: "double *", orb: "orbitparams") -> "void":
return _presto.E_to_v(E, orb)
def E_to_p(E: "double *", p_psr: "double", orb: "orbitparams") -> "void":
return _presto.E_to_p(E, p_psr, orb)
def E_to_z(E: "double *", p_psr: "double", T: "double", orb: "orbitparams") -> "void":
return _presto.E_to_z(E, p_psr, T, orb)
def E_to_phib_BT(E: "double *", orb: "orbitparams") -> "void":
return _presto.E_to_phib_BT(E, orb)
def dorbint(Eo: "double", numpts: "long", dt: "double", orb: "orbitparams") -> "long *":
return _presto.dorbint(Eo, numpts, dt, orb)
def binary_velocity(T: "double", orbit: "orbitparams") -> "double *, double *":
return _presto.binary_velocity(T, orbit)
def r_resp_halfwidth(accuracy: "presto_interp_acc") -> "int":
return _presto.r_resp_halfwidth(accuracy)
def z_resp_halfwidth(z: "double", accuracy: "presto_interp_acc") -> "int":
return _presto.z_resp_halfwidth(z, accuracy)
def w_resp_halfwidth(z: "double", w: "double", accuracy: "presto_interp_acc") -> "int":
return _presto.w_resp_halfwidth(z, w, accuracy)
def bin_resp_halfwidth(ppsr: "double", T: "double", orbit: "orbitparams") -> "int":
return _presto.bin_resp_halfwidth(ppsr, T, orbit)
def gen_r_response(roffset: "double", numbetween: "int", numkern: "int") -> "long *":
return _presto.gen_r_response(roffset, numbetween, numkern)
def gen_z_response(roffset: "double", numbetween: "int", numkern: "int", z: "double") -> "long *":
return _presto.gen_z_response(roffset, numbetween, numkern, z)
def gen_w_response(roffset: "double", numbetween: "int", numkern: "int", z: "double", w: "double") -> "long *":
return _presto.gen_w_response(roffset, numbetween, numkern, z, w)
def gen_w_response2(roffset: "double", numbetween: "int", numkern: "int", z: "double", w: "double") -> "long *":
return _presto.gen_w_response2(roffset, numbetween, numkern, z, w)
def gen_bin_response(roffset: "double", numbetween: "int", numkern: "int", ppsr: "double", T: "double", orbit: "orbitparams") -> "long *":
return _presto.gen_bin_response(roffset, numbetween, numkern, ppsr, T, orbit)
def get_localpower(data: "fcomplex", r: "double") -> "float":
return _presto.get_localpower(data, r)
def get_localpower3d(data: "fcomplex", r: "double", z: "double", w: "double") -> "float":
return _presto.get_localpower3d(data, r, z, w)
def get_derivs3d(data: "fcomplex", numdata: "long", r: "double", z: "double", w: "double", localpower: "float", result: "rderivs") -> "void":
return _presto.get_derivs3d(data, numdata, r, z, w, localpower, result)
def calc_props(data: "rderivs", r: "double", z: "double", w: "double", result: "fourierprops") -> "void":
return _presto.calc_props(data, r, z, w, result)
def calc_binprops(props: "fourierprops", T: "double", lowbin: "int", nfftbins: "int", result: "binaryprops *") -> "void":
return _presto.calc_binprops(props, T, lowbin, nfftbins, result)
def calc_rzwerrs(props: "fourierprops", T: "double", result: "rzwerrs *") -> "void":
return _presto.calc_rzwerrs(props, T, result)
def extended_equiv_gaussian_sigma(logp: "double") -> "double":
return _presto.extended_equiv_gaussian_sigma(logp)
def log_asymtotic_incomplete_gamma(a: "double", z: "double") -> "double":
return _presto.log_asymtotic_incomplete_gamma(a, z)
def log_asymtotic_gamma(z: "double") -> "double":
return _presto.log_asymtotic_gamma(z)
def equivalent_gaussian_sigma(logp: "double") -> "double":
return _presto.equivalent_gaussian_sigma(logp)
def chi2_logp(chi2: "double", dof: "double") -> "double":
return _presto.chi2_logp(chi2, dof)
def chi2_sigma(chi2: "double", dof: "double") -> "double":
return _presto.chi2_sigma(chi2, dof)
def candidate_sigma(power: "double", numsum: "int", numtrials: "double") -> "double":
return _presto.candidate_sigma(power, numsum, numtrials)
def power_for_sigma(sigma: "double", numsum: "int", numtrials: "double") -> "double":
return _presto.power_for_sigma(sigma, numsum, numtrials)
def switch_f_and_p(_in: "double", ind: "double", indd: "double") -> "double *, double *, double *":
return _presto.switch_f_and_p(_in, ind, indd)
def chisqr(data: "double *", avg: "double", var: "double") -> "double":
return _presto.chisqr(data, avg, var)
def z2n(data: "double *", var: "double", n: "int") -> "double":
return _presto.z2n(data, var, n)
def print_candidate(cand: "fourierprops", dt: "double", N: "unsigned long", nph: "float", numerrdigits: "int") -> "void":
return _presto.print_candidate(cand, dt, N, nph, numerrdigits)
def print_bin_candidate(cand: "binaryprops *", numerrdigits: "int") -> "void":
return _presto.print_bin_candidate(cand, numerrdigits)
def read_rzw_cand(file: "FILE *", cands: "fourierprops") -> "int":
return _presto.read_rzw_cand(file, cands)
def get_rzw_cand(filenm: "char *", candnum: "int", cand: "fourierprops") -> "void":
return _presto.get_rzw_cand(filenm, candnum, cand)
def read_bin_cand(file: "FILE *", cands: "binaryprops *") -> "int":
return _presto.read_bin_cand(file, cands)
def get_bin_cand(filenm: "char *", candnum: "int", cand: "binaryprops *") -> "void":
return _presto.get_bin_cand(filenm, candnum, cand)
def next2_to_n(x: "long long") -> "long long":
return _presto.next2_to_n(x)
def is_power_of_10(n: "long long") -> "int":
return _presto.is_power_of_10(n)
def choose_good_N(orig_N: "long long") -> "long long":
return _presto.choose_good_N(orig_N)
def dms2rad(deg: "int", min: "int", sec: "double") -> "double":
return _presto.dms2rad(deg, min, sec)
def hms2rad(hour: "int", min: "int", sec: "double") -> "double":
return _presto.hms2rad(hour, min, sec)
def hours2hms(hours: "double") -> "int *, int *, double *":
return _presto.hours2hms(hours)
def deg2dms(degrees: "double") -> "int *, int *, double *":
return _presto.deg2dms(degrees)
def sphere_ang_diff(ra1: "double", dec1: "double", ra2: "double", dec2: "double") -> "double":
return _presto.sphere_ang_diff(ra1, dec1, ra2, dec2)
def corr_rz_plane(data: "fcomplex", numbetween: "int", startbin: "int", zlo: "double", zhi: "double", numz: "int", fftlen: "int", accuracy: "presto_interp_acc") -> "long *":
return _presto.corr_rz_plane(data, numbetween, startbin, zlo, zhi, numz, fftlen, accuracy)
def corr_rzw_vol(data: "fcomplex", numbetween: "int", startbin: "int", zlo: "double", zhi: "double", numz: "int", wlo: "double", whi: "double", numw: "int", fftlen: "int", accuracy: "presto_interp_acc") -> "long *":
return _presto.corr_rzw_vol(data, numbetween, startbin, zlo, zhi, numz, wlo, whi, numw, fftlen, accuracy)
def max_r_arr(data: "fcomplex", rin: "double", derivs: "rderivs") -> "double *, double *":
return _presto.max_r_arr(data, rin, derivs)
def max_rz_arr(data: "fcomplex", rin: "double", zin: "double", derivs: "rderivs") -> "double *, double *, double *":
return _presto.max_rz_arr(data, rin, zin, derivs)
def max_rz_arr_harmonics(data: "fcomplex", rin: "double", zin: "double", derivdata: "double *") -> "double *, double *":
return _presto.max_rz_arr_harmonics(data, rin, zin, derivdata)
def max_rzw_arr_harmonics(data: "fcomplex", rin: "double", zin: "double", win: "double", derivdata: "double *") -> "double *, double *, double *":
return _presto.max_rzw_arr_harmonics(data, rin, zin, win, derivdata)
def max_rzw_arr(data: "fcomplex", rin: "double", zin: "double", win: "double", derivs: "rderivs") -> "double *, double *, double *, double *":
return _presto.max_rzw_arr(data, rin, zin, win, derivs)
def barycenter(topotimes: "double *", barytimes: "double *", voverc: "double *", ra: "char *", dec: "char *", obs: "char *", ephem: "char *") -> "void":
return _presto.barycenter(topotimes, barytimes, voverc, ra, dec, obs, ephem)
def DOF_corr(dt_per_bin: "double") -> "double":
return _presto.DOF_corr(dt_per_bin)
def simplefold(data: "float *", dt: "double", tlo: "double", prof: "double *", startphs: "double", f0: "double", fdot: "double", fdotdot: "double", standard: "int") -> "double":
return _presto.simplefold(data, dt, tlo, prof, startphs, f0, fdot, fdotdot, standard)
def nice_output_1(output: "char *", val: "double", err: "double", len: "int") -> "int":
return _presto.nice_output_1(output, val, err, len)
def nice_output_2(output: "char *", val: "double", err: "double", len: "int") -> "int":
return _presto.nice_output_2(output, val, err, len)
| 22,317 | 45.11157 | 215 | py |
presto | presto-master/python/ppgplot_src/__init__.py | from _ppgplot import *
| 23 | 11 | 22 | py |
presto | presto-master/bin/GBT350_drift_search.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
from operator import attrgetter
import glob, os, os.path, shutil, socket, tarfile, stat
import numpy, sys, time
from presto import sigproc
from presto import sifting
from presto import presto
from presto import psr_utils as pu
institution = "NRAOCV"
base_tmp_dir = "/dev/shm/"
base_output_dir = "/home/sransom/results/GBT/drift"
#-------------------------------------------------------------------
# Tunable parameters for searching and folding
# (you probably don't need to tune any of them)
orig_N = 1728000 # Number of samples to analyze at a time (~141 sec)
raw_N = 1900000 # Number of samples to step through .fits files
overlap_factor = 0.5 # Overlap each orig_N samples by this fraction
rfifind_chunk_time = 25600 * 0.00008192 # ~2.1 sec
singlepulse_threshold = 5.0 # threshold SNR for candidate determination
singlepulse_plot_SNR = 5.5 # threshold SNR for singlepulse plot
singlepulse_maxwidth = 0.1 # max pulse width in seconds
to_prepfold_sigma = 6.0 # incoherent sum significance to fold candidates
max_lo_cands_to_fold = 20 # Never fold more than this many lo-accel candidates
max_hi_cands_to_fold = 10 # Never fold more than this many hi-accel candidates
numhits_to_fold = 2 # Number of DMs with a detection needed to fold
low_DM_cutoff = 1.0 # Lowest DM to consider as a "real" pulsar
lo_accel_numharm = 16 # max harmonics
lo_accel_sigma = 2.0 # threshold gaussian significance
lo_accel_zmax = 0 # bins
lo_accel_flo = 2.0 # Hz
hi_accel_numharm = 8 # max harmonics
hi_accel_sigma = 3.0 # threshold gaussian significance
hi_accel_zmax = 50 # bins
hi_accel_flo = 1.0 # Hz
low_T_to_search = 50.0 # sec
# Sifting specific parameters (don't touch without good reason!)
sifting.sigma_threshold = to_prepfold_sigma-1.0 # incoherent power threshold (sigma)
sifting.c_pow_threshold = 100.0 # coherent power threshold
sifting.r_err = 1.1 # Fourier bin tolerance for candidate equivalence
sifting.short_period = 0.0005 # Shortest period candidates to consider (s)
sifting.long_period = 15.0 # Longest period candidates to consider (s)
sifting.harm_pow_cutoff = 8.0 # Power required in at least one harmonic
#-------------------------------------------------------------------
def find_masked_fraction(obs):
"""
find_masked_fraction(obs):
Parse the output file from an rfifind run and return the
fraction of the data that was suggested to be masked.
"""
rfifind_out = obs.basefilenm + "_rfifind.out"
for line in open(rfifind_out):
if "Number of bad intervals" in line:
return float(line.split("(")[1].split("%")[0])/100.0
# If there is a problem reading the file, return 100%
return 100.0
def timed_execute(cmd, run_cmd=1):
"""
timed_execute(cmd):
Execute the command 'cmd' after logging the command
to STDOUT. Return the wall-clock amount of time
the command took to execute.
"""
sys.stdout.write("\n'"+cmd+"'\n")
sys.stdout.flush()
start = time.time()
if run_cmd: os.system(cmd)
end = time.time()
return end - start
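# For example (a sketch mirroring how this helper is used in main() below),
# the wall-clock time of an external call is accumulated into one of the
# obs_info timers:
#     job.rfifind_time += timed_execute("rfifind -o %s %s" % (job.basefilenm,
#                                                             job.fil_filenm))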
def get_folding_command(cand, obs, ddplans):
"""
get_folding_command(cand, obs, ddplans):
Return a command for prepfold for folding the subbands using
an obs_info instance, a list of the ddplans, and a candidate
instance that describes the observations and searches.
"""
# Folding rules are based on the facts that we want:
# 1. Between 24 and 200 bins in the profiles
# 2. For most candidates, we want to search length = 101 p/pd/DM cubes
# (The side of the cube is always 2*M*N+1 where M is the "factor",
# either -npfact (for p and pd) or -ndmfact, and N is the number of bins
# in the profile). A search of 101^3 points is pretty fast.
# 3. For slow pulsars (where N=100 or 200), since we'll have to search
# many points, we'll use fewer intervals in time (-npart 30)
# 4. For the slowest pulsars, in order to avoid RFI, we'll
# not search in period-derivative.
zmax = cand.filename.split("_")[-1]
outfilenm = obs.basefilenm+"_DM%s_Z%s"%(cand.DMstr, zmax)
hidms = [x.lodm for x in ddplans[1:]] + [2000]
dfacts = [x.downsamp for x in ddplans]
for hidm, dfact in zip(hidms, dfacts):
if cand.DM < hidm:
downsamp = dfact
break
if downsamp==1:
filfile = obs.fil_filenm
else:
filfile = obs.basefilenm+"_DS%d.fil"%downsamp
p = 1.0 / cand.f
if (p < 0.002):
Mp, Mdm, N = 2, 2, 24
otheropts = "-npart 50 -ndmfact 3"
elif p < 0.05:
Mp, Mdm, N = 2, 1, 50
otheropts = "-npart 40 -pstep 1 -pdstep 2 -dmstep 3"
elif p < 0.5:
Mp, Mdm, N = 1, 1, 100
otheropts = "-npart 30 -pstep 1 -pdstep 2 -dmstep 1"
else:
Mp, Mdm, N = 1, 1, 200
otheropts = "-npart 30 -nopdsearch -pstep 1 -pdstep 2 -dmstep 1"
return "prepfold -noxwin -accelcand %d -accelfile %s.cand -dm %.2f -o %s %s -n %d -npfact %d -ndmfact %d %s" % \
(cand.candnum, cand.filename, cand.DM, outfilenm,
otheropts, N, Mp, Mdm, filfile)
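# A rough, hypothetical sketch of the command returned above (the real values
# come from the candidate and the period-dependent rules described in the
# comments at the top of get_folding_command). For a p ~ 0.3 s candidate at
# DM 45.0 found in the zmax=0 search it would look approximately like:
#     prepfold -noxwin -accelcand 3 -accelfile <base>_DM45.00_ACCEL_0.cand \
#         -dm 45.00 -o <base>_DM45.00_Z0 -npart 30 -pstep 1 -pdstep 2 -dmstep 1 \
#         -n 100 -npfact 1 -ndmfact 1 <filterbank file>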
class obs_info(object):
"""
class obs_info(fil_filenm)
A class describing the observation and the analysis.
"""
def __init__(self, fil_filenm):
self.fil_filenm = fil_filenm
self.basefilenm = fil_filenm[:fil_filenm.find(".fil")]
filhdr, hdrlen = sigproc.read_header(fil_filenm)
self.MJD = filhdr['tstart']
self.nchans = filhdr['nchans']
self.ra_rad = sigproc.ra2radians(filhdr['src_raj'])
self.ra_string = pu.coord_to_string(*pu.rad_to_hms(self.ra_rad))
self.dec_rad = sigproc.dec2radians(filhdr['src_dej'])
self.dec_string = pu.coord_to_string(*pu.rad_to_dms(self.dec_rad))
self.str_coords = "J"+"".join(self.ra_string.split(":")[:2])
if self.dec_rad >= 0.0: self.str_coords += "+"
self.str_coords += "".join(self.dec_string.split(":")[:2])
self.az = filhdr['az_start']
self.el = 90.0-filhdr['za_start']
fillen = os.stat(fil_filenm)[6]
self.raw_N = (fillen-hdrlen)/(filhdr['nbits']/8)/filhdr['nchans']
self.dt = filhdr['tsamp']
self.raw_T = self.raw_N * self.dt
self.N = orig_N
self.T = self.N * self.dt
# Determine the average barycentric velocity of the observation
self.baryv = presto.get_baryv(self.ra_string, self.dec_string,
self.MJD, self.T, obs="GB")
# Where to dump all the results
# Directory structure is under the base_output_directory
# according to base/MJD/filenmbase/beam
self.outputdir = os.path.join(base_output_dir,
str(int(self.MJD)),
self.str_coords)
# Figure out which host we are processing on
self.hostname = socket.gethostname()
# The fraction of the data recommended to be masked by rfifind
self.masked_fraction = 0.0
# Initialize our timers
self.rfifind_time = 0.0
self.downsample_time = 0.0
self.dedispersing_time = 0.0
self.FFT_time = 0.0
self.lo_accelsearch_time = 0.0
self.hi_accelsearch_time = 0.0
self.singlepulse_time = 0.0
self.sifting_time = 0.0
self.folding_time = 0.0
self.total_time = 0.0
        # Initialize some candidate counters
self.num_sifted_cands = 0
self.num_folded_cands = 0
self.num_single_cands = 0
def write_report(self, filenm):
report_file = open(filenm, "w")
report_file.write("---------------------------------------------------------\n")
report_file.write("%s was processed on %s\n"%(self.fil_filenm, self.hostname))
report_file.write("Ending UTC time: %s\n"%(time.asctime(time.gmtime())))
report_file.write("Total wall time: %.1f s (%.2f hrs)\n"%\
(self.total_time, self.total_time/3600.0))
report_file.write("Fraction of data masked: %.2f%%\n"%\
(self.masked_fraction*100.0))
report_file.write("---------------------------------------------------------\n")
report_file.write(" rfifind time = %7.1f sec (%5.2f%%)\n"%\
(self.rfifind_time, self.rfifind_time/self.total_time*100.0))
report_file.write(" dedispersing time = %7.1f sec (%5.2f%%)\n"%\
(self.dedispersing_time, self.dedispersing_time/self.total_time*100.0))
report_file.write(" single-pulse time = %7.1f sec (%5.2f%%)\n"%\
(self.singlepulse_time, self.singlepulse_time/self.total_time*100.0))
report_file.write(" FFT time = %7.1f sec (%5.2f%%)\n"%\
(self.FFT_time, self.FFT_time/self.total_time*100.0))
report_file.write(" lo-accelsearch time = %7.1f sec (%5.2f%%)\n"%\
(self.lo_accelsearch_time, self.lo_accelsearch_time/self.total_time*100.0))
report_file.write(" hi-accelsearch time = %7.1f sec (%5.2f%%)\n"%\
(self.hi_accelsearch_time, self.hi_accelsearch_time/self.total_time*100.0))
report_file.write(" sifting time = %7.1f sec (%5.2f%%)\n"%\
(self.sifting_time, self.sifting_time/self.total_time*100.0))
report_file.write(" folding time = %7.1f sec (%5.2f%%)\n"%\
(self.folding_time, self.folding_time/self.total_time*100.0))
report_file.write("---------------------------------------------------------\n")
report_file.close()
class dedisp_plan(object):
"""
class dedisp_plan(lodm, dmstep, dmsperpass, numpasses, numsub, downsamp)
A class describing a de-dispersion plan for prepsubband in detail.
"""
def __init__(self, lodm, dmstep, dmsperpass, numpasses, numsub, downsamp):
self.lodm = float(lodm)
self.dmstep = float(dmstep)
self.dmsperpass = int(dmsperpass)
self.numpasses = int(numpasses)
self.numsub = int(numsub)
self.downsamp = int(downsamp)
self.sub_dmstep = self.dmsperpass * self.dmstep
self.dmlist = [] # These are strings for comparison with filenames
self.subdmlist = []
for ii in range(self.numpasses):
self.subdmlist.append("%.2f"%(self.lodm + (ii+0.5)*self.sub_dmstep))
lodm = self.lodm + ii * self.sub_dmstep
dmlist = ["%.2f"%dm for dm in \
numpy.arange(self.dmsperpass)*self.dmstep + lodm]
self.dmlist.append(dmlist)
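# A small illustrative example (values taken from one of the plans constructed
# in __main__ below):
#     plan = dedisp_plan(0.0, 0.03, 100, 19, 128, 2)
#     plan.subdmlist[0]   ->  '1.50'    (center DM of the first subband pass)
#     plan.dmlist[0][:4]  ->  ['0.00', '0.03', '0.06', '0.09']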
def main(fil_filenm, workdir, ddplans):
# Change to the specified working directory
os.chdir(workdir)
# Get information on the observation and the job
job = obs_info(fil_filenm)
if job.raw_T < low_T_to_search:
print("The observation is too short (%.2f s) to search."%job.raw_T)
sys.exit()
job.total_time = time.time()
ddplans = ddplans[job.nchans]
# Use whatever .zaplist is found in the current directory
default_zaplist = glob.glob("*.zaplist")[0]
# Make sure the output directory (and parent directories) exist
try:
os.makedirs(job.outputdir)
os.chmod(job.outputdir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)
except: pass
# Make sure the tmp directory (in a tmpfs mount) exists
tmpdir = os.path.join(base_tmp_dir, job.basefilenm)
try:
os.makedirs(tmpdir)
except: pass
print("\nBeginning GBT350 driftscan search of '%s'"%job.fil_filenm)
print("UTC time is: %s"%(time.asctime(time.gmtime())))
# rfifind the filterbank file
cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out"%\
(rfifind_chunk_time, job.basefilenm,
job.fil_filenm, job.basefilenm)
job.rfifind_time += timed_execute(cmd)
maskfilenm = job.basefilenm + "_rfifind.mask"
# Find the fraction that was suggested to be masked
# Note: Should we stop processing if the fraction is
# above some large value? Maybe 30%?
job.masked_fraction = find_masked_fraction(job)
# Iterate over the stages of the overall de-dispersion plan
dmstrs = []
for ddplan in ddplans:
# Make a downsampled filterbank file
if ddplan.downsamp > 1:
cmd = "downsample_filterbank.py %d %s"%(ddplan.downsamp, job.fil_filenm)
job.downsample_time += timed_execute(cmd)
fil_filenm = job.fil_filenm[:job.fil_filenm.find(".fil")] + \
"_DS%d.fil"%ddplan.downsamp
else:
fil_filenm = job.fil_filenm
# Iterate over the individual passes through the .fil file
for passnum in range(ddplan.numpasses):
subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])
# Now de-disperse
cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -nsub %d -numdms %d -numout %d -o %s/%s %s"%\
(maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
ddplan.numsub, ddplan.dmsperpass, job.N/ddplan.downsamp,
tmpdir, job.basefilenm, fil_filenm)
job.dedispersing_time += timed_execute(cmd)
# Iterate over all the new DMs
for dmstr in ddplan.dmlist[passnum]:
dmstrs.append(dmstr)
basenm = os.path.join(tmpdir, job.basefilenm+"_DM"+dmstr)
datnm = basenm+".dat"
fftnm = basenm+".fft"
infnm = basenm+".inf"
# Do the single-pulse search
cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
(singlepulse_maxwidth, singlepulse_threshold, datnm)
job.singlepulse_time += timed_execute(cmd)
try:
shutil.move(basenm+".singlepulse", workdir)
except: pass
# FFT, zap, and de-redden
cmd = "realfft %s"%datnm
job.FFT_time += timed_execute(cmd)
cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
(default_zaplist, job.baryv, fftnm)
job.FFT_time += timed_execute(cmd)
cmd = "rednoise %s"%fftnm
job.FFT_time += timed_execute(cmd)
try:
os.rename(basenm+"_red.fft", fftnm)
except: pass
# Do the low-acceleration search
cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
(lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
job.lo_accelsearch_time += timed_execute(cmd)
try:
os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
except: pass
try: # This prevents errors if there are no cand files to copy
shutil.move(basenm+"_ACCEL_%d.cand"%lo_accel_zmax, workdir)
shutil.move(basenm+"_ACCEL_%d"%lo_accel_zmax, workdir)
except: pass
# Do the high-acceleration search
cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
(hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
job.hi_accelsearch_time += timed_execute(cmd)
try:
os.remove(basenm+"_ACCEL_%d.txtcand"%hi_accel_zmax)
except: pass
try: # This prevents errors if there are no cand files to copy
shutil.move(basenm+"_ACCEL_%d.cand"%hi_accel_zmax, workdir)
shutil.move(basenm+"_ACCEL_%d"%hi_accel_zmax, workdir)
except: pass
# Move the .inf files
try:
shutil.move(infnm, workdir)
except: pass
# Remove the .dat and .fft files
try:
os.remove(datnm)
except: pass
try:
os.remove(fftnm)
except: pass
# Make the single-pulse plots
basedmb = job.basefilenm+"_DM"
basedme = ".singlepulse "
# The following will make plots for DM ranges:
# 0-30, 20-110, 100-310, 300-1000+
dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
basedmb+"[012][0-9].[0-9][0-9]"+basedme,
basedmb+"[2-9][0-9].[0-9][0-9]"+basedme +
basedmb+"10[0-9].[0-9][0-9]"+basedme,
basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"30[0-9].[0-9][0-9]"+basedme,
basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
dmrangestrs = ["0-30", "20-110", "100-310", "300-1000+"]
psname = job.basefilenm+"_singlepulse.ps"
for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
cmd = 'single_pulse_search.py -t %f -g "%s"' % \
(singlepulse_plot_SNR, dmglob)
job.singlepulse_time += timed_execute(cmd)
try:
os.rename(psname,
job.basefilenm+"_DMs%s_singlepulse.ps"%dmrangestr)
except: pass
# Sift through the candidates to choose the best to fold
job.sifting_time = time.time()
lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%lo_accel_zmax))
if len(lo_accel_cands):
lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
if len(lo_accel_cands):
lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
dmstrs, low_DM_cutoff)
if len(lo_accel_cands):
lo_accel_cands.sort(key=attrgetter('sigma'), reverse=True)
sifting.write_candlist(lo_accel_cands,
job.basefilenm+".accelcands_Z%d"%lo_accel_zmax)
hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
if len(hi_accel_cands):
hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
if len(hi_accel_cands):
hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
dmstrs, low_DM_cutoff)
if len(hi_accel_cands):
hi_accel_cands.sort(key=attrgetter('sigma'), reverse=True)
sifting.write_candlist(hi_accel_cands,
job.basefilenm+".accelcands_Z%d"%hi_accel_zmax)
try:
cmd = "mv *.accelcands* "+job.outputdir
os.system(cmd)
except: pass
job.sifting_time = time.time() - job.sifting_time
# Fold the best candidates
cands_folded = 0
for cand in lo_accel_cands:
if cands_folded == max_lo_cands_to_fold:
break
elif cand.sigma > to_prepfold_sigma:
job.folding_time += timed_execute(get_folding_command(cand, job, ddplans))
cands_folded += 1
cands_folded = 0
for cand in hi_accel_cands:
if cands_folded == max_hi_cands_to_fold:
break
elif cand.sigma > to_prepfold_sigma:
job.folding_time += timed_execute(get_folding_command(cand, job, ddplans))
cands_folded += 1
# Remove the bestprof files
bpfiles = glob.glob("*.pfd.bestprof")
for bpfile in bpfiles:
os.remove(bpfile)
# Now step through the .ps files and convert them to .png and gzip them
psfiles = glob.glob("*.ps")
for psfile in psfiles:
if "singlepulse" in psfile:
# For some reason the singlepulse files don't transform nicely...
epsfile = psfile.replace(".ps", ".eps")
os.system("eps2eps "+psfile+" "+epsfile)
os.system("pstoimg -density 100 -crop a "+epsfile)
try:
os.remove(epsfile)
except: pass
else:
os.system("pstoimg -density 100 -flip cw "+psfile)
os.system("gzip "+psfile)
# Tar up the results files
tar_suffixes = ["_ACCEL_%d.tgz"%lo_accel_zmax,
"_ACCEL_%d.tgz"%hi_accel_zmax,
"_ACCEL_%d.cand.tgz"%lo_accel_zmax,
"_ACCEL_%d.cand.tgz"%hi_accel_zmax,
"_singlepulse.tgz",
"_inf.tgz",
"_pfd.tgz"]
tar_globs = ["*_ACCEL_%d"%lo_accel_zmax,
"*_ACCEL_%d"%hi_accel_zmax,
"*_ACCEL_%d.cand"%lo_accel_zmax,
"*_ACCEL_%d.cand"%hi_accel_zmax,
"*.singlepulse",
"*_DM[0-9]*.inf",
"*.pfd"]
for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
tf = tarfile.open(job.basefilenm+tar_suffix, "w:gz")
for infile in glob.glob(tar_glob):
tf.add(infile)
os.remove(infile)
tf.close()
# Remove all the downsampled .fil files
filfiles = glob.glob("*_DS?.fil") + glob.glob("*_DS??.fil")
for filfile in filfiles:
os.remove(filfile)
# Remove the tmp directory (in a tmpfs mount)
try:
os.rmdir(tmpdir)
except: pass
# And finish up
job.total_time = time.time() - job.total_time
print("\nFinished")
print("UTC time is: %s"%(time.asctime(time.gmtime())))
# Write the job report
job.write_report(job.basefilenm+".report")
job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))
# Move all the important stuff to the output directory
cmd = "mv *rfifind.[bimors]* *.tgz *.ps.gz *.png *.report "+\
job.outputdir
os.system(cmd)
if __name__ == "__main__":
# Create our de-dispersion plans
ddplans = {1024:[], 2048:[]}
if (0):
# The following are the near-optimal values for 1024 and 2048 lags.
# They keeps the total dispersive smearing (i.e.
# not counting scattering) <1 ms up to a DM of ~100 pc cm^-3 for 1024-lag
# data and ~200 pc cm^-3 for 2048-lag data.
# For 1024 chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans[1024].append(dedisp_plan( 0.0, 0.02, 20, 91, 32, 1))
ddplans[1024].append(dedisp_plan( 36.4, 0.03, 24, 30, 32, 2))
ddplans[1024].append(dedisp_plan( 58.0, 0.05, 24, 35, 32, 4))
ddplans[1024].append(dedisp_plan( 100.0, 0.10, 24, 40, 32, 8))
ddplans[1024].append(dedisp_plan( 196.0, 0.30, 22, 45, 32, 16))
ddplans[1024].append(dedisp_plan( 493.0, 0.50, 24, 30, 32, 32))
ddplans[1024].append(dedisp_plan( 853.0, 1.00, 24, 7, 32, 64))
# For 2048 chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans[2048].append(dedisp_plan( 0.0, 0.02, 20, 177, 32, 1))
ddplans[2048].append(dedisp_plan( 70.8, 0.03, 24, 60, 32, 2))
ddplans[2048].append(dedisp_plan( 114.0, 0.05, 24, 65, 32, 4))
ddplans[2048].append(dedisp_plan( 192.0, 0.10, 24, 80, 32, 8))
ddplans[2048].append(dedisp_plan( 384.0, 0.30, 22, 80, 32, 16))
ddplans[2048].append(dedisp_plan( 912.0, 0.50, 24, 8, 32, 32))
elif (0):
#
# If there is <=1GB of RAM per node, the following are preferred
#
# DDplan.py -f 350.0 -b 50.0 -n 1024 -t 0.00008192 -s 64 -r 0.2
# For 1024 chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans[1024].append(dedisp_plan( 0.0, 0.03, 50, 37, 64, 2))
ddplans[1024].append(dedisp_plan( 55.5, 0.05, 50, 17, 64, 4))
ddplans[1024].append(dedisp_plan( 98.0, 0.10, 50, 19, 64, 8))
ddplans[1024].append(dedisp_plan( 193.0, 0.20, 50, 19, 64, 16))
ddplans[1024].append(dedisp_plan( 383.0, 0.50, 50, 19, 64, 32))
ddplans[1024].append(dedisp_plan( 858.0, 1.00, 50, 3, 64, 64))
# DDplan.py -f 350.0 -b 50.0 -n 2048 -t 0.00008192 -s 64 -r 0.2
# For 2048 chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans[2048].append(dedisp_plan( 0.0, 0.03, 50, 74, 64, 2))
ddplans[2048].append(dedisp_plan( 111.0, 0.05, 50, 33, 64, 4))
ddplans[2048].append(dedisp_plan( 193.5, 0.10, 50, 38, 64, 8))
ddplans[2048].append(dedisp_plan( 383.5, 0.20, 50, 38, 64, 16))
ddplans[2048].append(dedisp_plan( 763.5, 0.50, 50, 10, 64, 32))
elif (1):
#
# If there is 2GB or more RAM per node, the following are probably faster
#
# DDplan.py -f 350.0 -b 50.0 -n 1024 -t 0.00008192 -s 128 -r 0.2
# For 1024 chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans[1024].append(dedisp_plan( 0.0, 0.03, 100, 19, 128, 2))
ddplans[1024].append(dedisp_plan( 57.0, 0.05, 100, 8, 128, 4))
ddplans[1024].append(dedisp_plan( 97.0, 0.10, 100, 10, 128, 8))
ddplans[1024].append(dedisp_plan( 197.0, 0.20, 100, 10, 128, 16))
ddplans[1024].append(dedisp_plan( 397.0, 0.50, 100, 10, 128, 32))
ddplans[1024].append(dedisp_plan( 897.0, 1.00, 100, 2, 128, 64))
# DDplan.py -f 350.0 -b 50.0 -n 2048 -t 0.00008192 -s 128 -r 0.2
# For 2048 chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans[2048].append(dedisp_plan( 0.0, 0.03, 100, 37, 128, 2))
ddplans[2048].append(dedisp_plan( 111.0, 0.05, 100, 17, 128, 4))
ddplans[2048].append(dedisp_plan( 196.0, 0.10, 100, 19, 128, 8))
ddplans[2048].append(dedisp_plan( 386.0, 0.20, 100, 19, 128, 16))
ddplans[2048].append(dedisp_plan( 766.0, 0.50, 100, 5, 128, 32))
elif (0):
#
# This is for "quick" processing
#
# DDplan.py -f 350.0 -b 50.0 -n 1024 -t 0.00008192 -s 128 -r 1.5
# For 1024 chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans[1024].append(dedisp_plan( 0.0, 0.20, 100, 20, 128, 16))
ddplans[1024].append(dedisp_plan( 400.0, 0.50, 100, 10, 128, 32))
ddplans[1024].append(dedisp_plan( 900.0, 1.00, 100, 2, 128, 64))
# DDplan.py -f 350.0 -b 50.0 -n 2048 -t 0.00008192 -s 128 -r 1.5
# For 2048 chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans[2048].append(dedisp_plan( 0.0, 0.20, 100, 39, 128, 16))
ddplans[2048].append(dedisp_plan( 780.0, 0.50, 100, 5, 128, 32))
# Arguments to the search program are
# sys.argv[1] = filterbank file name
# sys.argv[2] = working directory name
if len(sys.argv) >= 3:
workdir = sys.argv[2]
fil_filenm = sys.argv[1]
main(fil_filenm, workdir, ddplans)
elif len(sys.argv) == 2:
fil_filenm = sys.argv[1]
main(fil_filenm, '.', ddplans)
else:
print("GBT350_drift_search.py fil_filenm [workdir]")
| 28,008 | 46.392555 | 116 | py |
presto | presto-master/bin/sum_profiles.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import zip
from builtins import range
import getopt, sys
import os.path
from presto import fftfit
from presto import psr_utils
from presto import sinc_interp
from presto import Pgplot
import numpy as Num
from presto.prepfold import pfd
from presto.psr_constants import *
scopes = {'GBT':'1', 'Arecibo':'3', 'Parkes':'7', 'GMRT': 'r'}
def measure_phase(profile, template):
"""
measure_phase(profile, template):
Call FFTFIT on the profile and template to determine the
following parameters: shift,eshift,snr,esnr,b,errb,ngood
(returned as a tuple). These are defined as in Taylor's
talk at the Royal Society.
"""
c,amp,pha = fftfit.cprof(template)
pha1 = pha[0]
pha = Num.fmod(pha-Num.arange(1,len(pha)+1)*pha1,TWOPI)
shift,eshift,snr,esnr,b,errb,ngood = fftfit.fftfit(profile,amp,pha)
return shift,eshift,snr,esnr,b,errb,ngood
def parse_vals(valstring):
"""
parse_vals(valstring):
Return a list of integers that corresponds to each of the numbers
in a string representation where '-' gives an inclusive range
and ',' separates individual values or ranges. For example:
> parse_vals('5,8,10-13,17')
would return: [5, 8, 10, 11, 12, 13, 17]
"""
if (len(valstring)==0 or
(len(valstring)==1 and not valstring.isdigit())):
return None
vals = set()
for xx in valstring.split(','):
if (xx.find("-") > 0):
lo, hi = xx.split("-")
vals = vals.union(set(range(int(lo), int(hi)+1)))
else:
vals.add(int(xx))
vals = list(vals)
vals.sort()
return vals
def usage():
print("""
usage: sum_profiles.py [options which must include -t or -g] profs_file
[-h, --help] : Display this help
[-b bkgd_cutoff, --background=cutoff] : Fractional cutoff for the background level
or, if the arg is a string (i.e. containing
',' and/or '-'), use the bins specified (as
for parse_vals()) as the background values
  [-f, --fitbaseline]                    : Fit a 3rd-order polynomial to the specified
background values before determining the RMS
[-p pulsebins, --pulsebins=pulsebins] : A 'parse_vals' string that specifies the bins
to include when integrating flux. The minimum
                                           value (plus 1/2 of the std-dev) is subtracted.
This is not usually needed, but is used when there
are pulse artifacts or if you want the flux of
only part of a profile.
[-d DM, --dm=DM] : Re-combine subbands at DM
[-n N, --numbins=N] : The number of bins to use in the resulting profile
[-g gausswidth, --gaussian=width] : Use a Gaussian template of FWHM width
or, if the arg is a string, read the file
to get multiple-gaussian parameters
[-t templateprof, --template=prof] : The template .bestprof file to use
[-o outputfilenm, --output=filenm] : The output file to use for the summed profile
[-s SEFD, --sefd=SEFD] : For rough flux calcs, the SEFD (i.e. Tsys/G)
This program reads in a list of *.pfd files from profs_file and then
de-disperses each of these using the DM specified. The de-dispersed
profiles are fit against a template to determine an absolute offset,
and are then co-added together to produce a 'master' profile. Each profile
is scaled so that the RMS level of the off-pulse region is equivalent.
To-do: -- add a .par option so that the profiles can be added based on
an ephemeris.
""")
if __name__ == '__main__':
# Import Psyco if available
try:
import psyco
psyco.full()
except ImportError:
pass
try:
opts, args = getopt.getopt(sys.argv[1:], "hfb:p:d:n:g:t:o:s:",
["help", "fitbaselibe", "background=", "pulsebins=",
"dm=", "numbins=", "gaussian=", "template=",
"outputfilenm=", "sefd="])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
if len(sys.argv)==1:
usage()
sys.exit(2)
fitbaseline = False
lowfreq = None
DM = 0.0
bkgd_cutoff = 0.1
bkgd_vals = None
gaussianwidth = 0.1
gaussfitfile = None
templatefilenm = None
pulsebins = None
numbins = 128
SEFD = 0.0
outfilenm = "sum_profiles.bestprof"
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-f", "--fitbaseline"):
fitbaseline = True
if o in ("-b", "--background"):
if '-' in a or ',' in a:
bkgd_vals = Num.asarray(parse_vals(a))
else:
try:
bkgd_cutoff = float(a)
except ValueError:
bkgd_vals = Num.asarray(parse_vals(a))
if o in ("-p", "--pulsebins"):
pulsebins = Num.asarray(parse_vals(a))
if o in ("-d", "--dm"):
DM = float(a)
if o in ("-n", "--numbins"):
numbins = int(a)
if o in ("-g", "--gaussian"):
try:
gaussianwidth = float(a)
except ValueError:
gaussfitfile = a
if o in ("-t", "--template"):
templatefilenm = a
if o in ("-o", "--output"):
outfilenm = a
if o in ("-s", "--sefd"):
SEFD = float(a)
print("Creating a summed profile of length %d bins using DM = %f"%(numbins, DM))
# Read the template profile or create an appropriate Gaussian
if templatefilenm is not None:
template = psr_utils.read_profile(templatefilenm)
# Resample the template profile to have the correct number of bins (if required)
if not len(template)==numbins:
oldlen = len(template)
template = sinc_interp.periodic_interp(template, numbins)[::oldlen]
else:
if gaussfitfile is not None:
template = psr_utils.read_gaussfitfile(gaussfitfile, numbins)
else:
template = psr_utils.gaussian_profile(numbins, 0.0, gaussianwidth)
# Normalize it
template -= min(template)
template /= max(template)
# Rotate it so that it becomes a "true" template according to FFTFIT
shift,eshift,snr,esnr,b,errb,ngood = measure_phase(template, template)
template = psr_utils.fft_rotate(template, shift)
# Determine the off-pulse bins
if bkgd_vals is not None:
Pgplot.plotxy(template, labx="Phase bins")
Pgplot.plotxy(template[bkgd_vals], Num.arange(numbins)[bkgd_vals],
line=None, symbol=2, color='red')
Pgplot.closeplot()
offpulse_inds = bkgd_vals
onpulse_inds = set(Num.arange(numbins)) - set(bkgd_vals)
else:
offpulse_inds = Num.compress(template<=bkgd_cutoff, Num.arange(numbins))
onpulse_inds = Num.compress(template>bkgd_cutoff, Num.arange(numbins))
Pgplot.plotxy(template)
Pgplot.plotxy([bkgd_cutoff, bkgd_cutoff], [0.0, numbins], color='red')
Pgplot.closeplot()
# If the number of bins in the offpulse section is < 10% of the total
# use the statistics in the .pfd file to set the RMS
if (len(offpulse_inds) < 0.1*numbins):
print("Number of off-pulse bins to use for RMS is too low. Using .pfd stats.")
usestats = 1
else:
usestats = 0
# Read the list of *.pfd files to process
pfdfilenms = []
killsubss = []
killintss = []
for line in open(sys.argv[-1]):
if not line.startswith("#"):
sline = line.split()
pfdfilenm = sline[0]
if len(sline)==1:
killsubs, killints = None, None
elif len(sline)==2:
killsubs = parse_vals(sline[1])
killints = None
elif len(sline)>=3:
killsubs = parse_vals(sline[1])
killints = parse_vals(sline[2])
if os.path.exists(pfdfilenm):
pfdfilenms.append(pfdfilenm)
killsubss.append(killsubs)
killintss.append(killints)
else:
print("Can't find '%s'. Skipping it."%pfdfilenm)
sumprof = Num.zeros(numbins, dtype='d')
base_T = None
base_BW = None
orig_fctr = None
Tprerfi = 0.0
Tpostrfi = 0.0
avg_S = 0.0
# Step through the profiles and determine the offsets
for pfdfilenm, killsubs, killints in zip(pfdfilenms, killsubss, killintss):
print("\n Processing '%s'..."%pfdfilenm)
# Read the fold data and de-disperse at the requested DM
current_pfd = pfd(pfdfilenm)
current_pfd.dedisperse(DM)
# This corrects for any searching that prepfold did to find the peak
current_pfd.adjust_period()
T = current_pfd.T
Tprerfi += T
BW = current_pfd.nsub*current_pfd.subdeltafreq
fctr = current_pfd.lofreq + 0.5*BW
# If there are subbands to kill, kill em'
if killsubs is not None:
print(" killing subbands: ", killsubs)
current_pfd.kill_subbands(killsubs)
BW *= (current_pfd.nsub-len(killsubs))/float(current_pfd.nsub)
# If there are intervals to kill, kill em'
if killints is not None:
print(" killing intervals: ", killints)
current_pfd.kill_intervals(killints)
T *= (current_pfd.npart-len(killints))/float(current_pfd.npart)
if base_T is None:
base_T = T
if base_BW is None:
base_BW = BW
if orig_fctr is None:
orig_fctr = fctr
else:
if fctr != orig_fctr:
print("Warning!: fctr = %f, but original f_ctr = %f!" % (fctr, orig_fctr))
Tpostrfi += T
prof = current_pfd.profs.sum(0).sum(0)
# Resample the current profile to have the correct number of bins
if not len(prof)==numbins:
oldlen = len(prof)
prof = sinc_interp.periodic_interp(prof, numbins)[::oldlen]
# Determine the amount to rotate the profile using FFTFIT
shift,eshift,snr,esnr,b,errb,ngood = measure_phase(prof, template)
# Rotate the profile to match the template
newprof = psr_utils.fft_rotate(prof, shift)
offpulse = newprof[offpulse_inds]
# Remove a polynomial fit from the off-pulse region if required
if fitbaseline:
pfit = Num.poly1d(Num.polyfit(offpulse_inds, offpulse, 3))
offpulse -= pfit(offpulse_inds)
if 0:
Pgplot.plotxy(offpulse)
Pgplot.closeplot()
# Determine the off-pulse RMS
if usestats:
print("Using raw data statistics instead of off-pulse region")
offpulse_rms = Num.sqrt(current_pfd.varprof)
else:
offpulse_rms = offpulse.std()
Ppsr = 1.0 / current_pfd.fold_p1 # Pulsar period
tau_bin = Ppsr / current_pfd.proflen # Duration of profile bin
dt_per_bin = tau_bin / current_pfd.dt
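        # Adjacent samples that land in the same profile bin are correlated
        # by the way prepfold folds the data, so the effective number of
        # independent measurements per bin is reduced.  DOF_corr() (from the
        # pfd class) returns the corresponding correction, whose square root
        # is used below to scale the measured off-pulse RMS.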
corr_rms = offpulse_rms / Num.sqrt(current_pfd.DOF_corr())
print("samples/bin = ", current_pfd.dt_per_bin)
print("RMSs (uncorr, corr) = ", offpulse_rms, corr_rms)
# Now attempt to shift and scale the profile so that it has
# an off-pulse mean of ~0 and an off-pulse RMS of ~1
offset = Num.median(newprof[offpulse_inds])
newprof -= offset
newprof /= corr_rms
if 0:
Pgplot.plotxy(newprof, labx="Phase bins")
if fitbaseline:
Pgplot.plotxy((pfit(offpulse_inds)-offset)/corr_rms, offpulse_inds,
color='yellow')
Pgplot.plotxy(newprof[offpulse_inds], offpulse_inds,
line=None, symbol=2, color='red')
if pulsebins is not None:
Pgplot.plotxy(newprof[pulsebins], pulsebins,
line=None, symbol=2, color='blue')
Pgplot.closeplot()
if pulsebins is None:
SNR = newprof.sum() # integrate everything
else:
SNR = newprof[pulsebins].sum()
print(" Approx SNR = %.3f" % SNR)
if SEFD:
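            # Radiometer-equation estimate: the noise in a single profile bin
            # is ~ SEFD / sqrt(2 * BW * T / numbins); dividing the summed SNR
            # by numbins then gives the period-averaged (mean) flux density.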
S = SEFD * SNR / Num.sqrt(2.0 * BW * T / numbins) / numbins
avg_S += S
print(" Approx flux density = %.3f mJy" % S)
# Now weight the profile based on the observation duration
# and BW as compared to the first profile
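        # Each profile has already been scaled to unit off-pulse RMS, so its
        # relative sensitivity goes as sqrt(BW * T); weighting by that factor
        # approximates a noise-weighted (near-optimal) sum.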
newprof *= Num.sqrt(T/base_T * BW/base_BW)
if (0):
Pgplot.plotxy(newprof)
Pgplot.closeplot()
# Add it to the summed profile
sumprof += newprof
# Now normalize, plot, and write the summed profile
offpulse = sumprof[offpulse_inds]
# Remove a polynomial fit from the off-pulse region if required
if fitbaseline:
pfit = Num.poly1d(Num.polyfit(offpulse_inds, offpulse, 3))
offpulse -= pfit(offpulse_inds)
Pgplot.plotxy(offpulse)
Pgplot.closeplot()
# Now attempt to shift and scale the profile so that it has
# an off-pulse mean of ~0 and an off-pulse RMS of ~1
sumprof -= Num.median(offpulse)
sumprof *= Num.sqrt(current_pfd.DOF_corr()) / offpulse.std()
print("\nSummed profile approx SNR = %.3f" % sum(sumprof))
if SEFD:
avg_S /= len(pfdfilenms)
if pulsebins is None:
SNR = sumprof.sum() # integrate everything
else:
SNR = sumprof[pulsebins].sum()
S = SEFD * SNR / Num.sqrt(2.0 * BW * Tpostrfi / numbins) / numbins
print(" Approx sum profile flux density = %.3f mJy" % S)
print(" Avg of individual flux densities = %.3f mJy" % avg_S)
print(" Total (RFI cleaned) integration = %.0f s (%.2f hrs)" % \
(Tpostrfi, Tpostrfi/3600.0))
# Rotate the summed profile so that the max value is at the phase ~ 0.25 mark
    sumprof = psr_utils.rotate(sumprof, -len(sumprof)//4)
Pgplot.plotxy(sumprof, Num.arange(numbins),
labx="Pulse Phase", laby="Relative Flux")
Pgplot.closeplot()
print("\n Writing profile to '%s'..."%(outfilenm), end=' ')
outfile = open(outfilenm, "w")
for ii, val in enumerate(sumprof):
outfile.write("%04d %20.15g\n"%(ii, val))
outfile.close()
print("Done\n")
| 15,054 | 37.901809 | 92 | py |
presto | presto-master/bin/combine_weights.py | #!/usr/bin/env python
import numpy as num
import sys
if len(sys.argv)==1:
print("usage: combine_weights.py INPUT_rfifind.weight_FILES")
sys.exit()
wgts = None
for name in sys.argv[1:]:
tmpwgts = num.loadtxt(name, dtype=num.int32, comments="#",
usecols=(1,), unpack=True)
if wgts is None:
wgts = tmpwgts
print("'%s' had %d bad channels" % \
(name, num.equal(wgts, 0).sum()))
chans = num.arange(len(wgts))
else:
gain = (tmpwgts-wgts) < 0
print("'%s' gained %d chans:" % (name, gain.sum()), chans[gain])
loss = (tmpwgts-wgts) > 0
print("'%s' lost %d chans:" % (name, loss.sum()), chans[loss])
wgts = num.logical_and(wgts, tmpwgts)
print("There are %d channels total." % num.equal(wgts, 0).sum())
print("Writing them to 'combined.weights'")
outfile = open("combined.weights", 'w')
for ii, wgt in enumerate(wgts):
outfile.write("%4d %d\n" % (ii, wgt))
outfile.close()
| 1,009 | 27.857143 | 72 | py |
presto | presto-master/bin/get_TOAs.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import range
import getopt, sys
from presto import fftfit
from presto import psr_utils
import numpy as Num
from presto.prepfold import pfd
from presto.polycos import polycos
from presto.psr_constants import *
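# One-character TEMPO observatory codes, keyed by the telescope name
# recorded in the .pfd file (used for Princeton-format TOAs)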
scopes = {'GBT':'1',
'Arecibo':'3',
'Parkes':'7',
'GMRT': 'r',
'IRAM': 's',
'LWA1': 'x',
'LWA': 'x',
'VLA': 'c',
'FAST': 'k',
'MeerKAT': 'm',
'Geocenter': 'o'}
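# Corresponding TEMPO2 observatory codes (used when -2/--tempo2 is given)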
scopes2 = {'GBT':'gbt',
'Arecibo':'ao',
'Parkes':'pks',
'GMRT': 'gmrt',
'LWA1': 'lwa1',
'LWA': 'lwa1',
'VLA': 'vla',
'FAST': 'fast',
'MeerKAT': 'mk',
'Geocenter': 'coe'}
def measure_phase(profile, template, rotate_prof=True):
"""
measure_phase(profile, template):
Call FFTFIT on the profile and template to determine the
following parameters: shift,eshift,snr,esnr,b,errb,ngood
(returned as a tuple). These are defined as in Taylor's
talk at the Royal Society.
"""
c,amp,pha = fftfit.cprof(template)
pha1 = pha[0]
if (rotate_prof):
pha = Num.fmod(pha-Num.arange(1,len(pha)+1)*pha1,TWOPI)
shift,eshift,snr,esnr,b,errb,ngood = fftfit.fftfit(profile,amp,pha)
return shift,eshift,snr,esnr,b,errb,ngood
def usage():
sys.stderr.write("""
usage: get_TOAs.py [options which must include -t or -g] pfd_file
[-h, --help] : Display this help
[-s numsub, --subbands=numsub] : Divide the fold into numsub subbands
[-n numTOAs, --numtoas=numTOAs] : Divide the fold into numTOAs parts
[-d DM, --dm=DM] : Re-combine subbands at DM
[-f, --FFTFITouts] : Print all FFTFIT outputs and errors
[-g gausswidth, --gaussian=width] : Use a Gaussian template of FWHM width
or, if the arg is a string, read the file
to get multiple-gaussian parameters
[-t templateprof, --template=prof] : The template .bestprof file to use
[-k subs_list, --kill=subs_list] : List of subbands to ignore
[-i ints_list, --kints=ints_list] : List of intervals to ignore
[-o seconds, --offset=seconds] : Add the offset in seconds to any TOAs
[-e, --event] : The .pfd file was made with events
[-r, --norotate] : Do not rotate the template for FFTFIT
[-2, --tempo2] : Write Tempo2 format TOAs
pfd_file : The .pfd file containing the folds
The program generates TOAs from a .pfd file using Joe Taylor's
FFTFIT program. The TOAs are output to STDOUT. Typically, the .pfd
file is created using prepfold with the "-timing" flag and an
appropriate .par file on either a topocentric time series or raw
telescope data. But barycentric folds or folds of barycentered
events are also acceptable. The number of bins in the folded profile
must be a power of two for FFTFIT to work. The most important thing
about the fold, though, is that it must have been made using "-nosearch"!
(Note: "-timing" implies "-nosearch" and forces a power-of-two number
of bins.)
A typical example would be something like:
get_TOAs.py -n 30 -t myprof.bestprof -k 0,20-23 myprof.pfd | \\
tail -28 >> good.tim
which would extract 30 TOAs (the default number of slices or parts
in time for "prepfold -timing" is 60) from a fold made from some raw
radio telescope data. The command would ignore (i.e. zero-out)
subbands 0, 20, 21, 22, and 23 (e.g. due to interference) and then
ignore the first 2 TOAs with the tail command.
If you don't specify "-n", the default number of parts in the fold
is assumed, but if you don't specify "-s", all the subbands (if any
are present) are integrated together.
If you specify the "-f" flag, an additional line of output is
displayed for each TOA that shows the "b +/- berr" and "SNR +/-
SNRerr" params from FFTFIT.
""")
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "herf2s:n:d:g:t:o:k:i:",
["help", "event", "norotate", "FFTFITouts",
"tempo2","subbands=", "numtoas=", "dm=", "gaussian=",
"template=", "offset=", "kill=", "kints="])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
if len(sys.argv)==1:
usage()
sys.exit(2)
lowfreq = None
DM = 0.0
gaussianwidth = 0.1
gaussfitfile = None
templatefilenm = None
rotate_prof = True
numsubbands = 1
numtoas = 1
otherouts = 0
offset = 0.0
events = 0
t2format = False
kill = []
kints = []
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-f", "--FFTFITouts"):
otherouts = 1
if o in ("-r", "--norotate"):
rotate_prof = False
if o in ("-2", "--tempo2"):
t2format = True
if o in ("-e", "--event"):
lowfreq = 0.0
DM = 0.0
events = 1
if o in ("-s", "--subbands"):
numsubbands = int(a)
if o in ("-n", "--numtoas"):
numtoas = int(a)
if numtoas==0:
sys.exit()
if o in ("-d", "--dm"):
DM = float(a)
if o in ("-g", "--gaussian"):
try:
gaussianwidth = float(a)
except ValueError:
gaussfitfile = a
if o in ("-t", "--template"):
templatefilenm = a
if o in ("-o", "--offset"):
offset = float(a)
if o in ("-k", "--kill"):
for subs in a.split(','):
if (subs.find("-") > 0):
lo, hi = subs.split("-")
kill.extend(list(range(int(lo), int(hi)+1)))
else:
kill.append(int(subs))
if o in ("-i", "--kints"):
for ints in a.split(','):
if (ints.find("-") > 0):
lo, hi = ints.split("-")
kints.extend(list(range(int(lo), int(hi)+1)))
else:
kints.append(int(ints))
# Read the prepfold output file and the binary profiles
fold_pfd = pfd(sys.argv[-1])
# Check to make sure we can use this .pfd for timing purposes
if not fold_pfd.use_for_timing():
sys.stderr.write(
"Error: '%s' was made allowing prepfold to search!\n" % \
sys.argv[-1])
sys.exit(2)
# Read key information from the bestprof file
if fold_pfd.bestprof:
fold = fold_pfd.bestprof
else:
sys.stderr.write(
"Error: Can't open '%s.bestrof'! Regenerate with show_pfd.\n" % \
sys.argv[-1])
sys.exit(2)
timestep_sec = fold.T / numtoas
timestep_day = timestep_sec / SECPERDAY
fold.epoch = fold.epochi+fold.epochf
# If the requested number of TOAs doesn't divide into the
# number of time intervals, then exit
if fold_pfd.npart % numtoas:
sys.stderr.write(
"Error: # of TOAs (%d) doesn't divide # of time intervals (%d)!\n" % \
(numtoas, fold_pfd.npart))
sys.exit(2)
# Over-ride the DM that was used during the fold
if (DM!=0.0):
fold_pfd.bestdm = DM
if (fold_pfd.numchan==1 and DM==0.0 and events):
fold_pfd.bestdm = 0.0
fold_pfd.numchan = 1
# Kill any required channels and/or subband
fold_pfd.kill_subbands(kill)
# Kill any required intervals
fold_pfd.kill_intervals(kints)
# De-disperse at the requested DM
# Also save the pulse period used in dedispersion calc
fold_pfd.dedisperse(interp=1)
p_dedisp = fold_pfd.proflen / fold_pfd.binspersec
# Combine the profiles as required
profs = fold_pfd.combine_profs(numtoas, numsubbands)
# PRESTO de-disperses at the high frequency channel so determine a
# correction to the middle of the band
if not events:
subpersumsub = fold_pfd.nsub/numsubbands
# Calculate the center of the summed subband freqs and delays
sumsubfreqs = (Num.arange(numsubbands)+0.5)*subpersumsub*fold_pfd.subdeltafreq + \
(fold_pfd.lofreq-0.5*fold_pfd.chan_wid)
# Note: In the following, we cannot use fold_pfd.hifreqdelay since that
# is based on the _barycentric_ high frequency (if the barycentric
# conversion was available). For TOAs, we need a topocentric
# delay, which is based on the topocentric frequency fold_pfd.hifreq
sumsubdelays = (psr_utils.delay_from_DM(fold_pfd.bestdm, sumsubfreqs) -
psr_utils.delay_from_DM(fold_pfd.bestdm, fold_pfd.hifreq))
sumsubdelays_phs = Num.fmod(sumsubdelays / p_dedisp, 1.0)
# Save the "higest channel within a subband" freqs/delays for use in
# later DM/timing correction. PBD 2011/11/03
sumsubfreqs_hi = sumsubfreqs + \
fold_pfd.subdeltafreq/2.0 - fold_pfd.chan_wid/2.0
subdelays2 = psr_utils.delay_from_DM(fold_pfd.bestdm, sumsubfreqs) - \
psr_utils.delay_from_DM(fold_pfd.bestdm, sumsubfreqs_hi)
else:
fold_pfd.subfreqs = Num.asarray([0.0])
sumsubfreqs = Num.asarray([0.0])
sumsubdelays = Num.asarray([0.0])
subdelays2 = Num.asarray([0.0])
sumsubdelays_phs = Num.asarray([0.0])
# Read the template profile
if templatefilenm is not None:
template = psr_utils.read_profile(templatefilenm, normalize=1)
else:
if (gaussfitfile):
template = psr_utils.read_gaussfitfile(gaussfitfile, fold_pfd.proflen)
else:
template = psr_utils.gaussian_profile(fold_pfd.proflen, 0.0, gaussianwidth)
template = template / max(template)
#from Pgplot import *
#plotxy(template)
#closeplot()
# Determine the Telescope used
if (not fold.topo):
obs = '@' # Solarsystem Barycenter
else:
try:
if t2format:
obs = scopes2[fold_pfd.telescope.split()[0]]
else:
obs = scopes[fold_pfd.telescope.split()[0]]
except KeyError: sys.stderr.write("Unknown telescope!!! : " + fold_pfd.telescope)
# Read the polyco file (if required)
if (fold.psr and fold.topo):
if ("polycos" in fold_pfd.__dict__ and
not fold_pfd.polycos==0):
pcs = fold_pfd.polycos
else:
pcs = polycos(fold.psr, sys.argv[-1]+".polycos")
(fold.phs0, fold.f0) = pcs.get_phs_and_freq(fold.epochi, fold.epochf)
fold.f1 = fold.f2 = 0.0
else:
pcs = None
fold.phs0 = 0.0
(fold.f0, fold.f1, fold.f2) = psr_utils.p_to_f(fold.p0, fold.p1, fold.p2)
#
# Calculate the TOAs
#
if t2format:
print("FORMAT 1")
for ii in range(numtoas):
# The .pfd file was generated using -nosearch and a specified
# folding period, p-dot, and p-dotdot (or f, f-dot, and f-dotdot).
if (pcs is None):
# Time at the middle of the interval in question
midtime = fold.epoch + (ii+0.5)*timestep_day
p = 1.0/psr_utils.calc_freq(midtime, fold.epoch, fold.f0, fold.f1, fold.f2)
t0 = psr_utils.calc_t0(midtime, fold.epoch, fold.f0, fold.f1, fold.f2)
t0i= int(t0 + 1e-9)
t0f = t0 - t0i
# The .pfd file was folded using polycos
else:
# Time at the middle of the interval in question
mjdf = fold.epochf + (ii+0.5)*timestep_day
(phs, f0) = pcs.get_phs_and_freq(fold.epochi, mjdf)
phs -= fold.phs0
p = 1.0/f0
if (phs < 0.0): phs += 1.0 # Consistent with pat
t0f = mjdf - phs*p/SECPERDAY
t0i = fold.epochi
for jj in range(numsubbands):
prof = profs[ii][jj]
# If we have zapped intervals or subbands, or added padding
# sometimes we can get folds with no signal at all. Skip these.
if Num.std(prof)==0.0:
sys.stderr.write("Skipping TOA %d for subband %d due to lack of signal\n"%(ii+1, jj+1))
continue
# Make sure that the template and the data have the same number of bins
if (not len(template)==fold_pfd.proflen):
if (not ((len(template)%fold_pfd.proflen)==0 or
(fold_pfd.proflen%len(template))==0)):
if not ii and not jj:
sys.stderr.write("WARNING!: Lengths of template (%d) and data (%d) are incompatible! Skipping '%s'!\n" % (len(template), fold_pfd.proflen, fold_pfd.filenm))
continue
# Interpolate the data
if (len(template) > fold_pfd.proflen):
prof = psr_utils.linear_interpolate(prof, len(template)//fold_pfd.proflen)
if not ii and not jj:
sys.stderr.write("Note: Interpolating the data for '%s'\n"%fold_pfd.filenm)
# Interpolate the template
elif (1):
template = psr_utils.linear_interpolate(template, fold_pfd.proflen//len(template))
if not ii and not jj:
sys.stderr.write("Note: Interpolating the template for '%s'\n"%fold_pfd.filenm)
# Downsample the data (Probably not a good idea)
else:
prof = psr_utils.downsample(prof, fold_pfd.proflen//len(template))
if not ii and not jj:
sys.stderr.write("Note: Downsampling the data for '%s'\n"%fold_pfd.filenm)
try:
tau = None
                if len(prof) & (len(prof) - 1):
sys.stderr.write("Profile length %d is not a power of two; unable to use FFTFIT.\n" % len(prof))
                elif len(template) & (len(template) - 1):
sys.stderr.write("Template length %d is not a power of two; unable to use FFTFIT.\n" % len(template))
else:
# Try using FFTFIT first
shift,eshift,snr,esnr,b,errb,ngood = measure_phase(prof, template, rotate_prof)
# tau and tau_err are the predicted phase of the pulse arrival
tau, tau_err = shift/len(prof), eshift/len(prof)
# Note: "error" flags are shift = 0.0 and eshift = 999.0
# If that failed, use a time-domain correlation
if (Num.fabs(shift) < 1e-7 and
Num.fabs(eshift-999.0) < 1e-7):
sys.stderr.write("Warning! Bad return from FFTFIT. May be due to inadequate signal-to-noise.\n")
tau = None
if tau is None:
sys.stderr.write("Warning: using PRESTO correlation - reported error is incorrect...\n")
# Not enough structure in the template profile for FFTFIT
# so use time-domain correlations instead
tau = psr_utils.measure_phase_corr(prof, template)
# This needs to be changed
tau_err = 0.1/len(prof)
# Calculate correction for dedispersion to true channel
# center freqs that used a slightly different pulse
# period.
dd_phs_2 = subdelays2[jj] * (1.0/p - 1.0/p_dedisp)
# Sum up several phase shifts
tau_tot = Num.fmod(tau+sumsubdelays_phs[jj]+dd_phs_2+3.0, 1.0)
if (tau_tot > 0.5): tau_tot -= 1.0
# Send the TOA to STDOUT
toaf = t0f + (tau_tot*p + offset)/SECPERDAY
newdays = int(Num.floor(toaf))
if t2format:
psr_utils.write_tempo2_toa(t0i+newdays, toaf-newdays,
tau_err*p*1000000.0,
sumsubfreqs[jj], 0.0, name=fold_pfd.pfd_filename, obs=obs)
else:
psr_utils.write_princeton_toa(t0i+newdays, toaf-newdays,
tau_err*p*1000000.0,
sumsubfreqs[jj], 0.0, obs=obs)
if (otherouts):
sys.stderr.write("FFTFIT results: b = %.4g +/- %.4g SNR = %.4g +/- %.4g" %
(b, errb, snr, esnr))
            except ValueError as err:
                # FFTFIT failed for this TOA; note the error and move on
                fftfit.error = err
| 17,047 | 40.886978 | 181 | py |
presto | presto-master/bin/pyplotres.py | #!/usr/bin/env python
# A simple command line version of plotres written in python
# using matplotlib and numpy
#
# Patrick Lazarus, Feb 26th, 2009
# Many other tweaks by S. Ransom, T. Pennucci et al over years
#
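# Typical use (illustrative): run TEMPO in a directory containing the TOAs
# and par file so that resid2.tmp and tempo.lis exist, then, e.g.,
#   pyplotres.py --post -x mjd -y usec -f 400:1000 -f 1000:2000
#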
from __future__ import print_function
from __future__ import absolute_import
from builtins import input
from builtins import str
from builtins import range
from builtins import object
import optparse
import sys
import re
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.cluster.vq import kmeans2
import pyslalib.slalib as slalib
from presto import binary_psr
from presto import parfile as par
from presto import residuals
# Available x-axis types
xvals = ['mjd', 'year', 'numtoa', 'orbitphase']
xind = 0
# Available y-axis types
yvals = ['phase', 'usec', 'sec']
yind = 0
colors = {1: ['#000000'], # black
2: ['#ff0000', '#0000ff'], # red blue
3: ['#ff0000', '#008000', '#0000ff'], # red green blue
4: ['#ff0000', '#FFA500', '#008000', '#0000ff'], # red orange green blue
# red orange green blue violet
5: ['#ff0000', '#FFA500', '#008000', '#0000ff', '#EE82EE'],
# red orange green blue indigo violet
6: ['#ff0000', '#FFA500', '#008000', '#0000ff', '#4B0082', '#EE82EE'],
# red orange yellow green blue indigo violet
7: ['#ff0000', '#FFA500', '#FFFF00', '#008000', '#0000ff', '#4B0082', '#EE82EE'],
# red orange yellow green blue indigo violet black
8: ['#ff0000', '#FFA500', '#FFFF00', '#008000', '#0000ff', '#4B0082', '#EE82EE', '#000000']}
def find_freq_clusters(freqs):
# first make a histogram
minf, maxf = freqs.min(), freqs.max()
maxbins = 8 # related to the max colors defined...
df = 4.0 # MHz
if ((maxf - minf) < df): # Only a single freq to our resolution
return [[0.0, np.inf]]
numbins = int((maxf - minf) / df) + 2
lobound = minf - 0.5 * df
hibound = lobound + numbins * df
hist, edges = np.histogram(freqs, numbins, [lobound, hibound])
# Now choose the maxbins biggest bins where there are TOAs
hibins = hist.argsort()[::-1]
hibins = hibins[hist[hibins] > 0]
if len(hibins) > maxbins:
hibins = hibins[:maxbins]
ctrs = edges[hibins] + 0.5 * df
ctrs.sort()
# and use these as starting points for kmeans
kmeans, indices = kmeans2(freqs, ctrs)
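    # Build half-open [lo, hi) bands that tile 0..inf, with boundaries midway
    # between adjacent k-means centroids, so that every TOA frequency falls
    # into exactly one band (and hence one plot colour).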
if len(kmeans)==1:
return [[0.0, np.inf]]
elif len(kmeans)==2:
return [[0.0, kmeans.mean()], [kmeans.mean(), np.inf]]
else:
freqbands = [[0.0, kmeans[0:2].mean()]]
for ii in range(len(kmeans)-2):
freqbands.append([kmeans[ii:ii+2].mean(), kmeans[ii+1:ii+3].mean()])
freqbands.append([kmeans[-2:].mean(), np.inf])
return freqbands
class TempoResults(object):
def __init__(self, freqbands):
"""Read TEMPO results (resid2.tmp, tempo.lis, timfile and parfiles)
freqbands is a list of frequency pairs to display.
"""
# Open tempo.lis. Parse it and find input .tim and .par files. Also find output .par file.
inputfiles_re = re.compile(r"Input data from (.*\.tim.*), Parameters from (.*\.par.*)")
outputfile_re = re.compile(r"Assumed parameters -- PSR (.*)$")
tempolisfile = open("tempo.lis")
intimfn, inparfn, outparfn = None, None, None
for line in tempolisfile:
if line[:15]=="Input data from":
sline = line.split()
intimfn = sline[3][:-1] # strip the comma
intimbase = os.path.splitext(intimfn)[0]
inparfn = intimbase+".par" if sline[6]=='def' else sline[6]
if inparfn[-1]==".": inparfn = inparfn[:-1]
elif line[:15]=="Assumed paramet":
outparfn = line.split()[-1]+".par"
if (intimfn != None) and (inparfn != None) and (outparfn != None):
# Found what we're looking for no need to continue parsing the file
break
tempolisfile.close()
# Record filename
self.inparfn = inparfn
self.outparfn = outparfn
self.intimfn = intimfn
# Read parfiles
self.inpar = par.psr_par(inparfn)
self.outpar = par.psr_par(outparfn)
# Read residuals
r = residuals.read_residuals()
self.max_TOA = r.bary_TOA.max()
self.min_TOA = r.bary_TOA.min()
if freqbands is None:
self.freqbands = find_freq_clusters(r.bary_freq)
else:
self.freqbands = freqbands
self.residuals = {}
for lo,hi in self.freqbands:
indices = (r.bary_freq>=lo) & (r.bary_freq<hi)
self.residuals[get_freq_label(lo, hi)] = \
Resids(r.bary_TOA[indices], r.bary_freq[indices], \
np.arange(r.numTOAs)[indices], r.orbit_phs[indices], \
r.postfit_phs[indices], r.postfit_sec[indices], \
r.prefit_phs[indices], r.prefit_sec[indices], \
r.uncertainty[indices], r.weight[indices], \
self.inpar, self.outpar)
def get_info(self, freq_label, index, postfit=True):
"""Given a freq_label and index return formatted text
describing the TOA residual.
Assume postfit period for calculating residual in phase,
unless otherwise indicated.
"""
r = self.residuals[freq_label]
description = []
description.append("TOA Selected:")
description.append("\tNumber: %s" % r.TOA_index[index][0])
description.append("\tEpoch (MJD): %s" % r.bary_TOA[index][0])
description.append("\tFrequency (MHz): %s" % r.bary_freq[index][0])
description.append("\tPre-fit residual (phase): %s" % r.prefit_phs[index][0])
description.append("\tPre-fit residual (usec): %s" % (r.prefit_sec[index][0]*1e6))
description.append("\tPost-fit residual (phase): %s" % r.postfit_phs[index][0])
description.append("\tPost-fit residual (usec): %s" % (r.postfit_sec[index][0]*1e6))
if postfit:
description.append("\tUncertainty (phase): %s" % (r.uncertainty[index][0]/r.outpar.P0))
else:
description.append("\tUncertainty (phase): %s" % (r.uncertainty[index][0]/r.inpar.P0))
description.append("\tUncertainty (usec): %s" % (r.uncertainty[index][0]*1e6))
return description
class Resids(object):
"""The Resids object contains the following information
about TEMPO residuals:
bary_TOA
bary_freq
numTOAs
orbit_phs
postfit_phs
postfit_sec
prefit_phs
prefit_sec
uncertainty
weight
"""
def __init__(self, bary_TOA, bary_freq, TOA_index, orbit_phs, \
postfit_phs, postfit_sec, prefit_phs, prefit_sec, \
uncertainty, weight, inpar, outpar):
self.bary_TOA = bary_TOA
self.bary_freq = bary_freq
self.TOA_index = TOA_index
self.orbit_phs = orbit_phs
self.postfit_phs = postfit_phs
self.postfit_sec = postfit_sec
self.prefit_phs = prefit_phs
self.prefit_sec = prefit_sec
self.uncertainty = uncertainty
self.weight = weight
self.inpar = inpar
self.outpar = outpar
def get_xdata(self, key):
"""Return label describing xaxis and the corresponding
data given keyword 'key'.
"""
# Python2/3 compatible way of checking for string types
# Taken from https://stackoverflow.com/questions/11301138/how-to-check-if-variable-is-string-with-python-2-and-3-compatibility
try:
basestring
except NameError:
basestring = str
if not isinstance(key, basestring):
raise ValueError("key must be of type string.")
xopt = key.lower()
if xopt == 'numtoa':
xdata = self.TOA_index
xlabel = "TOA Number"
elif xopt == 'mjd':
xdata = self.bary_TOA
xlabel = "MJD"
elif xopt == 'orbitphase':
xdata = self.orbit_phs
xlabel = "Orbital Phase"
elif xopt == 'year':
xdata = mjd_to_year(self.bary_TOA)
xlabel = "Year"
else:
raise ValueError("Unknown xaxis type (%s)." % xopt)
return (xlabel, xdata)
def get_ydata(self, key, postfit=True):
"""Return label describing yaxis and the corresponding
data/errors given keyword 'key'.
'postfit' is a boolean argument that determines if
postfit, or prefit data is to be returned.
"""
# Python2/3 compatible way of checking for string types
# Taken from https://stackoverflow.com/questions/11301138/how-to-check-if-variable-is-string-with-python-2-and-3-compatibility
try:
basestring
except NameError:
basestring = str
if not isinstance(key, basestring):
raise ValueError("key must be of type string.")
yopt = key.lower()
if postfit:
if yopt == 'phase':
ydata = self.postfit_phs
#
# NOTE: Should use P at TOA not at PEPOCH
#
yerror = self.uncertainty/self.outpar.P0
ylabel = "Residuals (Phase)"
elif yopt == 'usec':
ydata = self.postfit_sec*1e6
yerror = self.uncertainty*1e6
ylabel = "Residuals (uSeconds)"
elif yopt == 'sec':
ydata = self.postfit_sec
yerror = self.uncertainty
ylabel = "Residuals (Seconds)"
else:
raise ValueError("Unknown yaxis type (%s)." % yopt)
else:
if yopt=='phase':
ydata = self.prefit_phs
#
# NOTE: Should use P at TOA not at PEPOCH
#
yerror = self.uncertainty/self.inpar.P0
ylabel = "Residuals (Phase)"
elif yopt=='usec':
ydata = self.prefit_sec*1e6
yerror = self.uncertainty*1e6
ylabel = "Residuals (uSeconds)"
elif yopt=='sec':
ydata = self.prefit_sec
yerror = self.uncertainty
ylabel = "Residuals (Seconds)"
else:
raise ValueError("Unknown yaxis type (%s)." % yopt)
return (ylabel, ydata, yerror)
def plot_data(tempo_results, xkey, ykey, postfit=True, prefit=False, \
interactive=True, mark_peri=False, show_legend=True):
# figure out what should be plotted
# True means to plot postfit
# False means to plot prefit
if postfit and prefit:
to_plot_postfit = [False, True]
elif postfit and not prefit:
to_plot_postfit = [True]
elif not postfit and prefit:
to_plot_postfit = [False]
else:
raise EmptyPlotValueError("At least one of prefit and postfit must be True.")
subplot = 1
numsubplots = len(to_plot_postfit)
global axes
axes = []
handles = []
labels = []
for usepostfit in to_plot_postfit:
TOAcount = 0
# All subplots are in a single column
if subplot == 1:
axes.append(plt.subplot(numsubplots, 1, subplot))
else:
axes.append(plt.subplot(numsubplots, 1, subplot, sharex=axes[0]))
# set tick formatter to not use scientific notation or an offset
tick_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
tick_formatter.set_scientific(False)
axes[-1].xaxis.set_major_formatter(tick_formatter)
xmin, xmax = axes[0].get_xlim()
for ii,(lo,hi) in enumerate(tempo_results.freqbands):
freq_label = get_freq_label(lo, hi)
resids = tempo_results.residuals[freq_label]
xlabel, xdata = resids.get_xdata(str(xkey))
ylabel, ydata, yerr = resids.get_ydata(str(ykey), usepostfit)
if len(xdata):
# Plot the residuals
handle = plt.errorbar(xdata, ydata, yerr=yerr, fmt='.', \
label=freq_label, picker=5,
c=colors[len(tempo_results.freqbands)][ii])
# Label isn't being set as expected. Use the following
# as a kludgey work-around.
handle[0].set_label(freq_label)
if subplot == 1:
handles.append(handle[0])
labels.append(freq_label)
TOAcount += xdata.size
if subplot > 1:
axes[0].set_xlim((xmin, xmax))
# Finish off the plot
plt.axhline(0, ls='--', label="_nolegend_", c='k', lw=0.5)
axes[-1].ticklabel_format(style='plain', axis='x')
if mark_peri and hasattr(tempo_results.outpar, 'BINARY'):
# Be sure to check if pulsar is in a binary
# Cannot mark passage of periastron if not a binary
if usepostfit:
binpsr = binary_psr.binary_psr(tempo_results.outpar.FILE)
else:
binpsr = binary_psr.binary_psr(tempo_results.inpar.FILE)
xmin, xmax = axes[0].get_xlim()
mjd_min = tempo_results.min_TOA
mjd_max = tempo_results.max_TOA
guess_mjds = np.arange(mjd_max + binpsr.par.PB, \
mjd_min - binpsr.par.PB, -binpsr.par.PB)
for mjd in guess_mjds:
peri_mjd = binpsr.most_recent_peri(float(mjd))
if xkey == 'mjd':
plt.axvline(peri_mjd, ls=':', label='_nolegend_', c='k', lw=0.5)
elif xkey == 'year':
print("plotting peri passage")
plt.axvline(mjd_to_year(peri_mjd), ls=':', label='_nolegend_', c='k', lw=0.5)
axes[0].set_xlim((xmin, xmax))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if usepostfit:
plt.title("Postfit Residuals (Number of TOAs: %d)" % TOAcount)
else:
plt.title("Prefit Residuals (Number of TOAs: %d)" % TOAcount)
subplot += 1
if numsubplots > 1:
# Increase spacing between subplots.
plt.subplots_adjust(hspace=0.25)
# Write name of input files used for timing on figure
if interactive:
fntext = "TOA file: %s, Parameter file: %s" % \
(tempo_results.intimfn, tempo_results.inparfn)
figure_text = plt.figtext(0.01, 0.01, fntext, verticalalignment='bottom', \
horizontalalignment='left')
# Make the legend and set its visibility state
leg = plt.figlegend(handles, labels, 'upper right')
leg.set_visible(show_legend)
leg.legendPatch.set_alpha(0.5)
def create_plot():
# Set up the plot
fig = plt.figure(figsize=(11,8.5))
def get_freq_label(lo, hi):
"""Return frequency label given a lo and hi
frequency pair.
"""
if hi==np.inf:
return "%.0f - Inf MHz" % (lo)
else:
return "%.0f - %.0f MHz" % (lo, hi)
def savefigure(savefn='./resid2.tmp.ps'):
print("Saving plot to %s" % savefn)
plt.savefig(savefn, orientation='landscape', papertype='letter')
def reloadplot():
global options
global tempo_results
# Reload residuals and replot
print("Plotting...")
fig = plt.gcf()
fig.set_visible(False)
plt.clf() # clear figure
tempo_results = TempoResults(options.freqbands)
if options.freqbands is None:
options.freqbands = tempo_results.freqbands
try:
plot_data(tempo_results, options.xaxis, options.yaxis, \
postfit=options.postfit, prefit=options.prefit, \
interactive=options.interactive, \
mark_peri=options.mark_peri, show_legend=options.legend)
except EmptyPlotValueError as msg:
print(msg)
print("Press 'p'/'P' to add prefit/postfit plot.")
plt.figtext(0.5, 0.5, (str(msg) + "\n" + \
"Press 'p'/'P' to add prefit/postfit plot."), \
horizontalalignment='center', \
verticalalignment='center', \
bbox=dict(facecolor='white', alpha=0.75))
fig.set_visible(True)
redrawplot()
def redrawplot():
plt.draw() #plt.show is keeping the plot open on nimrod, as opposed to plt.draw
#plt.show()
def quit():
print("Quitting...")
sys.exit(0)
def pick(event):
global tempo_results
index = event.ind
axes = event.mouseevent.inaxes
if axes:
title = axes.get_title()
postfit = ("Postfit" in title)
if len(index) == 1:
freq_label = event.artist.get_label()
info = tempo_results.get_info(freq_label, index, postfit)
print_text(info)
else:
print("Multiple TOAs selected. Zoom in and try again.")
def print_text(lines, *args, **kwargs):
"""Print lines of text (in a list) in the terminal."""
print('\n'.join(lines))
def print_help():
# Display help
print("Helping...")
print("-"*80)
print("Help - Hotkeys definitions:")
print("\th - Display this help")
print("\tq - Quit")
print("\ts - Save current plot(s) to PostScript file")
print("\tc - Try to determine optimal color pallete")
print("\tp - Toggle prefit display on/off")
print("\tP - Toggle postfit display on/off")
print("\tz - Toggle Zoom-mode on/off")
print("\tm - Toggle marking of periastron passages on/off")
print("\tL - Toggle legend on/off")
print("\to - Go to original view")
print("\t< - Go to previous view")
print("\t> - Go to next view")
print("\tx - Set x-axis limits (terminal input required)")
print("\ty - Sey y-axis limits (terminal input required)")
print("\tr - Reload residuals")
print("\tt - Cycle through y-axis types ('phase', 'usec', 'sec')")
print("\t[Space] - Cycle through x-axis types ('MJD', 'year', 'numTOA', 'orbitphase')")
print("\t[Left mouse] - Select TOA (display info in terminal)")
print("\t - Select zoom region (if Zoom-mode is on)")
print("-"*80)
def keypress(event):
global tempo_results
global options
global xind, xvals
global yind, yvals
if type(event.key) in [bytes, str]:
if event.key.lower() == 'q':
quit()
elif event.key.lower() == 's':
savefigure()
elif event.key.lower() == 'c':
options.freqbands = None
reloadplot()
elif event.key.lower() == 'r':
reloadplot()
elif event.key.upper() == 'L':
leg = plt.gcf().legends[0]
options.legend = not options.legend
leg.set_visible(options.legend)
redrawplot()
elif event.key.lower() == 'z':
# Turn on zoom mode
print("Toggling zoom mode...")
event.canvas.toolbar.zoom()
elif event.key.lower() == 'm':
# Toggle peri markings
print("Toggling periastron passage markings...")
options.mark_peri = not options.mark_peri
reloadplot()
elif event.key.lower() == 'o':
# Restore plot to original view
print("Restoring plot...")
event.canvas.toolbar.home()
elif event.key.lower() == ',' or event.key.lower() == '<':
# Go back to previous plot view
print("Going back...")
event.canvas.toolbar.back()
elif event.key.lower() == '.' or event.key.lower() == '>':
# Go forward to next plot view
print("Going forward...")
event.canvas.toolbar.forward()
elif event.key.lower() == ' ':
xind = (xind + 1) % len(xvals)
print("Toggling plot type...[%s]"%xvals[xind], xind)
options.xaxis = xvals[xind]
reloadplot()
elif event.key.lower() == 't':
yind = (yind + 1) % len(yvals)
print("Toggling plot scale...[%s]"%yvals[yind], yind)
options.yaxis = yvals[yind]
reloadplot()
elif event.key == 'p':
options.prefit = not options.prefit
print("Toggling prefit-residuals display to: %s" % \
((options.prefit and "ON") or "OFF"))
reloadplot()
elif event.key == 'P':
options.postfit = not options.postfit
print("Toggling postfit-residuals display to: %s" % \
((options.postfit and "ON") or "OFF"))
reloadplot()
elif event.key.lower() == 'x':
# Set x-axis limits
print("Setting x-axis limits. User input required...")
xmin = input("X-axis minimum: ")
xmax = input("X-axis maximum: ")
try:
xmin = float(xmin)
xmax = float(xmax)
if xmax <= xmin:
raise ValueError
except ValueError:
print("Bad values provided!")
return
plt.xlim(xmin, xmax)
elif event.key.lower() == 'y':
global axes
# Set y-axis limits
print("Setting y-axis limits. User input required...")
if len(axes) == 2:
axes_to_adjust = input("Axes to adjust (pre/post): ")
if axes_to_adjust.lower().startswith('pre'):
plt.axes(axes[0])
elif axes_to_adjust.lower().startswith('post'):
plt.axes(axes[1])
else:
raise ValueError
ymin = input("Y-axis minimum: ")
ymax = input("Y-axis maximum: ")
try:
ymin = float(ymin)
ymax = float(ymax)
if ymax <= ymin:
raise ValueError
except ValueError:
print("Bad values provided!")
return
plt.ylim(ymin, ymax)
elif event.key.lower() == 'h':
print_help()
def mjd_to_year(mjds):
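    """Convert MJD(s) to decimal calendar year(s) using SLALIB,
    accounting for leap years.
    """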
mjds = np.asarray(mjds)
if mjds.size < 1:
return mjds
old_shape = mjds.shape # Remember original shape
mjds.shape = (mjds.size, 1)
years, months, days, fracs, stats = np.apply_along_axis(slalib.sla_djcl, 1, mjds).transpose()
# Take into account leap years
daysperyear = (((years % 4) == 0) & (((years % 100) != 0) | ((years % 400) == 0))) * 1 + 365.0
years, days, stats = np.array([slalib.sla_clyd(*ymd) for ymd in np.vstack((years, months, days)).transpose()]).transpose()
mjds.shape = old_shape # Change back to original shape
return (years + (days + fracs) / daysperyear)
def parse_options():
(options, sys.argv) = parser.parse_args()
if sys.argv==[]:
sys.argv = ['pyplotres.py']
if not options.freqs:
# Default frequency bands
freqbands = [['0', '400'],
['400', '600'],
['600', '1000'],
['1000', '1600'],
['1600', '2400'],
['2400', 'inf']]
else:
freqbands = []
for fopt in options.freqs:
f = fopt.split(':')
if f[0]=='':
f[0] = '0'
if f[-1]=='':
f[-1] = 'inf'
if len(f) > 2:
for i in range(0, len(f)-1):
freqbands.append(f[i:i+2])
else:
freqbands.append(f)
freqbands = np.array(freqbands).astype(float)
        freqbands = freqbands[freqbands.argsort(axis=0).transpose()[0]]
if np.any(freqbands.flat != sorted(freqbands.flat)):
raise ValueError("Frequency bands have overlaps or are inverted.")
options.freqbands = freqbands
if not options.prefit and not options.postfit:
# If neither prefit or postfit are selected
# show postfit
options.postfit = True
if options.xaxis.lower() not in xvals:
raise BadOptionValueError("Option to -x/--x-axis (%s) is not permitted." % \
options.xaxis)
if options.yaxis.lower() not in yvals:
raise BadOptionValueError("Option to -y/--y-axis (%s) is not permitted." % \
options.yaxis)
return options
def main():
global tempo_results
global options
options = parse_options()
tempo_results = TempoResults(options.freqbands)
create_plot()
reloadplot()
if options.interactive:
fig = plt.gcf() # current figure
# Before setting up our own event handlers delete matplotlib's
# default 'key_press_event' handler.
defcids = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
for cid in defcids:
fig.canvas.callbacks.disconnect(cid)
# Now, register our event callback functions
cid_keypress = fig.canvas.mpl_connect('key_press_event', keypress)
cid_pick = fig.canvas.mpl_connect('pick_event', pick)
# Finally, let the show begin!
#plt.ion()
plt.show()
else:
# Save figure and quit
savefigure()
quit()
class BadOptionValueError(ValueError):
"""Bad value passed to option parser.
"""
pass
class EmptyPlotValueError(ValueError):
"""Empty plot.
"""
pass
if __name__=='__main__':
parser = optparse.OptionParser(prog="pyplotres.py", \
version="v1.2 Patrick Lazarus (Mar. 29, 2010)")
parser.add_option('-f', '--freq', dest='freqs', action='append', \
help="Band of frequencies, in MHz, to be plotted " \
"(format xxx:yyy). Each band will have a " \
" different colour. Multiple -f/--freq options " \
" are allowed. (Default: Plot all frequencies " \
"in single colour.)", \
default=[])
parser.add_option('-x', '--x-axis', dest='xaxis', type='string', \
help="Values to plot on x-axis. Must be one of " \
"%s. (Default: '%s')" % (str(xvals), xvals[xind]),
default=xvals[xind])
parser.add_option('-y', '--y-axis', dest='yaxis', type='string', \
help="Values to plot on y-axis. Must be one of "
"%s. (Default: '%s')" % (str(yvals), yvals[yind]), \
default=yvals[yind])
parser.add_option('--post', dest='postfit', action='store_true', \
help="Show postfit residuals. (Default: Don't show " \
"postfit.)", \
default=False)
parser.add_option('--pre', dest='prefit', action='store_true', \
help="Show prefit residuals. (Default: Don't show " \
"prefit.)", \
default=False)
parser.add_option('-l', '--legend', dest='legend', action='store_true', \
help="Show legend of frequencies. (Default: Do not " \
"show legend.)", \
default=False)
parser.add_option('--mark-peri', dest='mark_peri', action='store_true', \
help="Mark passage of periastron. (Default: don't " \
"mark periastron.)", \
default=False)
parser.add_option('--non-interactive', dest='interactive', \
action='store_false', default=True, \
help="Save figure and exit. (Default: Show plot, " \
"only save if requested.)")
main()
| 28,009 | 37.687845 | 134 | py |
presto | presto-master/bin/simple_zapbirds.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
import sys
import os
import numpy as np
import presto.infodata as pi
import presto.presto as pp
scopes = {"gbt": "GB", "arecibo": "AO", "vla": "VL", "parkes": "PK",
"jodrell": "JB", "gb43m": "G1", "gb 140ft": "G1", "nrao20": "G1",
"nancay": "NC", "effelsberg": "EF", "srt": "SR", "fast": "FA",
"wsrt": "WT", "gmrt": "GM", "chime": "CH", "lofar": "LF",
"lwa": "LW", "mwa": "MW", "meerkat": "MK", "ata": "AT",
"k7": "K7", "geocenter": "0 "}
def mod_get_baryv(ra, dec, mjd, T, obs="PK", bary=True):
"""
mod_get_baryv(ra, dec, mjd, T, obs="PK", bary=True):
Determine the average barycentric velocity towards 'ra', 'dec'
during an observation from 'obs'. The RA and DEC are in the
standard string format (i.e. 'hh:mm:ss.ssss' and 'dd:mm:ss.ssss').
'T' is in sec and 'mjd' is (of course) in MJD. The obs variable
is the standard two character string from TEMPO: PK, GB, AO, GM, JB, ...
If bary is true, we will need to back out the topocentric times
"""
if bary:
tts = np.linspace(mjd, mjd + T / 86400.0, 200)
else:
tts = np.linspace(mjd - 500.0 /86400.0,
mjd + (T + 500) / 86400.0, 200)
nn = len(tts)
bts = np.zeros(nn, dtype=np.float64)
vel = np.zeros(nn, dtype=np.float64)
pp.barycenter(tts, bts, vel, ra, dec, obs, "DE421")
if bary:
ind0 = np.fabs(tts - mjd).argmin()
ind1 = np.fabs(tts - (mjd + T / 86400.0)).argmin()
# newtts = tts - (bts - tts)
return vel[ind0:ind1].mean()
else:
return vel.mean()
def group_infiles(infilenms):
"""Find the common basenames of files, and sort each by numerical DM, if present
Parameters
----------
infilenms : list of strings
These are the input filenames
Returns
-------
sorted list of filenames
"""
# Make sure that all the files are ".fft" files
for infilenm in infilenms:
assert(infilenm.endswith(".fft"))
# Sort the filenames
names = sorted(infilenms)
basenames = []
DMs = []
for name in names:
try:
ind = name.rfind("_DM")
if name[:ind] not in basenames:
basenames.append(name[:ind])
try:
dm = float(name[ind+3:-4])
except ValueError:
dm = None
except ValueError:
if name[:-4] not in basenames:
basenames.append(name[:-4])
dm = None
DMs.append(dm)
if len(basenames)==1:
print(f"All files have the same basename '{basenames[0]}'")
if len(DMs)>1 and None in DMs:
print("Not all input file names have DM values")
# Now sort via basename first, then DM, then anything else
outnames = []
for basename in basenames:
tmp = []
nodms = []
for ii, name in enumerate(names):
if name.startswith(basename):
if DMs[ii] is not None:
tmp.append((DMs[ii], name))
else:
nodms.append(name)
tmp = sorted(tmp) # This sorts by DM, numerically
for fn in tmp: # These are the files with DMs
outnames.append(fn[1])
for fn in nodms: # These are the files without DMs
outnames.append(fn)
assert(len(outnames)==len(names))
return basenames, outnames
def read_birds(birdsname):
print(f"Reading the birds from '{birdsname}'")
with open(birdsname, "r") as bfile:
return bfile.readlines()
def process_birds(birdlines, T, baryv, info):
psrs = 0
freqs = 0
trains = 0
birds = []
# PSRs get 40 bins minimum zapped (overkill for most,
# but required for the _really_ bright ones
min_psr_width = 40.0 / T
for line in birdlines:
bary = 0
baryfact = 1.0
line = line[:-1]
if (len(line)<=3 or line[0]=='#'):
continue
elif (line[0]=='P'):
(tmp, psrname, numharm) = line.split()
numharm = int(numharm)
psr = pp.psrepoch(psrname, info.epoch)
if (psr.orb.p):
(minv, maxv) = pp.binary_velocity(T, psr.orb)
psrs += 1
for harm in range(1, numharm+1):
if (psr.orb.p):
midv = 0.5 * (maxv + minv)
midf = (1.0 + midv) * psr.f * harm
width = (maxv - minv) * psr.f * harm
if (0.1 * width < min_psr_width):
width = width + min_psr_width
else:
width = width * 1.1
else:
midf = psr.f * harm
width = min_psr_width
if info.bary==0:
midf /= (1.0 + baryv)
birds.append((midf, width))
else:
words = line.split()
increase_width = 0
bary = 0
if (len(words) >= 3):
freq = float(words[0])
width = float(words[1])
numharm = int(words[2])
if (len(words) >= 4):
increase_width = int(words[3])
if (len(words) >= 5):
bary = int(words[4])
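                # If the listed frequency and the FFT are not in the same
                # reference frame (topocentric vs barycentric), scale the
                # frequency by the mean barycentric velocity factor.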
if info.bary:
baryfact = 1.0 if bary else (1.0 + baryv)
else:
baryfact = (1.0 + baryv) if bary else 1.0
trains += 1
if (increase_width):
for harm in range(1, numharm+1):
birds.append((freq * harm * baryfact, width * harm))
else:
for harm in range(1, numharm+1):
birds.append((freq * harm * baryfact, width))
else:
freqs += 1
birds.append((float(words[0]), float(words[1])))
print(" Read %d freqs, %d pulsars, and %d harmonic series." % \
(freqs, psrs, trains))
print(" Total number of birdies = %d" % (len(birds)))
return sorted(birds)
def zapfile(fftfile, zaplist, info):
"""Zap the frequencies and widths in zaplist from fftfile
Parameters
----------
fftfile : file oject
The .fft file that will be zapped (opened in "rb+" mode)
zaplist : list of tuples
List of (freq, width)s (in Hz) to zap
info : infodata object
From the .inf file describing the .fft file
"""
# Use memory-mapping
ft = np.memmap(fftfile, mode='r+', dtype='complex64')
T = info.dt * info.N
for (f, w) in zaplist:
lor = int(np.round((f - 0.5 * w) * T))
if lor < 1: lor = 1
if lor > len(ft): break
hir = int(np.round((f + 0.5 * w) * T)) + 1
if hir > len(ft): hir = len(ft)
# print(lor, hir, lor/T, hir/T)
# To zap, so that median normalization works, and the Fourier
# phases are still correct, get the median level just outside
# the window, and use that as the target level within the
# zap window. Remember that median != mean in power spectra
winlol = int(np.round((f - 2 * w) * T))
if winlol < 1: winlol = 1
winhir = int(np.round((f + 2 * w) * T))
if winhir > len(ft): winhir = len(ft)
win = np.abs(np.concatenate((ft[winlol:lor], ft[hir:winhir])))
        tgt = np.sqrt(np.median(win**2) / np.log(2)) # amplitude from mean power (mean = median/ln2 for exponential powers)
# the following sets each zap region aplitude to tgt
ft[lor:hir] *= tgt / np.abs(ft[lor:hir])
ft.flush()
fftfile.close()
if __name__ == '__main__':
if len(sys.argv)==1:
print(
"""\nusage: simple_zapbirds.py .birdsfile .fftfile(s)
This routine does what makezaplist.py and zapbirds do, but all in
one command, and over multiple .fft files. It also auto-determines
the barycentric velocity.
The format of the .birds file is a simple text file as shown below.
Lines starting with '#' are comments and with 'P' are assumed to name a
pulsar in the ATNF catalog. The only columns that are required are the
first (which specifies a freq, in Hz) and the second, which specifies
the width (or, if a pulsar, the number of harmonics zapped). All others
are optional.
The 'grow' flag specifies if the width for each harmonic increases in size.
That is sometimes useful for some types of RFI or for binary pulsars.
The 'bary' column tells whether the specified freq is barycentric
or not (i.e. topocentric, like pure, local, RFI tones).
Example .birds file:
#-------------------------------------
# Freq Width #harm grow? bary?
#-------------------------------------
28.760 0.1 3 0 0
60.0 0.05 2 1 0
# Zaps 10 harmonics for PSR J1643-1224:
PSR J1643-1224 10
# Zap 100 Hz with a width of 0.2 Hz
100.0 0.2
""")
else:
birds = read_birds(sys.argv[1])
bases, infilenms = group_infiles(sys.argv[2:])
lastsize = 0
lastT = 0
lastbase = bases[0]
baryv = 0
for infilenm in infilenms:
currsize = os.stat(infilenm).st_size
with open(infilenm, "rb+") as infile:
currbase = [x for x in bases if infilenm.startswith(x)][-1]
if (currsize != lastsize) or (currbase != lastbase):
fn, ext = os.path.splitext(infilenm)
print(f"Reading file info from '{fn}.inf'")
info = pi.infodata(fn+".inf")
currT = info.dt * info.N
# Only re-compute baryv if we need to
if baryv==0 or (currbase != lastbase):
baryv = mod_get_baryv(info.RA, info.DEC, info.epoch, currT,
obs=scopes[info.telescope.lower()],
bary=info.bary)
# Only re-compute freqs to zap if the times are also different
if (currT != lastT):
zaplist = process_birds(birds, currT, baryv, info)
# print(zaplist)
# Now actually do the zapping
print(f"Zapping '{infilenm}' ... ", end='')
zapfile(infile, zaplist, info)
print("done.")
lastsize = currsize
lastbase = currbase
lastT = currT
| 10,606 | 36.613475 | 84 | py |
presto | presto-master/bin/waterfaller.py | #!/usr/bin/env python
"""
waterfaller.py
Make waterfall plots to show frequency sweep of a single pulse.
Reads PSRFITS or SIGPROC filterbank format files.
Patrick Lazarus - Aug. 19, 2011
Paul Scholz - Nov 2015
"""
import optparse
import matplotlib.pyplot as plt
import matplotlib.cm
import numpy as np
from presto import psr_utils
from presto import rfifind
from presto import psrfits
from presto import filterbank
SWEEP_STYLES = ['r-', 'b-', 'g-', 'm-', 'c-']
def get_mask(rfimask, startsamp, N):
"""Return an array of boolean values to act as a mask
for a Spectra object.
Inputs:
rfimask: An rfifind.rfifind object
startsamp: Starting sample
N: number of samples to read
Output:
mask: 2D numpy array of boolean values.
True represents an element that should be masked.
"""
sampnums = np.arange(startsamp, startsamp+N)
blocknums = np.floor(sampnums/rfimask.ptsperint).astype('int')
mask = np.zeros((N, rfimask.nchan), dtype='bool')
for blocknum in np.unique(blocknums):
blockmask = np.zeros_like(mask[blocknums==blocknum])
chans_to_mask = rfimask.mask_zap_chans_per_int[blocknum]
if chans_to_mask.any():
blockmask[:,chans_to_mask] = True
mask[blocknums==blocknum] = blockmask
return mask.T
def maskfile(maskfn, data, start_bin, nbinsextra):
rfimask = rfifind.rfifind(maskfn)
mask = get_mask(rfimask, start_bin, nbinsextra)[::-1]
masked_chans = mask.all(axis=1)
# Mask data
data = data.masked(mask, maskval='median-mid80')
#datacopy = copy.deepcopy(data)
return data, masked_chans
def waterfall(rawdatafile, start, duration, dm=None, nbins=None, nsub=None,\
subdm=None, zerodm=False, downsamp=1, scaleindep=False,\
width_bins=1, mask=False, maskfn=None, bandpass_corr=False,
ref_freq=None):
"""
Create a waterfall plot (i.e. dynamic specrum) from a raw data file.
Inputs:
rawdatafile - a PsrfitsData instance.
start - start time of the data to be read in for waterfalling.
duration - duration of data to be waterfalled.
Optional Inputs:
dm - DM to use when dedispersing data.
Default: Don't de-disperse
nbins - Number of time bins to plot. This option overrides
the duration argument.
Default: determine nbins from duration.
nsub - Number of subbands to use. Must be a factor of number of channels.
Default: Number of channels.
subdm - DM to use when subbanding. Default: same as dm argument.
zerodm - subtract mean of each time-sample from data before
de-dispersing.
downsamp - Factor to downsample in time by. Default: Don't downsample.
scaleindep - Scale each channel independently.
Default: Scale using global maximum.
width_bins - Smooth each channel/subband with a boxcar width_bins wide.
Default: Don't smooth.
maskfn - Filename of RFIFIND mask to use for masking data.
Default: Don't mask data.
bandpass_corr - Correct for the bandpass. Requires an rfifind
mask provided by maskfn keyword argument.
Default: Do not remove bandpass.
ref_freq - Reference frequency to de-disperse to.
If subbanding and de-dispersing the start time
will be corrected to account for change in
reference frequency.
Default: Frequency of top channel.
Outputs:
data - Spectra instance of waterfalled data cube.
nbinsextra - number of time bins read in from raw data.
nbins - number of bins in duration.
start - corrected start time.
"""
if subdm is None:
subdm = dm
# Read data
if ref_freq is None:
ref_freq = rawdatafile.freqs.max()
if nsub and dm:
df = rawdatafile.freqs[1] - rawdatafile.freqs[0]
nchan_per_sub = rawdatafile.nchan/nsub
top_ctrfreq = rawdatafile.freqs.max() - \
0.5*nchan_per_sub*df # center of top subband
start += 4.15e3 * np.abs(1./ref_freq**2 - 1./top_ctrfreq**2) * dm
start_bin = np.round(start/rawdatafile.tsamp).astype('int')
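    # Dispersive delay across the band is ~ 4.15e3 * DM * (1/f_lo^2 - 1/f_hi^2)
    # seconds (f in MHz), so dmfac*DM extra seconds of data are read below to
    # allow the requested duration to be fully de-dispersed.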
dmfac = 4.15e3 * np.abs(1./rawdatafile.frequencies[0]**2 - 1./rawdatafile.frequencies[-1]**2)
if nbins is None:
nbins = np.round(duration/rawdatafile.tsamp).astype('int')
if dm:
nbinsextra = np.round((duration + dmfac * dm)/rawdatafile.tsamp).astype('int')
else:
nbinsextra = nbins
# If at end of observation
if (start_bin + nbinsextra) > rawdatafile.nspec-1:
nbinsextra = rawdatafile.nspec-1-start_bin
data = rawdatafile.get_spectra(start_bin, nbinsextra)
# Masking
if mask and maskfn:
data, masked_chans = maskfile(maskfn, data, start_bin, nbinsextra)
else:
masked_chans = np.zeros(rawdatafile.nchan,dtype=bool)
# Bandpass correction
if maskfn and bandpass_corr:
bandpass = rfifind.rfifind(maskfn).bandpass_avg[::-1]
#bandpass[bandpass == 0] = np.min(bandpass[np.nonzero(bandpass)])
masked_chans[bandpass == 0] = True
# ignore top and bottom 1% of band
        ignore_chans = int(np.ceil(0.01*rawdatafile.nchan))
masked_chans[:ignore_chans] = True
masked_chans[-ignore_chans:] = True
data_masked = np.ma.masked_array(data.data)
data_masked[masked_chans] = np.ma.masked
data.data = data_masked
if bandpass_corr:
data.data /= bandpass[:, None]
# Zerodm filtering
if (zerodm == True):
data.data -= data.data.mean(axis=0)
# Subband data
if (nsub is not None) and (subdm is not None):
data.subband(nsub, subdm, padval='mean')
# Dedisperse
if dm:
data.dedisperse(dm, padval='mean')
# Downsample
data.downsample(downsamp)
# scale data
data = data.scaled(scaleindep)
# Smooth
if width_bins > 1:
data.smooth(width_bins, padval='mean')
return data, nbinsextra, nbins, start
def plot_waterfall(data, start, duration,
integrate_ts=False, integrate_spec=False, show_cb=False,
cmap_str="gist_yarg", sweep_dms=[], sweep_posns=[],
ax_im=None, ax_ts=None, ax_spec=None, interactive=True):
""" I want a docstring too!
"""
# Set up axes
if interactive:
fig = plt.figure()
fig.canvas.set_window_title("Frequency vs. Time")
im_width = 0.6 if integrate_spec else 0.8
im_height = 0.6 if integrate_ts else 0.8
if not ax_im:
ax_im = plt.axes((0.15, 0.15, im_width, im_height))
if integrate_ts and not ax_ts:
ax_ts = plt.axes((0.15, 0.75, im_width, 0.2),sharex=ax_im)
if integrate_spec and not ax_spec:
ax_spec = plt.axes((0.75, 0.15, 0.2, im_height),sharey=ax_im)
# Ploting it up
nbinlim = np.int64(duration/data.dt)
img = ax_im.imshow(data.data[..., :nbinlim], aspect='auto',
cmap=matplotlib.cm.cmap_d[cmap_str],
interpolation='nearest', origin='upper',
extent=(data.starttime, data.starttime+ nbinlim*data.dt,
data.freqs.min(), data.freqs.max()))
if show_cb:
cb = ax_im.get_figure().colorbar(img)
cb.set_label("Scaled signal intensity (arbitrary units)")
#plt.axis('tight')
# Sweeping it up
for ii, sweep_dm in enumerate(sweep_dms):
ddm = sweep_dm-data.dm
delays = psr_utils.delay_from_DM(ddm, data.freqs)
delays -= delays.min()
if sweep_posns is None:
sweep_posn = 0.0
elif len(sweep_posns) == 1:
sweep_posn = sweep_posns[0]
else:
sweep_posn = sweep_posns[ii]
sweepstart = data.dt*data.numspectra*sweep_posn+data.starttime
sty = SWEEP_STYLES[ii%len(SWEEP_STYLES)]
ax_im.plot(delays+sweepstart, data.freqs, sty, lw=4, alpha=0.5)
# Dressing it up
ax_im.xaxis.get_major_formatter().set_useOffset(False)
ax_im.set_xlabel("Time")
ax_im.set_ylabel("Observing frequency (MHz)")
# Plot Time series
if integrate_ts:
Data = np.array(data.data[..., :nbinlim])
Dedisp_ts = Data.sum(axis=0)
times = (np.arange(data.numspectra)*data.dt + start)[..., :nbinlim]
ax_ts.plot(times, Dedisp_ts,"k")
ax_ts.set_xlim([times.min(),times.max()])
plt.setp(ax_ts.get_xticklabels(), visible = False)
plt.setp(ax_ts.get_yticklabels(), visible = False)
# Plot Spectrum
if integrate_spec:
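        # Integrate the spectrum over a window of +/- 5% of the plotted
        # duration, centred on the middle of the plot (the assumed burst
        # position); the window edges are marked on the time series, if shown.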
spectrum_window = 0.05*duration
window_width = int(spectrum_window/data.dt) # bins
burst_bin = nbinlim//2
on_spec = np.array(data.data[..., burst_bin-window_width:burst_bin+window_width])
Dedisp_spec = on_spec.sum(axis=1)[::-1]
freqs = np.linspace(data.freqs.min(), data.freqs.max(), len(Dedisp_spec))
ax_spec.plot(Dedisp_spec,freqs,"k")
plt.setp(ax_spec.get_xticklabels(), visible = False)
plt.setp(ax_spec.get_yticklabels(), visible = False)
ax_spec.set_ylim([data.freqs.min(),data.freqs.max()])
if integrate_ts:
ax_ts.axvline(times[burst_bin]-spectrum_window,ls="--",c="grey")
ax_ts.axvline(times[burst_bin]+spectrum_window,ls="--",c="grey")
if interactive:
fig.suptitle("Frequency vs. Time")
fig.canvas.mpl_connect('key_press_event',
lambda ev: (ev.key in ('q','Q') and plt.close(fig)))
plt.show()
def main():
fn = args[0]
if fn.endswith(".fil"):
# Filterbank file
filetype = "filterbank"
rawdatafile = filterbank.FilterbankFile(fn)
elif fn.endswith(".fits"):
# PSRFITS file
filetype = "psrfits"
rawdatafile = psrfits.PsrfitsFile(fn)
else:
raise ValueError("Cannot recognize data file type from "
"extension. (Only '.fits' and '.fil' "
"are supported.)")
data, bins, nbins, start = waterfall(rawdatafile, options.start,
options.duration, dm=options.dm,
nbins=options.nbins, nsub=options.nsub,
subdm=options.subdm, zerodm=options.zerodm,
downsamp=options.downsamp,
scaleindep=options.scaleindep,
width_bins=options.width_bins, mask=options.mask,
maskfn=options.maskfile,
bandpass_corr=options.bandpass_corr)
plot_waterfall(data, start, options.duration, integrate_ts=options.integrate_ts,
integrate_spec=options.integrate_spec, show_cb=options.show_cb,
cmap_str=options.cmap, sweep_dms=options.sweep_dms,
sweep_posns=options.sweep_posns)
if __name__=='__main__':
parser = optparse.OptionParser(prog="waterfaller.py",
version="v0.9 Patrick Lazarus (Aug. 19, 2011)",
usage="%prog [OPTIONS] INFILE",
description="Create a waterfall plot to show the "
"frequency sweep of a single pulse "
"in psrFits data.")
parser.add_option('--subdm', dest='subdm', type='float',
help="DM to use when subbanding. (Default: "
"same as --dm)", default=None)
parser.add_option('--zerodm', dest='zerodm', action='store_true',
help="If this flag is set - Turn Zerodm filter - ON (Default: "
"OFF)", default=False)
parser.add_option('-s', '--nsub', dest='nsub', type='int',
help="Number of subbands to use. Must be a factor "
"of number of channels. (Default: "
"number of channels)", default=None)
parser.add_option('-d', '--dm', dest='dm', type='float',
help="DM to use when dedispersing data for plot. "
"(Default: 0 pc/cm^3)", default=0.0)
parser.add_option('--show-ts', dest='integrate_ts', action='store_true',
help="Plot the time series. "
"(Default: Do not show the time series)", default=False)
parser.add_option('--show-spec', dest='integrate_spec', action='store_true',
help="Plot the spectrum. "
"(Default: Do not show the spectrum)", default=False)
parser.add_option('--bandpass', dest='bandpass_corr', action='store_true',
help="Correct for the bandpass. Requires an rfifind "
"mask provided by --mask option."
"(Default: Do not remove bandpass)", default=False)
parser.add_option('-T', '--start-time', dest='start', type='float',
help="Time into observation (in seconds) at which "
"to start plot.")
parser.add_option('-t', '--duration', dest='duration', type='float',
help="Duration (in seconds) of plot.")
parser.add_option('-n', '--nbins', dest='nbins', type='int',
help="Number of time bins to plot. This option takes "
"precedence over -t/--duration if both are "
"provided.")
parser.add_option('--width-bins', dest='width_bins', type='int',
help="Smooth each channel/subband with a boxcar "
"this many bins wide. (Default: Don't smooth)",
default=1)
parser.add_option('--sweep-dm', dest='sweep_dms', type='float',
action='append',
help="Show the frequency sweep using this DM. "
"(Default: Don't show sweep)", default=[])
parser.add_option('--sweep-posn', dest='sweep_posns', type='float',
action='append',
help="Show the frequency sweep at this position. "
"The position refers to the high-frequency "
"edge of the plot. Also, the position should "
"be a number between 0 and 1, where 0 is the "
"left edge of the plot. "
"(Default: 0)", default=None)
parser.add_option('--downsamp', dest='downsamp', type='int',
help="Factor to downsample data by. (Default: 1).",
default=1)
parser.add_option('--maskfile', dest='maskfile', type='string',
help="Mask file produced by rfifind. Used for "
"masking and bandpass correction.",
default=None)
parser.add_option('--mask', dest='mask', action="store_true",
help="Mask data using rfifind mask (Default: Don't mask).",
default=False)
parser.add_option('--scaleindep', dest='scaleindep', action='store_true',
help="If this flag is set scale each channel "
"independently. (Default: Scale using "
"global maximum.)",
default=False)
parser.add_option('--show-colour-bar', dest='show_cb', action='store_true',
help="If this flag is set show a colour bar. "
"(Default: No colour bar.)",
default=False)
parser.add_option('--colour-map', dest='cmap',
help="The name of a valid matplotlib colour map."
"(Default: gist_yarg.)",
default='gist_yarg')
options, args = parser.parse_args()
    if options.start is None:
        raise ValueError("Start time (-T/--start-time) "
                         "must be given on command line!")
    if (options.duration is None) and (options.nbins is None):
        raise ValueError("One of duration (-t/--duration) "
                         "and num bins (-n/--nbins) "
                         "must be given on command line!")
if options.subdm is None:
options.subdm = options.dm
main()
| 17,088 | 41.50995 | 97 | py |
presto | presto-master/bin/dat2tim.py | #!/usr/bin/env python
from __future__ import print_function
import os, struct, sys
from presto import presto
telescope_ids = {"Fake": 0, "Arecibo": 1, "Ooty": 2, "Nancay": 3,
"Parkes": 4, "Jodrell": 5, "GBT": 6, "GMRT": 7,
"Effelsberg": 8}
machine_ids = {"FAKE": 0, "PSPM": 1, "Wapp": 2,"AOFTM": 3,
"BCPM1": 4, "OOTY": 5, "SCAMP": 6,
"GBT Pulsar Spigot": 7, "SPIGOT": 7}
def prep_string(string):
    # Encode to bytes so the packed length prefix and the text can be
    # concatenated and written to the binary .tim header
    return struct.pack('i', len(string)) + string.encode('utf-8')
def prep_double(name, value):
return prep_string(name)+struct.pack('d', float(value))
def prep_int(name, value):
return prep_string(name)+struct.pack('i', int(value))
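# Added note (not in the original): each header entry written below is a
# 32-bit length prefix followed by the keyword text, with numeric values
# packed immediately after the keyword.  For example (hypothetical values):
#   prep_int("nbits", 32) == struct.pack('i', 5) + b"nbits" + struct.pack('i', 32)
#   prep_double("tsamp", 72e-6) packs the keyword the same way, then an 8-byte double.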
def infodata_to_sigproc_header(inf):
hdr = prep_string("HEADER_START")
hdr += prep_int("telescope_id", telescope_ids[inf.telescope.strip()])
if len(inf.instrument.split()) > 1:
if inf.instrument.split()[0]=="Multibeam":
hdr += prep_int("machine_id", machine_ids["SCAMP"])
else:
hdr += prep_int("machine_id", machine_ids[inf.instrument.strip()])
hdr += prep_int("data_type", 2) # 1 = filterbank, 2 = timeseries
hdr += prep_string("source_name")
hdr += prep_string(inf.object)
hdr += prep_int("barycentric", inf.bary)
hdr += prep_int("pulsarcentric", 0)
hdr += prep_double("src_raj", inf.ra_h*10000.0 + inf.ra_m*100.0 + inf.ra_s)
if inf.dec_d > 0.0:
hdr += prep_double("src_dej", inf.dec_d*10000.0 + inf.dec_m*100.0 + inf.dec_s)
else:
hdr += prep_double("src_dej", inf.dec_d*10000.0 - inf.dec_m*100.0 - inf.dec_s)
hdr += prep_int("nbits", 32)
hdr += prep_int("nifs", 1)
hdr += prep_int("nchans", 1)
hdr += prep_double("fch1", inf.freq)
hdr += prep_double("foff", 1)
hdr += prep_double("tstart", inf.mjd_i + inf.mjd_f)
hdr += prep_double("tsamp", inf.dt)
hdr += prep_double("refdm", inf.dm)
hdr += prep_string("HEADER_END")
return hdr
if __name__ == "__main__":
if len(sys.argv) == 1:
print("\nusage: dat2tim.py file.dat\n")
sys.exit()
if sys.argv[1].endswith(".dat"):
basefilenm = sys.argv[1][:sys.argv[1].rfind(".dat")]
else:
basefilenm = sys.argv[1]
inf = presto.read_inffile(basefilenm)
outfile = open(basefilenm+".tim", "wb")
outfile.write(infodata_to_sigproc_header(inf))
outfile.close()
# Do this the simple (but kludgey) way
os.system("cat %s >> %s"%(sys.argv[1], basefilenm+".tim"))
| 2,486 | 34.028169 | 86 | py |
presto | presto-master/bin/gotocand.py | #!/usr/bin/env python
import sys
import os
import os.path
import glob
import string
import re
from subprocess import Popen, PIPE, STDOUT
from presto.presto import fourierprops, get_rzw_cand
short_re = re.compile(r"_\d\d\dM_\d\d_ACCEL_")
def determine_dt(candfile):
for line in open(candfile, 'r', encoding='utf-8'):
if line.startswith(" Width of each time series bin"):
return float(line.split()[-1])
def short_stuff(candfile, candnum, shortinfo, nodename, datfile, frac=0.5):
# Note: frac is the overlap fraction which is usually 0.5
# It needs to be explicitly changed if you used a different overlap!
tmp = shortinfo[0].split("_")
ii = int(tmp[2])
searchN = 1000000 * int(tmp[1][:-1])
fileN = get_datfile_len(nodename, datfile)
start = ii * float(searchN * frac) / fileN
end = start + float(searchN) / fileN
dt = determine_dt(candfile)
chunkT = dt * searchN
obsT = dt * fileN
cand = fourierprops()
get_rzw_cand(candfile+'.cand', candnum, cand)
# fourier props file reports average r and average z.
# We need the starting values for this chunk.
z0 = cand.z - 0.5 * cand.w
r0 = cand.r - 0.5 * z0 - cand.w / 6.0
f = r0 / chunkT
fd = z0 / chunkT**2
fdd = cand.w / chunkT**3
return (" -start %.3f -end %.3f "%(start, end),
"_%.3f-%.3f"%(start, end), f, fd, fdd)
def get_dm(filenm):
parts = filenm.split("_")
for part in parts:
if part[:2]=='DM':
return part[2:]
return None
def get_basename(filenm):
offset = filenm.find("_DM")
if offset > 0:
return filenm[:offset]
else:
return None
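# Added illustration (hypothetical filename): for a candidate file named
# "GBT_drift_DM42.50_ACCEL_50", get_dm() returns "42.50" and
# get_basename() returns "GBT_drift".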
def find_node(DM):
nodefiles = glob.glob("node*")
for nodefile in nodefiles:
for line in open(nodefile):
if line[:4]=="nimr":
if DM in line.split():
return line.split()[0]
return None
def find_local_datfile(basename, DM):
p = Popen("find .. -name \*%s\*DM%s\*dat"%(basename, DM), shell=True,
bufsize=-1, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(i, o) = (p.stdin, p.stdout)
datfile = ''
for line in o:
line = line.strip()
if line.startswith(b"find:"):
            line = b" ".join(line.split()[1:])
if line.endswith(b".dat"):
datfile = line.decode("utf-8")
print("'%s'"%datfile)
if datfile!='':
return datfile
def find_datfile(nodename, basename, DM):
p = Popen("ssh %s find -L /scratch -name \*%s\*DM%s\*dat"%(nodename, basename, DM),
shell=True, bufsize=-1, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(i, o) = (p.stdin, p.stdout)
datfile = ''
for line in o:
line = line.strip()
if line.startswith(b"find:"):
            line = b" ".join(line.split()[1:])
if line.endswith(b".dat"):
datfile = line.decode("utf-8")
print("'%s'"%datfile)
if datfile!='' and datfile.startswith("/scratch"):
return datfile
return None
def get_datfile_len(nodename, datfile):
if nodename:
p = Popen("ssh %s ls -l %s | awk '{ print $5 };'"%(nodename, datfile),
shell=True, bufsize=-1, stdin=PIPE, stdout=PIPE, stderr=STDOUT,
close_fds=True)
(i, o) = (p.stdin, p.stdout)
else:
p = Popen("ls -l %s | awk '{ print $5 };'"%(datfile),
shell=True, bufsize=-1, stdin=PIPE, stdout=PIPE, stderr=STDOUT,
close_fds=True)
(i, o) = (p.stdin, p.stdout)
filelen = o.readline().decode("utf-8")
if filelen!='':
        return int(filelen)//4   # number of 4-byte samples
return None
if __name__ == "__main__":
if (len(sys.argv) < 2):
print("\nusage: gotocand.py [-local] candfile:candnum\n")
sys.exit(0)
local = 0
if (sys.argv[1]=="-local"):
local = 1
sys.argv.pop(1)
outdir = os.getcwd()
if (len(sys.argv) > 2):
extraargs = "".join(sys.argv[2:])
else:
extraargs = ""
candfile, candnum = sys.argv[1].split(':')
dm = get_dm(candfile)
if dm is None:
print("Error: Could not find a DM value in '%s'!"%candfile)
sys.exit(0)
base = get_basename(candfile)
if base is None:
print("Error: Could not find the base filename in '%s'!"%candfile)
sys.exit(0)
# Is the candidate from a short-chunk search?
shortcand = short_re.findall(candfile)
if (local):
node = None
datfile = find_local_datfile(base, dm)
else:
node = find_node(dm)
if node is None:
print("Error: Could not find the node where the dat file should be!")
sys.exit(0)
datfile = find_datfile(node, base, dm)
if datfile is None:
print("Error: Could not find .dat file on the node!")
sys.exit(0)
fullcandfile = os.path.join(outdir, candfile)+".cand"
outfile = base+"_DM%s"%dm
datfiledir, datfilenm = os.path.split(datfile)
if not local:
print("\nGoing to %s and folding candidate #%s from the file %s."%\
(node,candnum,candfile))
print(" Folding command:")
if shortcand:
shortparts, shortoutext, f, fd, fdd = short_stuff(candfile, int(candnum),
shortcand, node, datfile)
extraargs += shortparts
outfile += shortoutext
foldcommand = "prepfold %s -f %.15g -fd %.15g -fdd %.15g -o %s %s"%\
(extraargs, f, fd, fdd, outfile, datfile)
print(foldcommand)
if not local:
os.system("ssh -X %s 'cd %s ; %s'"%(node, datfiledir, foldcommand))
os.system("scp -c blowfish %s:%s*_%.2f*.pfd* %s"% \
(node, os.path.join(datfiledir, outfile), f, outdir))
else:
os.system("%s"%(foldcommand))
else:
foldcommand = "prepfold %s -accelcand %s -accelfile %s -o %s %s"%\
(extraargs, candnum, fullcandfile, outfile, datfile)
print(" %s"%foldcommand)
if not local:
os.system("ssh -X %s 'cd %s ; %s'"%(node, datfiledir, foldcommand))
os.system("scp -c blowfish %s:%s*ACCEL_Cand_%d*.pfd* %s"% \
(node, os.path.join(datfiledir, outfile), int(candnum), outdir))
else:
os.system("%s"%(foldcommand))
| 6,431 | 33.031746 | 93 | py |
presto | presto-master/bin/event_peak.py | #!/usr/bin/env python
from __future__ import print_function
import numpy as num
import sys
import presto.events as evts
from presto import kuiper
from presto.Pgplot import *
if len(sys.argv) != 4:
    print("\nusage: {} eventfile fctr fdctr\n".format(sys.argv[0]))
    sys.exit(1)
def calc_phases(events, f, fd):
return num.fmod(events*(f+(0.5*fd*events)), 1.0)
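# Added example: with f = 2.0 Hz and fd = 0, an event at t = 0.75 s lands at
# phase fmod(0.75*2.0, 1.0) = 0.5; the 0.5*fd*t term folds in any linear
# frequency drift across the span of the events.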
events = num.loadtxt(sys.argv[1])
events.sort()
print("Read %d events from '%s'." % (events.size, sys.argv[1]))
minT, maxT = events.min(), events.max()
events -= minT
T = maxT - minT
if T > 100:
print("Assuming that the events are in seconds (T = %.1f sec)" % T)
else:
events *= 86400.0
print("Assuming that the events are in days (T = %.3f days)" % T)
T *= 86400.0
fctr = float(sys.argv[2])
fdctr = float(sys.argv[3])
osamp = 10
df = 1.0 / (osamp * T)
dfd = 4.0 / (osamp * T * T)
nn = 101 # number of f and fd trials
print("osamp = %d, nn = %d" % (osamp, nn))
print(" fd = %g" % df)
print("dfd = %g" % dfd)
n = (nn-1)/2
fs = num.linspace(fctr-n*df, fctr+n*df, nn)
fds = num.linspace(fdctr-n*dfd, fdctr+n*dfd, nn)
kuipers = num.zeros((nn, nn), dtype=float)
htests = num.zeros((nn, nn), dtype=float)
minPk = minPh = 1.0
for ii, fd in enumerate(fds):
print(ii)
for jj, f in enumerate(fs):
phases = calc_phases(events, f, fd)
D, Pk = kuiper.kuiper_uniform_test(phases)
h, harm = evts.Htest_exact(phases)
Ph = evts.Hstat_prob(h)
kuipers[ii, jj] = Pk
htests[ii, jj] = Ph
#print D, Pk, h, harm, Ph
if Pk < minPk:
minPk, fk, fdk = Pk, f, fd
if Ph < minPh:
minPh, fh, fdh, bestharm = Ph, f, fd, harm
print()
print("Min P(kuiper) = %.2e at f = %g, fd = %g" % (minPk, fk, fdk))
print("Min P(h-test) = %.2e at f = %g, fd = %g, (%d harmonics)" % \
(minPh, fh, fdh, bestharm))
sigmas = num.asarray([3.0, 5.0, 7.0])
contours = num.log10(1.0-evts.gauss_sigma_to_prob(sigmas))[::-1]
print("Kuiper")
plot2d(num.log10(kuipers), fs, fds,
labx="Frequency (Hz)", laby="F-dot (Hz/s)")
#contours=contours, color='black', width=6)
closeplot()
print("H-test")
plot2d(num.log10(htests), fs, fds,
labx="Frequency (Hz)", laby="F-dot (Hz/s)")
#contours=contours, color='black')
closeplot()
| 2,281 | 25.847059 | 71 | py |
presto | presto-master/bin/PALFA_presto_search.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
from operator import attrgetter
import glob, os, os.path, socket, struct, sys, time, tarfile
import numpy
from presto import psr_utils
from presto import presto
from presto import sifting
from presto import sigproc
# Calling convention:
#
# PALFA_presto_search.py fil_file working_dir
#
# fil_file is the filterbank file name
# working_dir is the scratch directory where the work should be done
# In general, there should be ~30GB of scratch disk per beam.
# If you do not have that much scratch space, you will likely
# need to set the use_subbands flag below.
# Basic parameters
# institution is one of: 'UBC', 'NRAOCV', 'McGill', 'Columbia', 'Cornell', 'UTB'
institution = "NRAOCV"
base_output_directory = "/home/sransom/results/ALFA"
db_pointing_file = "/home/sransom/results/ALFA/PALFA_coords_table.txt"
# The following determines if we'll dedisperse and fold using subbands.
# In general, it is a very good idea to use them if there is enough scratch
# space on the machines that are processing (~30GB/beam processed)
use_subbands = True
# Tunable parameters for searching and folding
# (you probably don't need to tune any of them)
rfifind_chunk_time = 2**15 * 0.000064 # ~2.1 sec for dt = 64us
singlepulse_threshold = 5.0 # threshold SNR for candidate determination
singlepulse_plot_SNR = 6.0 # threshold SNR for singlepulse plot
singlepulse_maxwidth = 0.1 # max pulse width in seconds
to_prepfold_sigma = 6.0 # incoherent sum significance to fold candidates
max_cands_to_fold = 150 # Never fold more than this many candidates
numhits_to_fold = 2 # Number of DMs with a detection needed to fold
low_DM_cutoff = 2.0 # Lowest DM to consider as a "real" pulsar
lo_accel_numharm = 16 # max harmonics
lo_accel_sigma = 2.0 # threshold gaussian significance
lo_accel_zmax = 0 # bins
lo_accel_flo = 2.0 # Hz
hi_accel_numharm = 8 # max harmonics
hi_accel_sigma = 3.0 # threshold gaussian significance
hi_accel_zmax = 50 # bins
hi_accel_flo = 1.0 # Hz
low_T_to_search = 20.0 # sec
# Sifting specific parameters (don't touch without good reason!)
sifting.sigma_threshold = to_prepfold_sigma-1.0 # incoherent power threshold (sigma)
sifting.c_pow_threshold = 100.0 # coherent power threshold
sifting.r_err = 1.1 # Fourier bin tolerence for candidate equivalence
sifting.short_period = 0.0005 # Shortest period candidates to consider (s)
sifting.long_period = 15.0 # Longest period candidates to consider (s)
sifting.harm_pow_cutoff = 8.0 # Power required in at least one harmonic
def fix_fil_posn(fil_filenm, hdrlen, ra, dec):
"""
fix_fil_posn(fil_filenm, hdrlen, ra, dec):
Modify the filterbank header and update the RA and DEC
fields using the values given as input. ra and dec
should be in 'HH:MM:SS.SSSS' and 'DD:MM:SS.SSSS' format.
hdrlen is the length of the filterbank header as
reported by PRESTO's 'readfile' or SIGPROC's 'header'.
"""
newra = float(ra.replace(":", ""))
newdec = float(dec.replace(":", ""))
    header = open(fil_filenm, 'rb').read(hdrlen)
    ra_ptr = header.find(b"src_raj")+len("src_raj")
    dec_ptr = header.find(b"src_dej")+len("src_dej")
filfile = open(fil_filenm, 'rb+')
filfile.seek(ra_ptr)
filfile.write(struct.pack('d', newra))
filfile.seek(dec_ptr)
filfile.write(struct.pack('d', newdec))
filfile.close()
def read_db_posn(orig_filenm, beam):
"""
read_db_posn(orig_filenm, beam):
Find the original WAPP filename in the db_pointing_file
and return the sexagesimal position strings for
the choen beam in that file. Return None if not found.
"""
offset = beam % 2
for line in open(db_pointing_file):
sline = line.split()
if sline[0].strip() == orig_filenm:
ra_str = sline[2*offset+1].strip()
dec_str = sline[2*offset+2].strip()
return ra_str, dec_str
return None
def find_masked_fraction(obs):
"""
find_masked_fraction(obs):
Parse the output file from an rfifind run and return the
fraction of the data that was suggested to be masked.
"""
rfifind_out = obs.basefilenm + "_rfifind.out"
for line in open(rfifind_out):
if "Number of bad intervals" in line:
return float(line.split("(")[1].split("%")[0])/100.0
# If there is a problem reading the file, return 100%
return 100.0
def get_all_subdms(ddplans):
"""
get_all_subdms(ddplans):
Return a sorted array of the subdms from the list of ddplans.
"""
subdmlist = []
for ddplan in ddplans:
subdmlist += [float(x) for x in ddplan.subdmlist]
subdmlist.sort()
subdmlist = numpy.asarray(subdmlist)
return subdmlist
def find_closest_subbands(obs, subdms, DM):
"""
find_closest_subbands(obs, subdms, DM):
Return the basename of the closest set of subbands to DM
given an obs_info class and a sorted array of the subdms.
"""
subdm = subdms[numpy.fabs(subdms - DM).argmin()]
return "subbands/%s_DM%.2f.sub[0-6]*"%(obs.basefilenm, subdm)
def timed_execute(cmd):
"""
timed_execute(cmd):
Execute the command 'cmd' after logging the command
to STDOUT. Return the wall-clock amount of time
the command took to execute.
"""
sys.stdout.write("\n'"+cmd+"'\n")
sys.stdout.flush()
start = time.time()
os.system(cmd)
end = time.time()
return end - start
def get_folding_command(cand, obs, ddplans):
"""
get_folding_command(cand, obs, ddplans):
Return a command for prepfold for folding the subbands using
an obs_info instance, a list of the ddplans, and a candidate
instance that describes the observations and searches.
"""
# Folding rules are based on the facts that we want:
# 1. Between 24 and 200 bins in the profiles
# 2. For most candidates, we want to search length = 101 p/pd/DM cubes
# (The side of the cube is always 2*M*N+1 where M is the "factor",
# either -npfact (for p and pd) or -ndmfact, and N is the number of bins
# in the profile). A search of 101^3 points is pretty fast.
# 3. For slow pulsars (where N=100 or 200), since we'll have to search
# many points, we'll use fewer intervals in time (-npart 30)
# 4. For the slowest pulsars, in order to avoid RFI, we'll
# not search in period-derivative.
zmax = cand.filename.split("_")[-1]
outfilenm = obs.basefilenm+"_DM%s_Z%s"%(cand.DMstr, zmax)
# Note: the following calculations should probably only be done once,
# but in general, these calculation are effectively instantaneous
# compared to the folding itself
if use_subbands: # Fold the subbands
subdms = get_all_subdms(ddplans)
subfiles = find_closest_subbands(obs, subdms, cand.DM)
foldfiles = subfiles
else: # Folding the downsampled filterbank files instead
hidms = [x.lodm for x in ddplans[1:]] + [2000]
dfacts = [x.downsamp for x in ddplans]
for hidm, dfact in zip(hidms, dfacts):
if cand.DM < hidm:
downsamp = dfact
break
if downsamp==1:
filfile = obs.fil_filenm
else:
filfile = obs.basefilenm+"_DS%d.fil"%downsamp
foldfiles = filfile
p = 1.0 / cand.f
if p < 0.002:
Mp, Mdm, N = 2, 2, 24
otheropts = "-npart 50 -ndmfact 3"
elif p < 0.05:
Mp, Mdm, N = 2, 1, 50
otheropts = "-npart 40 -pstep 1 -pdstep 2 -dmstep 3"
elif p < 0.5:
Mp, Mdm, N = 1, 1, 100
otheropts = "-npart 30 -pstep 1 -pdstep 2 -dmstep 1"
else:
Mp, Mdm, N = 1, 1, 200
otheropts = "-npart 30 -nopdsearch -pstep 1 -pdstep 2 -dmstep 1"
return "prepfold -noxwin -accelcand %d -accelfile %s.cand -dm %.2f -o %s %s -n %d -npfact %d -ndmfact %d %s" % \
(cand.candnum, cand.filename, cand.DM, outfilenm,
otheropts, N, Mp, Mdm, foldfiles)
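# Added illustration (hypothetical candidate, not real output): a ~5 ms
# candidate falls in the second branch above (Mp, Mdm, N = 2, 1, 50), so the
# returned command looks roughly like
#   prepfold -noxwin -accelcand 3 -accelfile <base>_ACCEL_50.cand -dm 123.45 \
#       -o <base>_DM123.45_Z50 -npart 40 -pstep 1 -pdstep 2 -dmstep 3 \
#       -n 50 -npfact 2 -ndmfact 1 <subband or filterbank files>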
class obs_info(object):
"""
class obs_info(fil_filenm)
A class describing the observation and the analysis.
"""
def __init__(self, fil_filenm):
self.fil_filenm = fil_filenm
        # Note: str.rstrip(".fil") would strip any trailing '.', 'f', 'i', 'l'
        # characters, so remove the extension explicitly instead
        self.basefilenm = fil_filenm[:-4] if fil_filenm.endswith(".fil") else fil_filenm
self.beam = int(self.basefilenm[-1])
filhdr, self.hdrlen = sigproc.read_header(fil_filenm)
self.orig_filenm = filhdr['rawdatafile']
self.MJD = filhdr['tstart']
self.nchans = filhdr['nchans']
self.ra_rad = sigproc.ra2radians(filhdr['src_raj'])
self.ra_string = psr_utils.coord_to_string(\
*psr_utils.rad_to_hms(self.ra_rad))
self.dec_rad = sigproc.dec2radians(filhdr['src_dej'])
self.dec_string = psr_utils.coord_to_string(\
*psr_utils.rad_to_dms(self.dec_rad))
self.az = filhdr['az_start']
self.el = 90.0-filhdr['za_start']
self.BW = abs(filhdr['foff']) * filhdr['nchans']
self.dt = filhdr['tsamp']
self.orig_N = sigproc.samples_per_file(fil_filenm, filhdr, self.hdrlen)
self.orig_T = self.orig_N * self.dt
self.N = psr_utils.choose_N(self.orig_N)
self.T = self.N * self.dt
# Update the RA and DEC from the database file if required
newposn = read_db_posn(self.orig_filenm, self.beam)
if newposn is not None:
self.ra_string, self.dec_string = newposn
# ... and use them to update the filterbank file
fix_fil_posn(fil_filenm, self.hdrlen,
self.ra_string, self.dec_string)
# Determine the average barycentric velocity of the observation
self.baryv = presto.get_baryv(self.ra_string, self.dec_string,
self.MJD, self.T, obs="AO")
# Where to dump all the results
# Directory structure is under the base_output_directory
# according to base/MJD/filenmbase/beam
self.outputdir = os.path.join(base_output_directory,
str(int(self.MJD)),
self.basefilenm[:-2],
str(self.beam))
# Figure out which host we are processing on
self.hostname = socket.gethostname()
# The fraction of the data recommended to be masked by rfifind
self.masked_fraction = 0.0
# Initialize our timers
self.rfifind_time = 0.0
self.downsample_time = 0.0
self.subbanding_time = 0.0
self.dedispersing_time = 0.0
self.FFT_time = 0.0
self.lo_accelsearch_time = 0.0
self.hi_accelsearch_time = 0.0
self.singlepulse_time = 0.0
self.sifting_time = 0.0
self.folding_time = 0.0
self.total_time = 0.0
# Inialize some candidate counters
self.num_sifted_cands = 0
self.num_folded_cands = 0
self.num_single_cands = 0
def write_report(self, filenm):
report_file = open(filenm, "w")
report_file.write("---------------------------------------------------------\n")
report_file.write("%s was processed on %s\n"%(self.fil_filenm, self.hostname))
report_file.write("Ending UTC time: %s\n"%(time.asctime(time.gmtime())))
report_file.write("Total wall time: %.1f s (%.2f hrs)\n"%\
(self.total_time, self.total_time/3600.0))
report_file.write("Fraction of data masked: %.2f%%\n"%\
(self.masked_fraction*100.0))
report_file.write("---------------------------------------------------------\n")
report_file.write(" rfifind time = %7.1f sec (%5.2f%%)\n"%\
(self.rfifind_time, self.rfifind_time/self.total_time*100.0))
if use_subbands:
report_file.write(" subbanding time = %7.1f sec (%5.2f%%)\n"%\
(self.subbanding_time, self.subbanding_time/self.total_time*100.0))
else:
report_file.write(" downsampling time = %7.1f sec (%5.2f%%)\n"%\
(self.downsample_time, self.downsample_time/self.total_time*100.0))
report_file.write(" dedispersing time = %7.1f sec (%5.2f%%)\n"%\
(self.dedispersing_time, self.dedispersing_time/self.total_time*100.0))
report_file.write(" single-pulse time = %7.1f sec (%5.2f%%)\n"%\
(self.singlepulse_time, self.singlepulse_time/self.total_time*100.0))
report_file.write(" FFT time = %7.1f sec (%5.2f%%)\n"%\
(self.FFT_time, self.FFT_time/self.total_time*100.0))
report_file.write(" lo-accelsearch time = %7.1f sec (%5.2f%%)\n"%\
(self.lo_accelsearch_time, self.lo_accelsearch_time/self.total_time*100.0))
report_file.write(" hi-accelsearch time = %7.1f sec (%5.2f%%)\n"%\
(self.hi_accelsearch_time, self.hi_accelsearch_time/self.total_time*100.0))
report_file.write(" sifting time = %7.1f sec (%5.2f%%)\n"%\
(self.sifting_time, self.sifting_time/self.total_time*100.0))
report_file.write(" folding time = %7.1f sec (%5.2f%%)\n"%\
(self.folding_time, self.folding_time/self.total_time*100.0))
report_file.write("---------------------------------------------------------\n")
report_file.close()
class dedisp_plan(object):
"""
class dedisp_plan(lodm, dmstep, dmsperpass, numpasses, numsub, downsamp)
A class describing a de-dispersion plan for prepsubband in detail.
"""
def __init__(self, lodm, dmstep, dmsperpass, numpasses, numsub, downsamp):
self.lodm = float(lodm)
self.dmstep = float(dmstep)
self.dmsperpass = int(dmsperpass)
self.numpasses = int(numpasses)
self.numsub = int(numsub)
self.downsamp = int(downsamp)
# Downsample less for the subbands so that folding
# candidates is more accurate
        self.sub_downsamp = self.downsamp // 2
if self.sub_downsamp==0: self.sub_downsamp = 1
# The total downsampling is:
# self.downsamp = self.sub_downsamp * self.dd_downsamp
if self.downsamp==1: self.dd_downsamp = 1
else: self.dd_downsamp = 2
self.sub_dmstep = self.dmsperpass * self.dmstep
self.dmlist = [] # These are strings for comparison with filenames
self.subdmlist = []
for ii in range(self.numpasses):
self.subdmlist.append("%.2f"%(self.lodm + (ii+0.5)*self.sub_dmstep))
lodm = self.lodm + ii * self.sub_dmstep
dmlist = ["%.2f"%dm for dm in \
numpy.arange(self.dmsperpass)*self.dmstep + lodm]
self.dmlist.append(dmlist)
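# Added illustration (not executed): the first plan used below,
# dedisp_plan(0.0, 0.3, 24, 26, 32, 1), has sub_dmstep = 24*0.3 = 7.2, so its
# first set of subbands is made at DM 3.60 and the first pass of trial DMs is
# 0.00, 0.30, ..., 6.90.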
# Create our de-dispersion plans (for 100MHz WAPPs)
# The following are the "optimal" values for the 100MHz
# survey. It keeps the total dispersive smearing (i.e.
# not counting scattering) below ~1 ms up to a DM of ~600 pc cm^-3
ddplans = []
if (1):
# The values here are: lodm dmstep dms/call #calls #subbands downsamp
ddplans.append(dedisp_plan( 0.0, 0.3, 24, 26, 32, 1))
ddplans.append(dedisp_plan( 187.2, 0.5, 24, 10, 32, 2))
ddplans.append(dedisp_plan( 307.2, 1.0, 24, 11, 32, 4))
ddplans.append(dedisp_plan( 571.2, 3.0, 24, 6, 32, 8))
else: # faster option that sacrifices a small amount of time resolution at the lowest DMs
# The values here are: lodm dmstep dms/call #calls #subbands downsamp
ddplans.append(dedisp_plan( 0.0, 0.5, 22, 21, 32, 1))
ddplans.append(dedisp_plan( 231.0, 0.5, 24, 6, 32, 2))
ddplans.append(dedisp_plan( 303.0, 1.0, 24, 11, 32, 4))
ddplans.append(dedisp_plan( 567.0, 3.0, 24, 7, 32, 8))
def main(fil_filenm, workdir):
# Change to the specified working directory
os.chdir(workdir)
    # Get information on the observation and the job
job = obs_info(fil_filenm)
if job.T < low_T_to_search:
print("The observation is too short (%.2f s) to search."%job.T)
sys.exit()
job.total_time = time.time()
# Use whatever .zaplist is found in the current directory
default_zaplist = glob.glob("*.zaplist")[0]
# Make sure the output directory (and parent directories) exist
try:
os.makedirs(job.outputdir)
except: pass
# Create a directory to hold all the subbands
if use_subbands:
try:
os.makedirs("subbands")
except: pass
print("\nBeginning PALFA search of '%s'"%job.fil_filenm)
print("UTC time is: %s"%(time.asctime(time.gmtime())))
# rfifind the filterbank file
cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out"%\
(rfifind_chunk_time, job.basefilenm,
job.fil_filenm, job.basefilenm)
job.rfifind_time += timed_execute(cmd)
maskfilenm = job.basefilenm + "_rfifind.mask"
# Find the fraction that was suggested to be masked
# Note: Should we stop processing if the fraction is
# above some large value? Maybe 30%?
job.masked_fraction = find_masked_fraction(job)
# Iterate over the stages of the overall de-dispersion plan
dmstrs = []
for ddplan in ddplans:
# Make a downsampled filterbank file if we are not using subbands
if not use_subbands:
if ddplan.downsamp > 1:
cmd = "downsample_filterbank.py %d %s"%(ddplan.downsamp, job.fil_filenm)
job.downsample_time += timed_execute(cmd)
fil_filenm = job.fil_filenm[:job.fil_filenm.find(".fil")] + \
"_DS%d.fil"%ddplan.downsamp
else:
fil_filenm = job.fil_filenm
# Iterate over the individual passes through the .fil file
for passnum in range(ddplan.numpasses):
subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])
if use_subbands:
# Create a set of subbands
cmd = "prepsubband -sub -subdm %s -downsamp %d -nsub %d -mask %s -o subbands/%s %s > %s.subout"%\
(ddplan.subdmlist[passnum], ddplan.sub_downsamp,
ddplan.numsub, maskfilenm, job.basefilenm,
job.fil_filenm, subbasenm)
job.subbanding_time += timed_execute(cmd)
# Now de-disperse using the subbands
cmd = "prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d -numout %d -o %s subbands/%s.sub[0-9]* > %s.prepout"%\
(ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
ddplan.dmsperpass, ddplan.dd_downsamp, job.N/ddplan.downsamp,
job.basefilenm, subbasenm, subbasenm)
job.dedispersing_time += timed_execute(cmd)
else: # Not using subbands
cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -numdms %d -numout %d -o %s %s"%\
(maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
ddplan.dmsperpass, job.N/ddplan.downsamp,
job.basefilenm, fil_filenm)
job.dedispersing_time += timed_execute(cmd)
# Iterate over all the new DMs
for dmstr in ddplan.dmlist[passnum]:
dmstrs.append(dmstr)
basenm = job.basefilenm+"_DM"+dmstr
datnm = basenm+".dat"
fftnm = basenm+".fft"
infnm = basenm+".inf"
# Do the single-pulse search
cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
(singlepulse_maxwidth, singlepulse_threshold, datnm)
job.singlepulse_time += timed_execute(cmd)
# FFT, zap, and de-redden
cmd = "realfft %s"%datnm
job.FFT_time += timed_execute(cmd)
cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
(default_zaplist, job.baryv, fftnm)
job.FFT_time += timed_execute(cmd)
cmd = "rednoise %s"%fftnm
job.FFT_time += timed_execute(cmd)
try:
os.rename(basenm+"_red.fft", fftnm)
except: pass
# Do the low-acceleration search
cmd = "accelsearch -locpow -harmpolish -numharm %d -sigma %f -zmax %d -flo %f %s"%\
(lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
job.lo_accelsearch_time += timed_execute(cmd)
try:
os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
except: pass
# Do the high-acceleration search
cmd = "accelsearch -locpow -harmpolish -numharm %d -sigma %f -zmax %d -flo %f %s"%\
(hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
job.hi_accelsearch_time += timed_execute(cmd)
try:
os.remove(basenm+"_ACCEL_%d.txtcand"%hi_accel_zmax)
except: pass
# Remove the .dat and .fft files
try:
os.remove(datnm)
os.remove(fftnm)
except: pass
# Make the single-pulse plots
basedmb = job.basefilenm+"_DM"
basedme = ".singlepulse "
# The following will make plots for DM ranges:
# 0-110, 100-310, 300-1000+
dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
basedmb+"[0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"10[0-9].[0-9][0-9]"+basedme,
basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"30[0-9].[0-9][0-9]"+basedme,
basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
dmrangestrs = ["0-110", "100-310", "300-1000+"]
psname = job.basefilenm+"_singlepulse.ps"
for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
cmd = 'single_pulse_search.py -t %f -g "%s"' % \
(singlepulse_plot_SNR, dmglob)
job.singlepulse_time += timed_execute(cmd)
try:
os.rename(psname,
job.basefilenm+"_DMs%s_singlepulse.ps"%dmrangestr)
except: pass
# Sift through the candidates to choose the best to fold
job.sifting_time = time.time()
lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%lo_accel_zmax))
if len(lo_accel_cands):
lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
if len(lo_accel_cands):
lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
dmstrs, low_DM_cutoff)
hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
if len(hi_accel_cands):
hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
if len(hi_accel_cands):
hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
dmstrs, low_DM_cutoff)
all_accel_cands = lo_accel_cands + hi_accel_cands
if len(all_accel_cands):
all_accel_cands = sifting.remove_harmonics(all_accel_cands)
# Note: the candidates will be sorted in _sigma_ order, not _SNR_!
all_accel_cands.sort(key=attrgetter('sigma'), reverse=True)
sifting.write_candlist(all_accel_cands, job.basefilenm+".accelcands")
try:
cmd = "cp *.accelcands "+job.outputdir
os.system(cmd)
except: pass
job.sifting_time = time.time() - job.sifting_time
# Fold the best candidates
cands_folded = 0
for cand in all_accel_cands:
if cands_folded == max_cands_to_fold:
break
if cand.sigma > to_prepfold_sigma:
job.folding_time += timed_execute(get_folding_command(cand, job, ddplans))
cands_folded += 1
# Now step through the .ps files and convert them to .png and gzip them
psfiles = glob.glob("*.ps")
for psfile in psfiles:
if "singlepulse" in psfile:
# For some reason the singlepulse files don't transform nicely...
epsfile = psfile.replace(".ps", ".eps")
os.system("eps2eps "+psfile+" "+epsfile)
os.system("pstoimg -density 100 -crop a "+epsfile)
try:
os.remove(epsfile)
except: pass
else:
os.system("pstoimg -density 100 -flip cw "+psfile)
os.system("gzip "+psfile)
# NOTE: need to add database commands
# Tar up the results files
tar_suffixes = ["_ACCEL_%d.tgz"%lo_accel_zmax,
"_ACCEL_%d.tgz"%hi_accel_zmax,
"_ACCEL_%d.cand.tgz"%lo_accel_zmax,
"_ACCEL_%d.cand.tgz"%hi_accel_zmax,
"_singlepulse.tgz",
"_inf.tgz",
"_pfd.tgz",
"_bestprof.tgz"]
tar_globs = ["*_ACCEL_%d"%lo_accel_zmax,
"*_ACCEL_%d"%hi_accel_zmax,
"*_ACCEL_%d.cand"%lo_accel_zmax,
"*_ACCEL_%d.cand"%hi_accel_zmax,
"*.singlepulse",
"*_DM[0-9]*.inf",
"*.pfd",
"*.pfd.bestprof"]
for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
tf = tarfile.open(job.basefilenm+tar_suffix, "w:gz")
for infile in glob.glob(tar_glob):
tf.add(infile)
os.remove(infile)
tf.close()
# And finish up
job.total_time = time.time() - job.total_time
print("\nFinished")
print("UTC time is: %s"%(time.asctime(time.gmtime())))
# Write the job report
job.write_report(job.basefilenm+".report")
job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))
# Copy all the important stuff to the output directory
try:
cmd = "cp *rfifind.[bimors]* *.ps.gz *.tgz *.png "+job.outputdir
os.system(cmd)
except: pass
if __name__ == "__main__":
# Arguments to the search program are
# sys.argv[1] = filterbank file name
# sys.argv[2] = working directory name
fil_filenm = sys.argv[1]
workdir = sys.argv[2]
main(fil_filenm, workdir)
| 27,092 | 43.19739 | 137 | py |
presto | presto-master/bin/quick_prune_cands.py | #!/usr/bin/env python
import sys
from presto import sifting
if len(sys.argv) < 2:
sys.stderr.write("\nusage: quick_prune_cands.py ACCEL_file_name [sigma]\n\n")
sys.exit()
if len(sys.argv)==3:
sifting.sigma_threshold = float(sys.argv[2])
cands = sifting.read_candidates([sys.argv[1]], track=True)
cands.print_cand_summary()
cands.to_file()
| 356 | 21.3125 | 82 | py |
presto | presto-master/bin/GBT350_drift_prep.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import range
import sys, os, random
from presto import sigproc
from presto import psr_utils as pu
def spigot_samples_per_file(spigot_filenm):
"""
    spigot_samples_per_file(spigot_filenm):
    Return the number of samples present in the Spigot FITS file.
"""
hdrlen = 184320
bytes_per_sample = 2048
filelen = os.stat(spigot_filenm)[6]
return int((filelen-hdrlen)/bytes_per_sample)
debug = 1
if __name__=="__main__":
if (len(sys.argv) < 3):
print("usage: GBT350_drift_prep.py NUM spigot_fits_files")
print(" NUM is the 'beam' number in the scan. It starts ")
print(" with 0 and goes to NMAX. If NUM is < 0, NMAX")
print(" is sent to STDOUT by the program.")
sys.exit()
orig_N = 1728000 # Number of samples to analyze at a time (~141 sec)
raw_N = 1900000 # Number of samples to step through .fits files
overlap_factor = 0.5 # Overlap each orig_N samples by this fraction
overlap_samples = int(orig_N * overlap_factor)
nom_samps_per_file = 976896
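    # Added illustration: with orig_N = 1728000 and a 50% overlap,
    # overlap_samples = 864000, so 'beam' NUM starts at sample NUM*864000 and
    # each analysis chunk shares half its samples with its neighbours.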
# Now see how much data we have to work with
samples_per_file = []
infilenms = sys.argv[2:]
numinfiles = len(infilenms)
for ii in range(numinfiles):
samps = spigot_samples_per_file(infilenms[ii])
if ((samps < nom_samps_per_file) and (ii < numinfiles-1)):
print("Warning! '%s' only has %d samples!"%\
(infilenms[ii], samps))
print(" You need to fix that file!")
sys.exit(-1)
samples_per_file.append(samps)
total_samples = sum(samples_per_file)
num = int(sys.argv[1])
    nmax = total_samples//overlap_samples - 1
if num < 0:
print(nmax)
sys.exit(0)
if num > nmax:
print("NUM > NMAX (%d)! Exiting!"%nmax)
sys.exit(-1)
# Now figure out which file is the first
first_sample = num * overlap_samples
accum_samples = 0
for ii in range(len(samples_per_file)):
next_accum_samples = accum_samples + samples_per_file[ii]
if next_accum_samples > first_sample:
first_filenm = infilenms[ii]
# How much data to skip in the first file
skip = first_sample - accum_samples
# How many total files we need
first_file_samples = samples_per_file[ii]-skip
            numfiles = (raw_N - first_file_samples) // nom_samps_per_file + 1
if ((raw_N - first_file_samples) % nom_samps_per_file):
numfiles += 1
if debug:
print("first_filenum = ", ii)
print("1st sample = ", first_sample)
print("1st filenam = ", infilenms[ii])
print("skip = ", skip)
print("1st_file_samps = ", first_file_samples)
print("numfiles = ", numfiles)
break
else:
accum_samples += samples_per_file[ii]
# Now make a command line option for spigot2filterbank
tmpfilenm = "tmp%d.fil"%random.randint(0,2**30)
cmd = "spigot2filterbank -skip %d -numout %d -o %s " % \
(skip, raw_N, tmpfilenm)
for goodfile in infilenms[ii:ii+numfiles]:
cmd += "%s "%goodfile
os.system(cmd)
# Now read the header to determine what the correct filename
# should be. Use that to rename the fil file.
filhdr, hdrlen = sigproc.read_header(tmpfilenm)
MJDi = int(filhdr['tstart'])
ra_rad = sigproc.ra2radians(filhdr['src_raj'])
ra_string = pu.coord_to_string(*pu.rad_to_hms(ra_rad))
dec_rad = sigproc.dec2radians(filhdr['src_dej'])
dec_string = pu.coord_to_string(*pu.rad_to_dms(dec_rad))
str_coords = "".join(ra_string.split(":")[:2])
if dec_rad >= 0.0: str_coords += "+"
str_coords += "".join(dec_string.split(":")[:2])
filfilenm = "GBT350drift_%d_%s.fil" % (MJDi, str_coords)
os.rename(tmpfilenm, filfilenm)
print("Renamed '%s' to '%s'." % (tmpfilenm, filfilenm))
| 4,079 | 36.777778 | 77 | py |
presto | presto-master/bin/chooseN.py | #!/usr/bin/env python
from __future__ import (print_function,division)
import presto.psr_utils as pu
import sys
from presto.infodata import infodata
if len(sys.argv) != 2:
print("chooseN <file.inf|numpoints>")
print(" Prints a good value for fast FFTs to be used for -numout in prepdata/prepsubband")
sys.exit(1)
if sys.argv[1].endswith('.inf'):
inf = infodata(sys.argv[1])
n = inf.N
else:
try:
n = int(sys.argv[1])
except:
print("chooseN <file.inf|numpoints>")
print(" Prints a good value for fast FFTs to be used for -numout in prepdata/prepsubband")
sys.exit(2)
print(pu.choose_N(n))
| 656 | 26.375 | 101 | py |
presto | presto-master/bin/fit_circular_orbit.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import range
import sys
import numpy as num
from presto import psr_utils as pu
from presto import psr_constants as pc
from presto import parfile
from presto import bestprof
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
period = num.asarray([])
time = num.asarray([])
def parse_eph(filenm):
global period, time
suffix = filenm.split(".")[-1]
if suffix=="bestprof":
x = bestprof.bestprof(filenm)
fs = pu.p_to_f(x.p0_bary, x.p1_bary, x.p2_bary)
epoch = x.epochi_bary + x.epochf_bary
T = x.T
elif suffix=="par":
x = parfile.psr_par(filenm)
# Try to see how many freq derivs we have
fs = [x.F0]
for ii in range(1, 20): # hopefully 20 is an upper limit!
attrib = "F%d"%ii
if hasattr(x, attrib):
fs.append(getattr(x, attrib))
else:
break
epoch = x.PEPOCH
T = (x.FINISH - x.START) * 86400.0
else:
print("I don't recognize the file type for", filenm)
sys.exit()
newts = epoch + num.arange(int(T/10.0+0.5), dtype=float)/8640.0
time = num.concatenate((time, newts))
newps = 1.0 / pu.calc_freq(newts, epoch, *fs)
period = num.concatenate((period, newps))
print("%13.7f (%0.1f sec): " % (epoch, T), fs)
def orbeqn(Ppxt, times):
# P = Ppsr, p = Porb, x = a*sin(i)/s, t = T_o
phi = pc.TWOPI*(times - Ppxt[3])*86400.0/Ppxt[1]
return Ppxt[0]*(1.0+pc.TWOPI*Ppxt[2]/Ppxt[1]*num.cos(phi))
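# Added note spelling out the model that orbeqn() evaluates: for a circular
# orbit the apparent spin period varies as
#   P(t) = P_psr * (1 + 2*pi*(a*sin(i)/c)/P_orb * cos(2*pi*(t - T0)/P_orb))
# with Ppxt = [P_psr, P_orb (s), a*sin(i)/c (s), T0 (MJD)] and t in days.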
def funct(Ppxt, times, measured):
return orbeqn(Ppxt, times) - measured
if __name__ == '__main__':
if len(sys.argv)==1:
print("\nusage: fit_circular_orbit.py P_psr P_orb X_orb parfiles or bestprofs")
exit(0)
Ppsr = float(sys.argv[1])
Porb = float(sys.argv[2])*86400.0
Xorb = float(sys.argv[3])
for infile in sys.argv[4:]:
parse_eph(infile)
Torb = min(time)
period = num.asarray(period, dtype=float)
time = num.asarray(time, dtype=float)
ret = leastsq(funct, [Ppsr, Porb, Xorb, Torb], args=(time, period))
To = ret[0][3]
if (ret[0][2] < 0.0):
print("Modifying TO because of negative asini/c...")
ret[0][3] += 0.5 * (ret[0][1]/86400.0)
ret[0][2] = abs(ret[0][2])
print("P_orb = %.3f hrs" % (ret[0][1]/3600.0))
print("P0 %17.15g 1" % ret[0][0])
print("PB %17.15g 1" % (ret[0][1]/86400.0))
print("A1 %17.15g 1" % ret[0][2])
print("T0 %17.15g 1" % ret[0][3])
print("E 0.0")
print("OM 0.0")
T = max(time)-min(time)
model_time = num.arange(min(time)-0.1*T, max(time)+0.1*T, 0.01)
plt.figure()
plt.plot(time-model_time[0],
(period-ret[0][0])*1000.0, '.')
plt.plot(model_time-model_time[0],
(orbeqn(ret[0], model_time)-ret[0][0])*1000.0, 'r')
plt.xlabel("Days + %.7f"%model_time[0])
plt.ylabel("Pulsar Period - %.7f (ms)"%(ret[0][0]*1000.0))
plt.show()
| 3,056 | 30.515464 | 87 | py |
presto | presto-master/bin/subband_smearing.py | #!/usr/bin/env python
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as num
import presto.psr_utils as pu
def subband_smear(DM, subDM, subBW, fctr):
"""
subband_smear(DM, subDM, subBW, fctr):
Return the smearing in ms caused by subbanding at DM='DM' given
subbands of bandwidth 'subBW' (MHz) at DM='subDM'. All values
are computed at the frequency fctr in MHz.
"""
return 1000.0 * pu.dm_smear(num.fabs(DM-subDM), subBW, fctr)
def chan_smear(DM, chanDM, chanBW, fctr):
"""
chan_smear(DM, chanDM, chanBW, fctr):
    Return the smearing in ms caused by finite-width channels at DM='DM'
given channels of bandwidth 'chanBW' (MHz) at DM='chanDM'. All
values are computed at the frequency fctr in MHz.
"""
return subband_smear(DM, chanDM, chanBW, fctr)
def orig_smear(DM, nchan, chanDM, BW, fctr, dt):
"""
orig_smear(DM, nchan, chanDM, BW, fctr, dt):
Return the total smearing in ms due to the sampling rate,
and the smearing over each channel.
"""
return num.sqrt((1000.0*dt)**2.0 +
chan_smear(DM, chanDM, BW/nchan, fctr)**2.0)
def total_smear(DM, nchan, chanDM, nsub, subDM,
BW, fctr, dt, downsamp):
"""
total_smear(DM, nchan, chanDM, nsub, subDM,
BW, fctr, dt, downsamp):
Return the total smearing in ms due to the original channel
format and the properties of the subbands.
"""
# the factor of two comes from integer-bin shifts when doing
# the incoherent subbanding
return num.sqrt(2 * (1000.0*dt*downsamp)**2.0 +
chan_smear(DM, chanDM, BW/nchan, fctr)**2.0 +
subband_smear(DM, subDM, BW/nsub, fctr)**2.0)
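# Added worked example (hypothetical numbers): with 0.1 MHz channels at a
# centre frequency of 400 MHz, chan_smear() for a DM offset of 100 pc/cm^3
# from chanDM gives roughly 1000*100*0.1/(0.0001205*400**3) ~ 1.3 ms;
# total_smear() combines this in quadrature with the (down)sampled time
# resolution and the subband smearing.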
def usage():
print("""
usage: subband_smearing.py [options]
[-l loDM, --loDM=loDM] : Low DM
[-h hiDM, --hiDM=HIDM] : High DM
[-t dt, --dt=dt] : Sample time (s)
[-s subbands, --nsub=nsub] : Number of subbands
[-m subdm, --subDM=subDM] : DM of each channel
[-f fctr, --fctr=fctr] : Center frequency in MHz
[-b BW, --bw=bandwidth] : Bandwidth in MHz
[-n #chan, --nchan=#chan] : Number of channels
[-c chanDM, --chanDM=chanDM] : DM in each channel (default = 0.0)
[-d N, --downsamp=downsamp] : Integer downsample (default = 1)
""")
if __name__=='__main__':
import getopt, sys
try:
opts, args = getopt.getopt(sys.argv[1:], "l:h:t:s:m:f:b:n:c:d:",
["loDM=", "hiDM=", "dt=",
"nsub=", "subDM="
"fctr=", "bw=",
"nchan=", "chanDM=", "downsamp="])
except getopt.GetoptError:
usage()
sys.exit(2)
if len(sys.argv)==1:
usage()
sys.exit(2)
# Defaults
chanDM = 0.0
downsamp = 1
for o, a in opts:
if o in ("-l", "--loDM"):
loDM = float(a)
elif o in ("-h", "--hiDM"):
hiDM = float(a)
elif o in ("-t", "--dt"):
dt = float(a)
elif o in ("-s", "--nsub"):
nsub = int(a)
elif o in ("-m", "--subDM"):
subDM = float(a)
elif o in ("-f", "--fctr"):
fctr = float(a)
elif o in ("-b", "--bw"):
BW = float(a)
elif o in ("-n", "--nchan"):
nchan = int(a)
elif o in ("-c", "--chanDM"):
chanDM = float(a)
elif o in ("-d", "--downsamp"):
            downsamp = int(a)
DMs = num.linspace(loDM, hiDM, 1000)
samp = num.ones_like(DMs) * 1000.0 * dt
dsamp = samp * downsamp
chan = chan_smear(DMs, chanDM, BW/nchan, fctr)
subband = subband_smear(DMs, subDM, BW/nsub, fctr)
orig = orig_smear(DMs, nchan, chanDM, BW, fctr, dt)
total = total_smear(DMs, nchan, chanDM, nsub, subDM,
BW, fctr, dt, downsamp)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.semilogy(DMs, samp, 'g:',
DMs, dsamp, 'g--',
DMs, chan, 'r:',
DMs, subband, 'r--',
DMs, orig, 'k:',
DMs, total, 'k')
leg = ax.legend(('Sampling time', 'Downsampling',
'Channel smear', 'Subband smear',
'Original time res', 'Total time res'),
loc='upper center')
ax.set_xlabel('Disperson Measure')
ax.set_ylabel('Smearing (ms)')
ax.set_xlim([DMs.min(), DMs.max()])
ax.set_ylim([0.5*1000.0*dt, 2.0*total.max()])
plt.show()
| 4,664 | 33.813433 | 72 | py |
presto | presto-master/bin/DDplan.py | #!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
import numpy as np
from presto.Pgplot import *
import presto.filterbank as fil
import presto.psrfits as pfits
class observation(object):
def __init__(self, dt, f_ctr, BW, numchan, cDM):
# dt in sec, f_ctr and in MHz
self.dt = dt
self.f_ctr = f_ctr
self.BW = BW
self.numchan = numchan
self.chanwidth = BW/numchan
self.cDM = cDM
def guess_dDM(self, DM):
"""
guess_dDM(self, DM):
Choose a reasonable dDM by setting the maximum smearing across the
'BW' to equal the sampling time 'dt'.
"""
return self.dt*0.0001205*self.f_ctr**3.0/(0.5*self.BW)
class dedisp_method(object):
def __init__(self, obs, downsamp, loDM, hiDM, dDM, numDMs=0,
numsub=0, numprocs=1, smearfact=2.0):
self.obs = obs
self.downsamp = downsamp
self.loDM = loDM
self.dDM = dDM
self.numsub = numsub
self.BW_smearing = BW_smear(dDM, self.obs.BW, self.obs.f_ctr)
self.numprepsub = 0
if (numsub): # Calculate the maximum subband smearing we can handle
DMs_per_prepsub = 2
while(1):
next_dsubDM = (DMs_per_prepsub+2) * dDM
next_ss = subband_smear(next_dsubDM, numsub, self.obs.BW, self.obs.f_ctr)
# The 0.8 is a small fudge factor to make sure that the subband
# smearing is always the smallest contribution
if (next_ss > 0.8*min(self.BW_smearing, 1000.0*obs.dt*downsamp)):
self.dsubDM = DMs_per_prepsub*dDM
self.DMs_per_prepsub = DMs_per_prepsub
self.sub_smearing = subband_smear(self.dsubDM, numsub,
self.obs.BW, self.obs.f_ctr)
break
DMs_per_prepsub += 2
else:
self.dsubDM = dDM
self.sub_smearing = subband_smear(self.dsubDM, numsub, self.obs.BW, self.obs.f_ctr)
# Calculate the nominal DM to move to the next method
cross_DM = self.DM_for_smearfact(smearfact)
if (cross_DM > hiDM):
cross_DM = hiDM
if (numDMs==0):
self.numDMs = int(np.ceil((cross_DM-loDM)/dDM))
if (numsub):
self.numprepsub = int(np.ceil(self.numDMs*dDM / self.dsubDM))
if (numprocs > 1 and self.numprepsub % numprocs):
# Make sure the number of "calls" is a multiple of numprocs
self.numprepsub = (self.numprepsub // numprocs + 1) * numprocs
# Now adjust DMs_per_prepsub in case numprepsub increased a lot
while (self.DMs_per_prepsub > 1 and
self.numprepsub * self.DMs_per_prepsub > self.numDMs):
self.DMs_per_prepsub -= 1
self.numDMs = self.numprepsub * self.DMs_per_prepsub
else:
self.numDMs = numDMs
# Make sure the number of DMs is divisible by the number of processors
if (numprocs > 1 and self.numDMs % numprocs):
self.numDMs = (self.numDMs // numprocs + 1) * numprocs
self.hiDM = loDM + self.numDMs*dDM
self.DMs = np.arange(self.numDMs, dtype='d')*dDM + loDM
def chan_smear(self, DM):
"""
Return the smearing (in ms) in each channel at the specified DM
"""
try:
            DM = np.where(DM-self.obs.cDM==0.0, self.obs.cDM+self.dDM/2.0, DM)
        except TypeError:
            if (DM-self.obs.cDM==0.0): DM = self.obs.cDM+self.dDM/2.0
return dm_smear(DM, self.obs.chanwidth, self.obs.f_ctr, self.obs.cDM)
def total_smear(self, DM):
"""
Return the total smearing in ms due to the sampling rate,
the smearing over each channel, the smearing over each subband
(if numsub > 0) and the smearing over the full BW assuming the
worst-case DM error.
"""
return np.sqrt((1000.0*self.obs.dt)**2.0 +
(1000.0*self.obs.dt*self.downsamp)**2.0 +
self.BW_smearing**2.0 +
self.sub_smearing**2.0 +
self.chan_smear(DM)**2.0)
def DM_for_smearfact(self, smearfact):
"""
Return the DM where the smearing in a single channel is a factor smearfact
larger than all the other smearing causes combined.
"""
other_smear = np.sqrt((1000.0*self.obs.dt)**2.0 +
(1000.0*self.obs.dt*self.downsamp)**2.0 +
self.BW_smearing**2.0 +
self.sub_smearing**2.0)
return smearfact*0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0 + self.obs.cDM
def DM_for_newparams(self, dDM, downsamp):
"""
        Return the DM where the smearing in a single channel causes the same smearing
        as the effects of the new downsampling rate and dDM.
"""
other_smear = np.sqrt((1000.0*self.obs.dt)**2.0 +
(1000.0*self.obs.dt*downsamp)**2.0 +
BW_smear(dDM, self.obs.BW, self.obs.f_ctr)**2.0 +
self.sub_smearing**2.0)
return 0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0
def plot(self, work_fract):
DMspan = self.DMs[-1]-self.DMs[0]
loDM = self.DMs[0] + DMspan*0.02
hiDM = self.DMs[-1] - DMspan*0.02
midDM = self.DMs[0] + DMspan*0.5
dt_ms = 1000.0*self.obs.dt*self.downsamp
plotxy(np.log10(self.total_smear(self.DMs)), self.DMs, width=4)
ppgplot.pgslw(1)
ppgplot.pgsch(0.8)
ppgplot.pgptxt(midDM, np.log10(1.1*self.total_smear(midDM)), 90.0, 0.0,
"%d (%.1f%%)" % (self.numDMs, 100.0*work_fract))
# Sample time
plotxy(np.log10(np.zeros(self.numDMs)+dt_ms), self.DMs, width=1, color='green')
ppgplot.pgptxt(loDM, np.log10(0.85*dt_ms), 0.0, 0.0,
"%g" % dt_ms)
# DM stepsize smearing
plotxy(np.log10(np.zeros(self.numDMs)+self.BW_smearing), self.DMs, width=1, color='red')
ppgplot.pgptxt(hiDM, np.log10(0.85*self.BW_smearing), 0.0, 1.0,
"%g" % self.dDM)
# channel smearing
plotxy(np.log10(self.chan_smear(self.DMs)), self.DMs, width=1, color='blue')
# subband smearing
if (self.numsub):
plotxy(np.log10(np.zeros(self.numDMs)+self.sub_smearing),
self.DMs, width=1, color='purple')
ppgplot.pgptxt(midDM, np.log10(0.85*self.sub_smearing), 0.0, 0.5,
"%g (%d)" % (self.dsubDM, self.numprepsub))
ppgplot.pgsci(1)
ppgplot.pgsch(1.0)
def __str__(self):
if (self.numsub):
return "%9.3f %9.3f %6.2f %4d %6.2f %6d %6d %6d " % \
(self.loDM, self.hiDM, self.dDM, self.downsamp, self.dsubDM,
self.numDMs, self.DMs_per_prepsub, self.numprepsub)
else:
return "%9.3f %9.3f %6.2f %4d %6d" % \
(self.loDM, self.hiDM, self.dDM, self.downsamp, self.numDMs)
def choose_downsamps(blocklen):
"""
choose_downsamps(blocklen):
Return a good list of possible downsample sizes given a
block of data of length blocklen spectra.
"""
# This is first cut. We will then remove redundant ones.
x = np.asarray([n for n in np.arange(1, 260) if blocklen%n==0])
if len(x)==1: return x
# Now only choose those where the ratio is between 1.5 and 2, if possible
if (x[1:]/x[:-1]).min() < 1.5:
newx = [1]
if 2 in x: newx.append(2)
if 3 in x: newx.append(3)
maxnewx = newx[-1]
while maxnewx < x[-1]:
if round(1.5*maxnewx+1e-7) in x:
newx.append(round(1.5*maxnewx+1e-7))
elif 2*maxnewx in x:
newx.append(2*maxnewx)
else:
if x[-1] > 1.5*maxnewx:
newx.append(int(x[x>1.5*maxnewx].min()))
else:
return newx
maxnewx = newx[-1]
return newx
else:
return x
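# Added example (not executed here): for a power-of-two block length such as
# choose_downsamps(1024), every divisor ratio is 2, so the full divisor list
# [1, 2, 4, ..., 256] is returned unchanged; for block lengths with many small
# divisors, the pruning branch keeps only steps growing by roughly 1.5-2x.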
def dm_smear(DM, BW, f_ctr, cDM=0.0):
"""
dm_smear(DM, BW, f_ctr, cDM=0.0):
Return the smearing in ms caused by a 'DM' over a bandwidth
of 'BW' MHz centered at 'f_ctr' MHz.
"""
return 1000.0*np.fabs(DM-cDM)*BW/(0.0001205*f_ctr**3.0)
def BW_smear(DMstep, BW, f_ctr):
"""
BW_smear(DMstep, BW, f_ctr):
Return the smearing in ms caused by a search using a DM stepsize of
'DMstep' over a bandwidth of 'BW' MHz centered at 'f_ctr' MHz.
"""
maxDMerror = 0.5*DMstep
return dm_smear(maxDMerror, BW, f_ctr)
def guess_DMstep(dt, BW, f_ctr):
"""
guess_DMstep(dt, BW, f_ctr):
Choose a reasonable DMstep by setting the maximum smearing across the
'BW' to equal the sampling time 'dt'.
"""
return dt*0.0001205*f_ctr**3.0/(0.5*BW)
def subband_smear(subDMstep, numsub, BW, f_ctr):
"""
subband_smear(subDMstep, numsub, BW, f_ctr):
Return the smearing in ms caused by a search using a subband
DM stepsize of 'subDMstep' over a total bandwidth of 'BW' MHz
centered at 'f_ctr' MHz, and having numsub subbands.
"""
if (numsub==0): return 0.0
subBW = BW/numsub
maxsubDMerror = 0.5*subDMstep
return dm_smear(maxsubDMerror, subBW, f_ctr)
def total_smear(DM, DMstep, dt, f_ctr, BW, numchan, subDMstep, cohdm=0.0, numsub=0):
"""
total_smear(DM, DMstep, dt, f_ctr, BW, numchan, subDMstep, cohdm=0.0, numsub=0):
Return the total smearing in ms due to the sampling rate,
the smearing over each channel, the smearing over each subband
(if numsub > 0) and the smearing over the full BW assuming the
worst-case DM error.
"""
return np.sqrt(2 * (1000.0*dt)**2.0 +
dm_smear(DM, BW/numchan, f_ctr, cohdm)**2.0 +
subband_smear(subDMstep, numsub, BW, f_ctr)**2.0 +
BW_smear(DMstep, BW, f_ctr)**2.0)
def dm_steps(loDM, hiDM, obs, cohdm=0.0, numsub=0, numprocs=1,
ok_smearing=0.0, blocklen=None, device="/XWIN"):
"""
dm_steps(loDM, hiDM, obs, cohdm=0.0, numsub=0, numprocs=1,
ok_smearing=0.0, blocklen=None, device="/XWIN"):
Return the optimal DM stepsizes (and subband DM stepsizes if
numsub>0) to keep the total smearing below 'ok_smearing' (in ms),
for the DMs between loDM and hiDM. If 'ok_smearing'=0.0, then
use the best values based only on the data. If the blocklen is
not None, use it to determine possible downsampling values.
And if device is not None, use it as the PGPLOT device for plotting.
"""
# Allowable DM stepsizes
allow_dDMs = [0.01, 0.02, 0.03, 0.05, 0.1, 0.2, 0.3, 0.5, 1.0,
2.0, 3.0, 5.0, 10.0, 20.0, 30.0, 50.0, 100.0, 200.0, 300.0]
# Allowable number of downsampling factors
allow_downsamps = choose_downsamps(blocklen)
# Initial values
index_downsamps = index_dDMs = 0
downsamp = allow_downsamps[index_downsamps]
dDM = allow_dDMs[index_dDMs]
dtms = 1000.0*obs.dt
# Fudge factor that "softens" the boundary defining
# if 2 time scales are equal or not
ff = 1.2
# This is the array that will hold the de-dispersion plans
methods = []
# Minimum possible smearing
min_tot_smearing = total_smear(loDM+0.5*dDM, dDM, obs.dt, obs.f_ctr,
obs.BW, obs.numchan, allow_dDMs[0], cohdm, 0)
# Minimum channel smearing
min_chan_smearing = dm_smear(np.linspace(loDM, hiDM, 10000),
obs.chanwidth, obs.f_ctr, cohdm).min()
# Minimum smearing across the obs.BW
min_BW_smearing = BW_smear(dDM, obs.BW, obs.f_ctr)
print()
print("Minimum total smearing : %.3g ms" % min_tot_smearing)
print("--------------------------------------------")
print("Minimum channel smearing : %.3g ms" % min_chan_smearing)
print("Minimum smearing across BW : %.3g ms" % min_BW_smearing)
print("Minimum sample time : %.3g ms" % dtms)
print()
ok_smearing = max([ok_smearing, min_chan_smearing, min_BW_smearing, dtms])
print("Setting the new 'best' resolution to : %.3g ms" % ok_smearing)
# See if the data is too high time resolution for our needs
if (ff*min_chan_smearing > dtms or
ok_smearing > dtms):
if (ok_smearing > ff*min_chan_smearing):
print(" Note: ok_smearing > dt (i.e. data is higher resolution than needed)")
okval = ok_smearing
else:
print(" Note: min_chan_smearing > dt (i.e. data is higher resolution than needed)")
okval = ff*min_chan_smearing
while (dtms*allow_downsamps[index_downsamps+1] < okval):
index_downsamps += 1
downsamp = allow_downsamps[index_downsamps]
print(" New dt is %d x %.12g ms = %.12g ms" % \
(downsamp, dtms, dtms*downsamp))
# Calculate the appropriate initial dDM
dDM = guess_DMstep(obs.dt*downsamp, obs.BW, obs.f_ctr)
print("Best guess for optimal initial dDM is %.3f" % dDM)
while (allow_dDMs[index_dDMs+1] < ff*dDM):
index_dDMs += 1
# If numprocs > 1, we are using mpiprepsubband, so let the
# user know
if (numprocs > 1):
print("\nAssuming we are using mpiprepsubband with %d dedispersing CPUs:"
% numprocs)
print("Each line of the dedispersion plan is one or more distinct runs of")
print("mpiprepsubband, and each 'call' is the work that a single CPU is doing.")
# Create the first method
methods = [dedisp_method(obs, downsamp, loDM, hiDM,
allow_dDMs[index_dDMs], numsub=numsub,
numprocs=numprocs)]
numDMs = [methods[-1].numDMs]
# Calculate the next methods
while(methods[-1].hiDM < hiDM):
# Determine the new downsample factor
index_downsamps += 1
downsamp = allow_downsamps[index_downsamps]
eff_dt = dtms*downsamp
# Determine the new DM step
while (BW_smear(allow_dDMs[index_dDMs+1], obs.BW, obs.f_ctr) < ff*eff_dt):
index_dDMs += 1
dDM = allow_dDMs[index_dDMs]
# Get the next method
methods.append(dedisp_method(obs, downsamp, methods[-1].hiDM,
hiDM, dDM, numsub=numsub,
numprocs=numprocs))
numDMs.append(methods[-1].numDMs)
# Calculate the DMs to search and the smearing at each
total_numDMs = sum(numDMs)
DMs = np.zeros(total_numDMs, dtype='d')
total_smears = np.zeros(total_numDMs, dtype='d')
# Calculate the DMs and optimal smearing for all the DMs
for ii, offset in enumerate(np.add.accumulate([0]+numDMs[:-1])):
DMs[offset:offset+numDMs[ii]] = methods[ii].DMs
total_smears[offset:offset+numDMs[ii]] = methods[ii].total_smear(methods[ii].DMs)
# Calculate the predicted amount of time that will be spent in searching
# this batch of DMs as a fraction of the total
work_fracts = [meth.numDMs/float(meth.downsamp) for meth in methods]
work_fracts = np.asarray(work_fracts)/sum(work_fracts)
# The optimal smearing
tot_smear = total_smear(DMs, allow_dDMs[0], obs.dt, obs.f_ctr,
obs.BW, obs.numchan, allow_dDMs[0], cohdm, 0)
if device is not None:
# Plot them
plotxy(np.log10(tot_smear), DMs, color='orange', logy=1, rangex=[loDM, hiDM],
rangey=[np.log10(0.3*min(tot_smear)), np.log10(2.5*max(tot_smear))],
labx="Dispersion Measure (pc/cm\\u3\\d)", laby="Smearing (ms)",
device=device)
ppgplot.pgsch(1.1)
ppgplot.pgsci(1)
if (numsub):
ppgplot.pgmtxt("t", 1.5, 0.6/10.0, 0.5, "\(2156)\dctr\\u = %g MHz" % obs.f_ctr)
if (dtms < 0.1):
ppgplot.pgmtxt("t", 1.5, 2.8/10.0, 0.5, "dt = %g \\gms" % (dtms*1000))
else:
ppgplot.pgmtxt("t", 1.5, 2.8/10.0, 0.5, "dt = %g ms" % dtms)
ppgplot.pgmtxt("t", 1.5, 5.0/10.0, 0.5, "BW = %g MHz" % obs.BW)
ppgplot.pgmtxt("t", 1.5, 7.2/10.0, 0.5, "N\\dchan\\u = %d" % obs.numchan)
ppgplot.pgmtxt("t", 1.5, 9.4/10.0, 0.5, "N\\dsub\\u = %d" % numsub)
else:
ppgplot.pgmtxt("t", 1.5, 1.0/8.0, 0.5, "\\(2156)\\dctr\\u = %g MHz" % obs.f_ctr)
if (dtms < 0.1):
ppgplot.pgmtxt("t", 1.5, 3.0/8.0, 0.5, "dt = %g \\gms" % (dtms*1000))
else:
ppgplot.pgmtxt("t", 1.5, 3.0/8.0, 0.5, "dt = %g ms" % dtms)
ppgplot.pgmtxt("t", 1.5, 5.0/8.0, 0.5, "BW = %g MHz" % obs.BW)
ppgplot.pgmtxt("t", 1.5, 7.0/8.0, 0.5, "N\\dchan\\u = %d" % obs.numchan)
ppgplot.pgsch(1.0)
dy = -1.4
ppgplot.pgsci(1)
ppgplot.pgmtxt("b", 6*dy, 0.97, 1.0, "Total Smearing")
ppgplot.pgsci(8)
ppgplot.pgmtxt("b", 5*dy, 0.97, 1.0, "Optimal Smearing")
ppgplot.pgsci(4)
if (cohdm):
ppgplot.pgmtxt("b", 4*dy, 0.97, 1.0, "Chan Smearing (w/ coherent dedisp)")
else:
ppgplot.pgmtxt("b", 4*dy, 0.97, 1.0, "Channel Smearing")
ppgplot.pgsci(3)
ppgplot.pgmtxt("b", 3*dy, 0.97, 1.0, "Sample Time (ms)")
ppgplot.pgsci(2)
ppgplot.pgmtxt("b", 2*dy, 0.97, 1.0, "DM Stepsize Smearing")
if (numsub):
ppgplot.pgsci(12)
ppgplot.pgmtxt("b", 1*dy, 0.97, 1.0, "Subband Stepsize Smearing (# passes)")
ppgplot.pgsci(11)
if (numsub):
print("\n Low DM High DM dDM DownSamp dsubDM #DMs DMs/call calls WorkFract")
else:
print("\n Low DM High DM dDM DownSamp #DMs WorkFract")
for method, fract in zip(methods, work_fracts):
print(method, " %.4g" % fract)
method.plot(fract)
print("\n\n")
closeplot()
return methods
dedisp_template1 = """
from __future__ import print_function
from builtins import zip
from builtins import range
import os
def myexecute(cmd):
print("'%s'"%cmd)
os.system(cmd)
# By default, do not output subbands
outsubs = False
"""
dedisp_template2 = """
# Loop over the DDplan plans
for dDM, dsubDM, dmspercall, downsamp, subcall, startDM in zip(dDMs, dsubDMs, dmspercalls, downsamps, subcalls, startDMs):
# Loop over the number of calls
for ii in range(subcall):
subDM = startDM + (ii+0.5)*dsubDM
loDM = startDM + ii*dsubDM
if outsubs:
# Get our downsampling right
subdownsamp = downsamp // 2
datdownsamp = 2
if downsamp < 2: subdownsamp = datdownsamp = 1
# First create the subbands
myexecute("prepsubband -sub -subdm %.2f -nsub %d -downsamp %d -o %s %s" %
(subDM, nsub, subdownsamp, basename, rawfiles))
# And now create the time series
subnames = basename+"_DM%.2f.sub[0-9]*"%subDM
myexecute("prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d -o %s %s" %
(loDM, dDM, dmspercall, datdownsamp, basename, subnames))
else:
myexecute("prepsubband -nsub %d -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d -o %s %s" %
(nsub, loDM, dDM, dmspercall, downsamp, basename, rawfiles))
"""
def usage():
print("""
usage: DDplan.py [options] [raw PSRFITS or filterbank file]
[-h, --help] : Display this help
  [-o outfile, --output=outfile]  : Output .eps plot file (default is xwin)
[-l loDM, --loDM=loDM] : Low DM to search (default = 0 pc cm-3)
[-d hiDM, --hiDM=HIDM] : High DM to search (default = 1000 pc cm-3)
[-f fctr, --fctr=fctr] : Center frequency (default = 1400MHz)
[-b BW, --bw=bandwidth] : Bandwidth in MHz (default = 300MHz)
[-n #chan, --numchan=#chan] : Number of channels (default = 1024)
[-k blocklen, --blocklen=#spec] : Spectra per subint (for downsampling) (default = 1024)
[-c cDM, --cohdm=cDM] : Coherent DM in each chan (default = 0.0)
[-t dt, --dt=dt] : Sample time (s) (default = 0.000064 s)
[-s #subbands, --subbands=nsub] : Number of subbands (default = #chan)
[-p #procs, --procs=nprocs] : # CPUs dedispersing for mpiprepsubband (default = 1)
[-r resolution, --res=res] : Acceptable time resolution (ms)
[-w, --write] : Write a dedisp.py file for the plan
The program generates a good plan for de-dispersing raw data. It
trades a small amount of sensitivity in order to save computation costs.
It will determine the observation parameters from the raw data file
if it exists.
""")
if __name__=='__main__':
import sys
import os
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "hwo:l:d:f:b:n:k:c:t:s:p:r:",
["help", "write", "output=", "loDM=", "hiDM=",
"fctr=", "bw=", "numchan=", "blocklen=",
"cDM=", "dt=", "subbands=", "procs=", "res="])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
if len(sys.argv)==1:
usage()
sys.exit(2)
# The defaults are close to the future ALFA survey
loDM, hiDM = 0.0, 1000.0
fctr = 1400.0
BW = 300.0
numchan = 1024
numsubbands = 0
numprocs = 1
dt = 0.000064
cDM = 0.0
ok_smearing = 0.0
device = "/xwin"
write_dedisp = False
blocklen = 1024
if len(args):
fname, ext = os.path.splitext(args[0])
if ext==".fil": # Assume it is filterbank
try:
hdr, hdr_size = fil.read_header(args[0])
dt = hdr['tsamp']
numchan = hdr['nchans']
BW = np.fabs(hdr['foff']) * numchan
fctr = hdr['fch1'] + 0.5 * hdr['foff'] * numchan - 0.5 * hdr['foff']
blocklen = 2400 # from $PRESTO/src/sigproc_fb.c (spectra_per_subint)
print("""
Using:
dt = %g s
numchan = %d
blocklen = %d
BW = %g MHz
fctr = %g MHz
from '%s'
""" % (dt, numchan, blocklen, BW, fctr, args[0]))
except:
print("Cannot read '%s' as SIGPROC filterbank. Ignoring."%args[0])
else: # Assume it is PSRFITS
try:
pf = pfits.PsrfitsFile(args[0])
dt = pf.tsamp
numchan = pf.nchan
fctr = pf.header["OBSFREQ"]
BW = numchan * np.fabs(pf.specinfo.df)
blocklen = pf.specinfo.spectra_per_subint
print("""
Using:
dt = %g s
numchan = %d
blocklen = %d
BW = %g MHz
fctr = %g MHz
from '%s'
""" % (dt, numchan, blocklen, BW, fctr, args[0]))
if "CHAN_DM" in pf.header:
cDM = pf.header["CHAN_DM"]
if cDM != 0.0:
print("And assuming channels coherently dedispersed at DM = %g pc/cm^3"%cDM)
except:
print(sys.exc_info()[0])
print("Cannot read '%s' as PSRFITS. Ignoring."%args[0])
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-w", "--write"):
write_dedisp = True
if o in ("-o", "--output"):
device = a
if not (device.endswith(".eps") or device.endswith(".ps")):
device += ".eps"
if not (device.endswith("/CPS") or device.endswith("/VCPS") or
device.endswith("/cps") or device.endswith("/vcps")):
device += "/CPS"
if o in ("-l", "--loDM"):
loDM = float(a)
if o in ("-d", "--hiDM"):
hiDM = float(a)
if o in ("-f", "--fctr"):
fctr = float(a)
if o in ("-b", "--bw"):
BW = float(a)
if o in ("-n", "--numchan"):
numchan = int(a)
if o in ("-k", "--blocklen"):
blocklen = int(a)
if o in ("-t", "--dt"):
dt = float(a)
if o in ("-c", "--cohdm"):
cDM = float(a)
if o in ("-s", "--subbands"):
numsubbands = int(a)
if o in ("-p", "--procs"):
numprocs = int(a)
if o in ("-r", "--res"):
ok_smearing = float(a)
# The following is an instance of an "observation" class
obs = observation(dt, fctr, BW, numchan, cDM)
if write_dedisp: # Always use subbands if writing a dedisp routine
if numsubbands==0:
divs = [20, 16, 15, 12, 10, 9, 8, 7, 6, 5, 4, 3]
for div in divs[::-1]:
if numchan % div == 0 and numchan // div >= 32:
numsubbands = numchan // div
if numsubbands==0:
numsubbands = numchan
print("Setting nsub to %d since writing a dedisp.py file"%numsubbands)
# The following function creates the de-dispersion plan
# The ok_smearing values is optional and allows you to raise the floor
# and provide a level of smearing that you are willing to accept (in ms)
methods = dm_steps(loDM, hiDM, obs, cDM, numsubbands, numprocs,
ok_smearing, blocklen, device)
if write_dedisp:
dDMs = [m.dDM for m in methods]
dsubDMs = [m.dsubDM for m in methods]
startDMs = [m.loDM for m in methods]
downsamps = [m.downsamp for m in methods]
dmspercalls = [m.DMs_per_prepsub for m in methods]
subcalls = [m.numprepsub for m in methods]
basename, ext = os.path.splitext(args[0])
with open('dedisp_%s.py'%basename, 'w') as f:
f.write(dedisp_template1)
f.write("nsub = %d\n\n"%numsubbands)
f.write("basename = %s\n"%repr(basename))
f.write("rawfiles = %s\n\n"%repr(args[0]))
f.write("""# dDM steps from DDplan.py
dDMs = %s\n"""%repr(dDMs))
f.write("""# dsubDM steps
dsubDMs = %s\n"""%repr(dsubDMs))
f.write("""# downsample factors
downsamps = %s\n"""%repr(downsamps))
f.write("""# number of calls per set of subbands
subcalls = %s\n"""%repr(subcalls))
f.write("""# The low DM for each set of DMs
startDMs = %s\n"""%repr(startDMs))
f.write("""# DMs/call
dmspercalls = %s\n"""%repr(dmspercalls))
f.write(dedisp_template2)
# The following is an instance of an "observation" class
# Here's one for a "best" resolution GBT search using the SPIGOT
# Check out how many DMs you need! Cool. ;-)
# dt f_ctr BW numchan
#obs = observation(0.00008192, 350.0, 50.0, 2048)
#dm_steps(0.0, 500.0, obs, ok_smearing=0.3) # Create an X-window is the default
#dm_steps(0.0, 500.0, obs, ok_smearing=0.3, device="GBT_350_DD.ps/CPS")
# Example for Parkes survey
#obs = observation(0.000250, 1374.0, 288.0, 96)
#dm_steps(0.0, 500.0, obs) # Create an X-window is the default
#dm_steps(0.0, 1500.0, obs, device="PKMB_DD.ps/CPS")
| 27,675 | 40.806647 | 122 | py |
presto | presto-master/bin/quickffdots.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import range
from presto.infodata import *
from presto.presto import ffdot_plane, spectralpower
from pylab import *
import numpy as N
import sys
numharm = 4
# Contour values as a fraction of max in "window"
rel_convals = N.asarray([0.5, 0.8, 0.95, 0.995])
rel_alphas = N.asarray([0.3, 0.5, 0.7, 1.0])
# Absolute power values for contours
abs_convals = N.asarray([5.0, 10.0, 20.0, 40.0, 80.0, 160.0, 1e6])
abs_alphas = N.asarray([0.1, 0.3, 0.4, 0.5, 0.65, 0.8, 1.0])
if len(sys.argv) < 3:
print("usage: quickffdots.py fftfile freq(Hz)")
sys.exit(0)
if 0:
convals = rel_convals
alphas = rel_alphas
else:
convals = abs_convals
alphas = abs_alphas
if (numharm > 6):
print("Numharm must be < 6!")
sys.exit(0)
# Each of the harmonics must have an even number of bins interpolated
# (i.e. an integer number of interpolated points between the integer bins)
drs = {1: 1.0/4, 2: 1.0/8, 3: 1.0/12, 4: 1.0/12, 5: 1.0/60, 6: 1.0/60}
# The following are for the lowest harmonic
zmax = 20
dr = drs[numharm]
dz = dr * 4.0
numrs = int(round(4*zmax/dr))
numzs = int(round(2*zmax/dz)) + 1
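# Sketch of the resulting search grid (values follow from the defaults above:
# numharm=4 -> dr=1/12 bin, dz=4*dr=1/3, zmax=20): numrs x numzs = 960 x 121,
# i.e. +/-40 Fourier bins in r about the guess frequency and -zmax..+zmax in z.
# Uncomment to verify:
# print("f-fdot grid per harmonic: %d x %d (r x z)" % (numrs, numzs))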
infilenm = sys.argv[1]
infile = open(infilenm, 'rb')
idata = infodata(infilenm[:-4]+".inf")
idata.T = idata.N * idata.dt
ctrr = int(round(float(sys.argv[2]) * idata.T))
startr = int(ctrr - numrs/2 * dr)
ctrfreq = ctrr / idata.T
ffdps = []
maxvals = []
maxargs = []
for harmnum in range(1, numharm+1):
print("Computing harmonic", harmnum)
ldr = dr * harmnum
ldz = dz * harmnum
lor = startr * harmnum
loz = 0.0 - (numzs-1)/2 * ldz
rs = N.arange(numrs) * ldr + lor
zs = N.arange(numzs) * ldz + loz
if harmnum==1:
rs0, zs0 = rs[:], zs[:]
lo_file_r = int(rs.min()) - 1000
hi_file_r = int(rs.max()) + 1000
# Read and normalize the raw spectrum
infile.seek(lo_file_r * 8, 0)
fftamps = N.fromfile(infile, 'F', hi_file_r-lo_file_r+1)
fftpows = spectralpower(fftamps)
pownorm = 1.0 / (1.442695 * N.median(fftpows))
fftamps *= sqrt(pownorm)
ffd = ffdot_plane(fftamps, lor-lo_file_r, ldr, numrs, loz, ldz, numzs)
ffd_pows = (ffd * ffd.conj()).real
ffdps.append(ffd_pows)
if (harmnum==1):
sumpows = N.zeros_like(ffd_pows)
sumpows += ffd_pows
argmax = ffd_pows.argmax()
maxvals.append(ffd_pows.max())
maxargs.append((argmax // numrs, argmax % numrs))
print(" Maximum power for harmonic %d = %.2f"%(harmnum, maxvals[-1]))
if (convals.max() < 1.5): # Using relative contours
print("Using relative contours..")
pow_contours = convals * maxvals[-1]
else:
# Only choose the contours with values < the max power
highcut = N.compress(abs_convals > maxvals[-1],
N.arange(len(abs_convals))).min()
pow_contours = N.empty(highcut+1, dtype=float)
pow_contours[:highcut] = abs_convals[:highcut]
pow_contours[highcut] = 1e6
alphas = abs_alphas[:highcut+1]
if harmnum==1: # 'Red'
colorvals = [(alpha, 0.0, 0.0) for alpha in alphas]
elif harmnum==2: # 'Green'
colorvals = [(0.0, alpha, 0.0) for alpha in alphas]
elif harmnum==3: # 'Blue'
colorvals = [(0.0, 0.0, alpha) for alpha in alphas]
elif harmnum==4: # 'Cyan'
colorvals = [(0.0, alpha, alpha) for alpha in alphas]
elif harmnum==5: # 'Magenta'
colorvals = [(alpha, 0.0, alpha) for alpha in alphas]
elif harmnum==6: # 'Yellow'
colorvals = [(alpha, alpha, 0.0) for alpha in alphas]
limits = [rs0.min(), rs0.max(), zs0.min(), zs0.max()]
cstr = "".join(["%.2f "%x for x in pow_contours[:-1]])
print(" Contour levels at powers = "+cstr)
# Plot the contours
contour(ffd_pows, pow_contours, origin='lower',
alpha=0.3, colors=colorvals, extent=limits)
# Plot fill between the last two contours
contourf(ffd_pows, [pow_contours[-2], pow_contours[-1]],
origin='lower', colors=[colorvals[-1]], alpha=0.3,
extent=limits)
xlabel("Average Fourier Frequency (bins)")
ylabel("Fourier Frequency Derivative z (bins)")
if harmnum==1:
axhline(0.0, linewidth=1, color='black', alpha=0.3)
print("\nMax summed power = %.2f"%(sumpows.max()))
argmax = sumpows.argmax()
maxr = rs0[argmax % numrs]
maxz = zs0[argmax // numrs]
maxf = maxr/idata.T
maxfd = maxz/(idata.T*idata.T)
initf = (maxr - 0.5*maxz)/idata.T
print(" at r =", maxr, " (%.10f Hz)"%maxf)
print(" at z =", maxz, " (%.10g Hz/s)"%maxfd)
print("Folding command would be: ")
print(" prepfold -f %.10f -fd %.6g ..." %(initf, maxfd))
infile.close()
show()
| 4,717 | 30.878378 | 74 | py |
presto | presto-master/bin/rrattrap.py | ../python/presto/singlepulse/rrattrap.py | 40 | 40 | 40 | py |
presto | presto-master/bin/makezaplist.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
import sys
from operator import attrgetter
from presto.presto import read_inffile, binary_velocity, psrepoch
class bird(object):
def __init__(self, freq, width, bary=0):
self.freq = freq
self.width = width
self.bary = bary
def __str__(self):
if self.bary:
return "B %20.15g %20.15g\n" % (self.freq, self.width)
else:
return " %20.15g %20.15g\n" % (self.freq, self.width)
def processbirds(filename):
# Filename should be the rootfilename of the FFT + '.birds'
try:
ii = filename.index(".birds")
except ValueError:
print("\nThe birdie filename must end in '.birds'\n")
sys.exit(0)
else:
rootname = filename[:ii]
psrs = 0
freqs = 0
trains = 0
birds = []
print("\nProcessing the birds...")
info = read_inffile(rootname)
    T = info.dt * info.N
# PSRs get 40 bins minimum zapped (overkill for most,
    # but required for the _really_ bright ones)
min_psr_width = 40.0 / T
file = open(filename, "r")
for line in file.readlines():
if (len(line)<=3 or line[0]=='#'):
continue
elif (line[0]=='P'):
(tmp, psrname, numharm) = line.split()
numharm = int(numharm)
psr = psrepoch(psrname, info.mjd_i+info.mjd_f)
if (psr.orb.p):
(minv, maxv) = binary_velocity(T, psr.orb)
psrs += 1
for harm in range(1, numharm+1):
if (psr.orb.p):
midv = 0.5 * (maxv + minv)
midf = (1.0 + midv) * psr.f * harm
width = (maxv - minv) * psr.f * harm
if (0.1 * width < min_psr_width):
width = width + min_psr_width
else:
width = width * 1.1
else:
midf = psr.f * harm
width = min_psr_width
birds.append(bird(midf, width, bary=1))
else:
words = line.split()
increase_width = 0
bary = 0
if (len(words) >= 3):
freq = float(words[0])
width = float(words[1])
numharm = int(words[2])
if (len(words) >= 4):
increase_width = int(words[3])
if (len(words) >= 5):
bary = int(words[4])
trains += 1
if (increase_width):
for harm in range(1, numharm+1):
birds.append(bird(freq * harm, width * harm, bary))
else:
for harm in range(1, numharm+1):
birds.append(bird(freq * harm, width, bary))
else:
freqs += 1
birds.append(bird(float(words[0]), float(words[1])))
print("\nRead %d freqs, %d pulsars, and %d harmonic series." % \
(freqs, psrs, trains))
print("Total number of birdies = %d" % (len(birds)))
birds.sort(key=attrgetter('freq'))
file.close()
file = open(rootname+".zaplist", "w")
file.write("# This file created automatically with makebirds.py\n")
file.write("# Lines beginning with '#' are comments\n")
file.write("# Lines beginning with 'B' are barycentric freqs (i.e. PSR freqs)\n")
file.write("# %20s %20s\n" % ("Freq", "Width"))
file.write("# %s %s\n" % ("-"*20, "-"*20))
for birdie in birds:
file.write(str(birdie))
file.close()
print("\nWrote '%s'\n" % (rootname+".zaplist"))
if __name__ == '__main__':
if len(sys.argv)==1:
print("\nusage: makezaplist.py birdsfilename")
print(" Note: 'birdsfilename' must end in '.birds'\n")
print(" and a related infofile ('.inf') must exist.\n")
else:
processbirds(sys.argv[1])
| 4,050 | 35.495495 | 85 | py |
presto | presto-master/bin/injectpsr.py | #!/usr/bin/env python
"""Inject a fake pulsar into real data, creating
a filterbank file.
Patrick Lazarus, June 26, 2012
"""
from __future__ import print_function
from builtins import zip
from builtins import object
import sys
import argparse
import warnings
import copy
import numpy as np
import scipy.integrate
import scipy.interpolate
import matplotlib
matplotlib.use('agg') # Use a non-interactive backend
import matplotlib.pyplot as plt
import scipy.integrate
from presto import filterbank
from presto import psr_utils
DEBUG = False # Print debugging messages
NUMSECS = 1.0 # Number of seconds of data to use to determine global scale
# when repacking floating-point data into integers
BLOCKSIZE = 1e4 # Number of spectra to manipulate at once
NUMPOINTS = 512 # Number of points to use for spline profiles when applying DM
NINTEG_PER_BIN = 1 # Number of points to average integrate per time bin
def integrate_phase_bin(prof_func, phs, dphs, nseg=1):
intervals = np.linspace(0, dphs, nseg+1, endpoint=True)
edges = intervals+np.asarray(phs)[...,np.newaxis]
return np.trapz(prof_func(edges), edges, axis=-1)
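# Minimal sketch (hypothetical profile; not called anywhere): integrating a
# flat, unit-height profile over a phase bin of width 0.1 returns ~0.1.
def _example_integrate_phase_bin():
    flat = lambda phs: np.ones_like(np.asarray(phs, dtype=float))
    return integrate_phase_bin(flat, 0.0, 0.1, nseg=4)  # ~0.1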
class Profile(object):
"""A class to represent a generic pulse profile.
"""
def __init__(self, prof_func, scale=1):
"""Construct a profile.
Inputs:
prof_func: A function of a single variable.
This function should:
1) Represent the pulse profile.
2) Expect input values of phase ranging between
0 and 1.
3) Work when provided with a numpy array.
scale: An overall scaling factor to multiply
the profile by.
Output:
prof: The profile object.
"""
self.prof_func = prof_func
self.scale = scale
def __call__(self, phs):
"""Return the value of the profile at the given phase.
Inputs:
phs: The phase of the profile (between 0 and 1) where
the profile should be evaluated.
Output:
vals: The values of the profile at the requested phases.
"""
profvals = self.prof_func(phs)
return self.scale*profvals
def plot(self, nbin=1024, scale=1):
x0 = np.linspace(0, 1.0, nbin+1, endpoint=True)
plt.plot(x0, self(x0)*scale)
plt.xlabel("Phase")
def get_area(self, npts=4096, **kwargs):
"""Return the area under the pulse in units of (intensity x phase).
The area is calculated by evaluating the profile at many points
        and numerically integrating using the trapezoid rule.
NOTE: the scale-factor will be applied.
Input:
npts: The number of points to use when evaluating the
profile.
        Output:
area: The area under the pulse in units of (intensity x phase).
"""
phs = np.linspace(0, 1.0, npts+1, endpoint=True)
area = np.trapz(y=self(phs), x=phs)
#area, err = scipy.integrate.quadrature(self, 0, 1, maxiter=250)
return area
def get_max(self, npts=4096):
"""Return the maximum value of the profile.
The profile is evaluated at many points. The quantity returned
is the maximum value evaluated.
NOTE: the scale-factor will be applied.
Inputs:
npts: The number of points to use when evaluating the
profile.
Ouput:
profmax: The profile maximum.
"""
phs = np.linspace(0, 1.0, npts+1, endpoint=True)
profmax = np.max(self(phs))
return profmax
def get_fwhm(self, npts=4096):
"""Determine and return the FWHM of the profile, in phase.
This only works if two points in the profile are at half-maximum,
and all points in between are larger than half-max.
Input:
npts: The number of points to use when evaluating the
profile.
        Output:
fwhm: The full-width at half-maximum of the profile, in phase.
"""
phs = np.linspace(0, 1.0, npts+1, endpoint=True)
vals = self(phs)
profmax = np.max(self(phs))
halfmax = profmax/2.0
ma = np.ma.masked_less(vals, halfmax)
# Unmasked region has vals >= halfmax
unmasked = np.ma.notmasked_contiguous(ma)
if len(unmasked) == 1:
sl = unmasked[0]
return ((sl.stop-1) - (sl.start))/float(npts)
elif len(unmasked) == 2:
sl1 = unmasked[0]
sl2 = unmasked[1]
if sl1.start==0 and sl2.stop==len(phs):
# profile straddles phase=0
return (npts+(sl1.stop-1) - sl2.start)/float(npts)
else:
return None
def get_equivalent_width(self, npts=4096):
"""Determine and return the equivalent width of the profile, in phase.
The equivalent width is the area under the pulse divided
by the profile's maximum value.
Input:
npts: The number of points to use when evaluating the
profile.
        Output:
weq: The equivalent width of the profile, in phase.
"""
return self.get_area(npts=npts)/self.get_max(npts=npts)
def set_scaling(self, scale):
"""Set the profile's scaling factor.
Input:
scale: The scaling factor to use.
Outputs:
None
"""
self.scale = scale
def delay(self, phasedelay):
"""Delay the profile and return a new Profile object.
Input:
phasedelay: The amount of phase to delay the profile by.
Output:
delayed: The delayed Profile.
"""
delayed_prof = Profile(lambda ph: self((ph-phasedelay) % 1)/self.scale, \
scale=self.scale)
return delayed_prof
def convolve_with(self, other, npts=4096, conserve_area=True):
"""Convolve Profile with another. Return a SplineProfile
with the requested number of points.
Inputs:
other: The Profile to convolve with.
npts: The number of points to use when creating the
resulting convolution (i.e. a SplineProfile).
(Default: 4096)
            conserve_area: If true, scale the convolution such that
it has the same area as the input profile (i.e. 'self')
        Output:
convolution: The convolution, a SplineProfile object.
"""
phs = np.linspace(0, 1, npts, endpoint=False)
conv_vals = np.fft.irfft(np.fft.rfft(self(phs))*np.fft.rfft(other(phs)))
if conserve_area:
# scale convolution such that area is same as before
# Isn't this already a property of convolutions? Should it always be true?
scale = self.get_area()/np.trapz(conv_vals, phs)
convolution = SplineProfile(conv_vals*scale)
return convolution
def smear(self, smearphs, delayphs=0, npts=4096):
"""Smear the profile with a boxcar of width 'smearphs'. Return
a SplineProfile object sampled with 'npts' points.
Inputs:
smearphs: The amount (in phase) to smear the profile by.
delayphs: The amount (in phase) to delay the pulse by.
(Default: No delay)
npts: The number of points to use when creating the
smeared SplineProfile. (Default: 4096)
Output:
smeared: The smeared Profile.
"""
        if smearphs < 0:
            raise ValueError("Amount of phase to smear by (%g) " \
                             "cannot be negative!" % smearphs)
elif smearphs == 0:
# No scattering to do, return profile
return copy.deepcopy(self)
bc = boxcar_factory(smearphs, delayphs)
return self.convolve_with(bc, npts=npts)
def scatter(self, scatterphs, npts=4096):
"""Scatter the profile with a one-sided exponential of width
'scatterphs'. Return a SplineProfile object sampled
with 'npts' points.
Inputs:
scatterphs: The time-scale (in phase) of the exponential
scattering function.
npts: The number of points to use when creating the scattered
SplineProfile. (Default: 4096)
Outputs:
scattered: The scattered Profile.
"""
if scatterphs < 0:
raise ValueError("Amount of phase to scatter by (%g) " \
"cannot be negative!" % scatterphs)
elif scatterphs == 0:
# No scattering to do, return profile
return copy.deepcopy(self)
ex = exponential_factory(scatterphs)
return self.convolve_with(ex, npts=npts)
class SplineProfile(Profile):
def __init__(self, profvals, scale=1, **spline_kwargs):
"""Construct a profile that uses a spline to interpolate a function.
Inputs:
profvals: The values of the profile to be interpolated.
scale: An overall scaling factor to multiply
the profile by.
**All additional keyword arguments are passed to the
spline constructor.
Output:
prof: The profile object.
"""
self.profvals = profvals
# TODO: Should we evaluate at the centre of the bins?
phs = np.linspace(0,1, len(profvals)+1, endpoint=True)
# Manually set value at phs=1.0 to the value at phs=0.0
vals = np.concatenate((profvals, [profvals[0]]))
# Create spline object and use it as the profile function
spline = scipy.interpolate.InterpolatedUnivariateSpline(phs, \
vals, **spline_kwargs)
super(SplineProfile, self).__init__(spline, scale)
def __call__(self, phs):
"""Return the value of the profile at the given phase.
Inputs:
phs: The phase of the profile (between 0 and 1) where
the profile should be evaluated.
Output:
vals: The values of the profile at the requested phases.
"""
vals = super(SplineProfile, self).__call__(phs.flat)
        # Re-shape values because the spline returns a flattened array.
vals = np.atleast_1d(vals)
vals.shape = phs.shape
return vals
class MultiComponentProfile(Profile):
"""A class to represent a pulse profile made up of
multiple components.
"""
def __init__(self, components=None, scale=1):
"""Construct a multi-component profile.
Input:
components: A list of Profile objects that serve
as the components of this MultiComponentProfile
object. (Default: Create a multi-component profile
with no components.)
scale: An overall scaling factor to multiply
the profile by.
Output:
prof: The MultiComponentProfile object.
"""
self.scale = scale
self.components = []
for component in components:
self.add_component(component)
super(MultiComponentProfile, self).__init__(self._get_profile(), scale)
def _get_profile(self):
"""Private method to get the pulse profile vs. phase
function.
"""
if self.components:
prof = lambda ph: np.sum([comp(ph) for comp \
in self.components], axis=0)
else:
prof = lambda ph: 0
return prof
def add_component(self, comp):
self.components.append(comp)
def plot(self, nbin=1024):
super(MultiComponentProfile, self).plot(nbin=nbin)
for comp in self.components:
comp.plot(nbin=nbin, scale=self.scale)
class VectorProfile(object):
"""A class to represent a vector of pulse profiles.
This can be used to encode intrinsic profile variation,
        or extrinsic smearing/scattering across the band.
"""
def __init__(self, profiles, scales=1):
"""Construct a vector of profiles.
Inputs:
profiles: A list of Profile objects.
scales: A numpy array of scaling factors to multiply
the profile vector by when evaluating.
Output:
prof: The profile object.
"""
self.profiles = profiles
self.nprofs = len(profiles)
self.scales = scales
def __call__(self, phs):
phs = np.atleast_1d(np.asarray(phs))
nphs = phs.shape[-1]
vals = np.zeros((nphs, self.nprofs))
if phs.ndim == 1:
# Evaluate all profiles at the same phases
for ii, (prof, scale) in enumerate(zip(self.profiles, self.scales)):
# If scale is not a finite number don't inject anything for this
# channel
if np.isfinite(scale):
vals[:,ii] = prof(phs)*scale
elif phs.ndim == 2:
# Evaluate each profile at a different set of phases
nphs_vecs = phs.shape[0]
if nphs_vecs != self.nprofs:
raise ValueError("Length of axis=1 of 'phs' (%d) must be " \
"equal to the number of profiles in the " \
"vector (%d)." % (nphs_vecs, self.nprofs))
for ii, (prof, ph, scale) in enumerate(zip(self.profiles, phs, self.scales)):
# If scale is not a finite number don't inject anything for this
# channel
if np.isfinite(scale):
vals[:,ii] = prof(ph)*scale
else:
raise ValueError("VectorProfile can only be evaluated with " \
"1D or 2D arrays")
return vals
def set_scaling(self, scales):
"""Set the profiles' scaling factors.
Input:
scales: The scaling factors to use.
Outputs:
None
"""
arr = np.asarray(scales)
if arr.ndim not in (0, 1):
raise ValueError("Bad number of dimensions for 'scales'. " \
"Number of dimensions must be 0 or 1. " \
"Scales provided has %d dimensions." % arr.ndim)
self.scales = arr
def get_area(self, *args, **kwargs):
"""Return the area under the pulse in units of (intensity x phase).
The area is calculated by evaluating the profile at many points
        and numerically integrating using the trapezoid rule.
NOTE: the scale-factor will be applied.
Input:
npts: The number of points to use when evaluating the
profile.
        Output:
area: The area under the pulse in units of (intensity x phase).
"""
areas = np.zeros(len(self.profiles))
for ii, prof in enumerate(self.profiles):
areas[ii] = prof.get_area(*args, **kwargs)
return areas
def plot(self, nbin=1024, scale=1):
phs = np.linspace(0, 1.0, nbin+1, endpoint=True)
data = self(phs).transpose()
imax = plt.axes((0.1, 0.1, 0.6, 0.8))
plt.imshow(data, interpolation='nearest', \
extent=(0, 1, 0, self.nprofs), aspect='auto')
plt.xlabel("Phase")
plt.ylabel("Channel number")
plt.axes((0.7, 0.1, 0.25, 0.8), sharey=imax)
plt.plot(np.sum(data, axis=1)[::-1], np.arange(self.nprofs), 'k-')
# Re-set axes for image
imax.set_xlim(0, 1)
imax.set_ylim(0, self.nprofs)
class DispersedProfile(VectorProfile):
def __init__(self, profiles, dm, freqs, period, scales=1, intrinsic=None, delayed=True):
super(DispersedProfile, self).__init__(profiles, scales)
self.dm = dm
self.freqs = freqs
self.period = period
self.intrinsic = intrinsic
self.delayed = delayed
def get_equivalent_width(self, *args, **kwargs):
return np.ones_like(self.freqs)*self.intrinsic.get_equivalent_width(*args, **kwargs)
def get_area(self, *args, **kwargs):
return np.ones_like(self.freqs)*self.intrinsic.get_area(*args, **kwargs)
def get_max(self, *args, **kwargs):
return np.ones_like(self.freqs)*self.intrinsic.get_max(*args, **kwargs)
def plot(self, nbin=1024, scale=1, dedisp=False):
phs = np.linspace(0, 1.0, nbin+1, endpoint=True)
data = self(phs).transpose()
if self.delayed:
delays = get_phasedelays(self.dm, self.freqs, self.period)
else:
delays = np.zeros_like(self.freqs)
delayedphs = (phs+delays[:,np.newaxis]) % 1
dedispdata = self(delayedphs).transpose()
imax = plt.axes((0.1, 0.1, 0.6, 0.6))
if dedisp:
plt.imshow(dedispdata, interpolation='nearest', \
extent=(0, 1, 0, self.nprofs), aspect='auto')
else:
plt.imshow(data, interpolation='nearest', \
extent=(0, 1, 0, self.nprofs), aspect='auto')
plt.set_cmap('gist_yarg')
plt.xlabel("Phase")
plt.ylabel("Channel number")
plt.axes((0.7, 0.1, 0.25, 0.6), sharey=imax)
plt.plot(np.sum(data, axis=1)[::-1], np.arange(self.nprofs), 'k-')
plt.axes((0.1, 0.7, 0.6, 0.25), sharex=imax)
plt.plot(phs, np.sum(dedispdata, axis=0), ls='-',
c='k', lw=1, label='Smeared and scattered', zorder=2)
if self.intrinsic is not None:
plt.plot(phs, self.intrinsic(phs)*np.ma.masked_invalid(self.scales).sum(),
ls='-', c='#bbbbbb', lw=3, label='Input', zorder=1)
plt.legend(loc='best', prop=dict(size='small'))
plt.figtext(0.05, 0.05, "Period = %.3f ms" % (self.period*1000), size='xx-small')
plt.figtext(0.05, 0.035, r"DM = %.3f cm$\mathrm{^{-3}}$pc" % self.dm, size='xx-small')
# Re-set axes for image
imax.set_xlim(0, 1)
imax.set_ylim(0, self.nprofs)
def get_phasedelays(dm, freqs, period):
"""Return phase delays corresponding to a particular DM.
Inputs:
dm: DM (in pc cm-3)
freqs: The list of frequencies (in MHz)
period: The profiles period (in seconds)
Outputs:
phasedelays: The corresponding phase delays.
"""
# Prepare delays
timedelays = psr_utils.delay_from_DM(dm, freqs)
# Reference all delays to highest frequency channel, which remains
# unchanged
# TODO: Do we really want to refer to high freq?
timedelays -= timedelays[np.argmax(freqs)]
phasedelays = timedelays/period
return phasedelays
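# Sketch (hypothetical band; not called anywhere): delays are referenced to
# the highest-frequency channel, so it gets zero phase delay and lower
# channels get positive delays.
def _example_get_phasedelays():
    freqs = np.linspace(1214.0, 1537.0, 4)  # MHz, hypothetical
    return get_phasedelays(dm=100.0, freqs=freqs, period=0.5)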
def apply_dm(inprof, period, dm, chan_width, freqs, tsamp, \
do_delay=True, do_smear=True, do_scatter=True,
verbose=True):
"""Given a profile apply DM delays, smearing, and scattering
within each channel as is appropriate for the given params.
Inputs:
inprof: The profile to modify.
period: The profiles period (in seconds)
dm: The DM (in pc cm-3)
chan_width: The width of each channel (in MHz)
freqs: The list of frequencies (in MHz)
tsamp: Sample time of the recipient filterbank file (in seconds).
do_delay: Boolean, if True apply DM delays to each channel.
The highest freq channel is not shifted. (Default: True)
do_smear: Boolean, if True apply DM smearing to each channel.
(Default: True)
do_scatter: Boolean, if True apply scattering to each channel.
(Default: True)
Outputs:
        dispersedprof: The delayed and smeared DispersedProfile.
"""
weq = inprof.get_equivalent_width()
nfreqs = len(freqs)
if verbose:
print("Applying DM to profile (DM = %.2f; %d channels)..." % \
(dm, nfreqs))
# A list of profiles, one for each channel
profiles = []
if dm <= 0:
warnings.warn("DM will not be applied because it is 0 (or smaller?!)")
do_delay = False
do_smear = False
do_scatter = False
if do_delay:
phasedelays = get_phasedelays(dm, freqs, period)
else:
phasedelays = np.zeros(nfreqs)
# Prepare for smear campaign
smeartimes = psr_utils.dm_smear(dm, abs(chan_width), freqs) # In seconds
smearphases = smeartimes/period
# Prepare to scatter
scattertimes = psr_utils.pulse_broadening(dm, freqs)*1e-3 # In seconds
scatterphases = scattertimes/period
if DEBUG:
for ichan, (freq, smear, scatt, delay) in \
enumerate(zip(freqs, smearphases, scatterphases, phasedelays)):
print(" Chan #%d - Freq: %.3f MHz -- " \
"Smearing, scattering, delay (all in phase): " \
"%g, %g, %g" % (ichan, freq, smear, scatt, delay))
oldprogress = 0
sys.stdout.write(" %3.0f %%\r" % oldprogress)
sys.stdout.flush()
# ylim = None
# ylim2 = None
# ylim3 = None
# ylim4 = None
# ylim5 = None
for ii, (delayphs, smearphs, scattphs) in \
enumerate(zip(phasedelays, smearphases, scatterphases)):
#########
# DEBUG: plot all profiles
# plt.clf()
# ax = plt.subplot(5,1,1)
# inprof.plot()
# if ylim is not None:
# ax.set_ylim(ylim)
# else:
# ylim = ax.get_ylim()
if do_smear and not ((smearphs < 0.2*weq) or (smearphs < (tsamp/period))):
# Only smear if requested and smearing-phase is large enough
# bc = boxcar_factory(smearphs, delayphs)
# ax2 = plt.subplot(5,1,2,sharex=ax)
# bc.plot()
# if ylim2 is not None:
# ax2.set_ylim(ylim2)
# else:
# ylim2 = ax2.get_ylim()
if DEBUG:
print("Smearing")
tmpprof = inprof.smear(smearphs, delayphs, npts=NUMPOINTS)
else:
tmpprof = inprof.delay(delayphs)
phs = np.linspace(0, 1, NUMPOINTS+1)
tmpprof = SplineProfile(tmpprof(phs))
# ax3 = plt.subplot(5,1,3,sharex=ax)
# if ylim3 is not None:
# ax3.set_ylim(ylim3)
# else:
# ylim3 = ax3.get_ylim()
# tmpprof.plot()
if do_scatter and not ((scattphs < 0.2*weq) or (scattphs < (tsamp/period))):
# Only scatter if requested and scattering-phase is large enough
# ex = exponential_factory(scattphs)
# ax4 = plt.subplot(5,1,4,sharex=ax)
# ex.plot()
# if ylim4 is not None:
# ax4.set_ylim(ylim4)
# else:
# ylim4 = ax4.get_ylim()
if DEBUG:
print("Scattering")
tmpprof = tmpprof.scatter(scattphs, npts=NUMPOINTS)
# ax5 = plt.subplot(5,1,5,sharex=ax)
# tmpprof.plot()
# if ylim5 is not None:
# ax5.set_ylim(ylim5)
# else:
# ylim5 = ax5.get_ylim()
profiles.append(tmpprof)
# plt.xlim(0,1)
# plt.xlabel("Phase")
# plt.suptitle("Prof %d (%f MHz)" % (ii, freqs[ii]))
# plt.savefig("prof%d.png" % ii)
#########
# Print progress to screen
progress = int(100.0*ii/nfreqs)
if progress > oldprogress:
sys.stdout.write(" %3.0f %%\r" % progress)
sys.stdout.flush()
oldprogress = progress
sys.stdout.write("Done \n")
sys.stdout.flush()
dispersedprof = DispersedProfile(profiles, dm=dm, freqs=freqs, period=period,
intrinsic=inprof, delayed=do_delay)
return dispersedprof
def get_spline_profile(prof, npts=1024, **spline_kwargs):
"""Given a profile object evaluate it and return
a SplineProfile object. If the input profile object
is already an instance of SplineProfile, do nothing
and return the input profile.
Inputs:
        prof: The profile object to convert to a SplineProfile.
npts: The number of points to use when evaluating the
profile. (Default: 1024)
**All additional keyword arguments are passed to the
spline constructor.
Outputs:
spline_prof: The resulting SplineProfile object.
"""
if isinstance(prof, SplineProfile):
# Input profile is already a SplineProfile. Do nothing. Return it.
return prof
else:
phs = np.linspace(0,1, npts, endpoint=False)
profvals = prof(phs)/prof.scale
spline_prof = SplineProfile(profvals, scale=prof.scale, **spline_kwargs)
return spline_prof
def vonmises_factory(amp,shape,loc):
"""Return a Profile with a single von Mises component.
Inputs:
amp: Amplitude of the von Mises function.
shape: A parameter characterising the width
(NOTE: this value is not directly the width.)
loc: Phase of the peak of the von Mises function.
Output:
vm_prof: A Profile object with a von Mises profile described
by the input parameters.
"""
# Need to use a factory for the von Mises functions
# to make sure the lambda uses amp,shape,loc from a local
# scope. The values in a lambda function are stored by reference
# and only looked up dynamically when the function is called.
def vm(ph):
return amp*np.exp(shape*(np.cos(2*np.pi*(ph-loc))-1))
return Profile(vm)
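# Minimal sketch (not called anywhere): the default profile mentioned in
# create_vonmises_components() below is a single component with amplitude 1.0,
# shape 5 and its peak at phase 0.5.
def _example_vonmises():
    vm = vonmises_factory(1.0, 5, 0.5)
    return vm(np.array([0.0, 0.25, 0.5]))  # largest value at phase 0.5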
def boxcar_factory(width, delay=0):
"""Return a boxcar Profile scaled to have unit area.
Inputs:
width: The width of the boxcar function in phase.
NOTE: if width > 1, it will be folded
delay: The delay, in phase, to apply to the boxcar.
            NOTE: positive delays cause the boxcar to be shifted
                to the right (i.e. larger pulse phase)
                (Default: No delay)
Output:
boxcar_prof: A boxcar Profile object with the given width.
"""
width = float(width) # Make sure width is a floating-point number
delay = delay % 1 # Make sure delay is in [0, 1)
nwraps = int(width/1)
rem = ((width%1)+delay)%1
if delay < rem:
# Boxcar does not straddle phase=1
def bc(ph):
return (nwraps + ((delay<=ph) & (ph<rem)).astype('int'))/width
else:
# Boxcar is split
def bc(ph):
return (nwraps + ((ph<rem) | (delay<=ph)).astype('int'))/width
return Profile(bc)
def exponential_factory(efold):
"""Return a one-sided exponential Profile.
Inputs:
efold: The e-folding phase of the exponential function.
Output:
exp_prof: A one-sided exponential Profile object.
"""
denom = (1-np.exp(-1/efold))*efold
def ex(ph):
# Denominator comes from sum of geometric series
return np.exp(-ph/efold)/denom
return Profile(ex)
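# Sketch (hypothetical widths; not called anywhere): both kernels above are
# normalized to unit area over one period, so convolving a profile with them
# (via Profile.smear / Profile.scatter) broadens the pulse while preserving
# its area.
def _example_kernels():
    bc = boxcar_factory(0.25, delay=0.1)
    ex = exponential_factory(0.05)
    return bc.get_area(), ex.get_area()  # both ~1.0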
def create_vonmises_components(vonmises_strs):
if not vonmises_strs:
warnings.warn("Using default von Mises profile (Amplitude=1.0 " \
"b=5, and phase=0.5)")
vonmises_comps = [vonmises_factory(1.0, 5, 0.5)]
else:
vonmises_comps = []
for vonmises_str in vonmises_strs:
split = vonmises_str.split()
if len(split) != 3:
raise ValueError("Bad number of von Mises components " \
"should be 3, got %d" % len(split))
amp = float(split[0])
shape = float(split[1])
loc = float(split[2])
# Need to use a factory for the von Mises functions
# to make sure the lambda uses amp,shape,loc from a local
# scope. The values in a lambda function are stored by reference
# and only looked up dynamically when the function is called.
vonmises_comps.append(vonmises_factory(amp,shape,loc))
return vonmises_comps
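# Minimal sketch (hypothetical components; not called anywhere): each string
# is "amplitude shape phase", e.g. a main pulse plus a weaker interpulse.
def _example_vonmises_strs():
    return create_vonmises_components(["1.0 50 0.5", "0.3 30 0.0"])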
def scale_from_snr(fil, prof, snr, rms):
"""Set the profile's scaling factor such that the simulated
injected pulsar signal will have the given Smean.
Inputs:
fil: A FilterbankFile object.
prof: The Profile object representing the profile to inject.
snr: The desired signal-to-noise ratio
rms: The RMS of the recipient file's DM=0 time series.
Outputs:
scale: The scaling factor to apply to the profile.
"""
# Set scale to 1, just in case it's be altered already.
prof.set_scaling(1)
# Characterise the pulse
area = prof.get_area()
profmax = prof.get_max()
scale = snr*rms/fil.nchans/np.sqrt(fil.nspec*profmax*area)
print("Average area %s, average profile maximum: %s" % \
(np.mean(area), np.mean(profmax)))
print("Average recommended scale factor: %s" % np.mean(scale))
return scale
def snr_from_smean(fil, prof, smean, gain, tsys):
"""Set the profile's scaling factor such that the simulated
injected pulsar signal will have the given Smean.
Inputs:
fil: A FilterbankFile object.
prof: The Profile object representing the profile to inject.
smean: The mean flux density to simulate, in mJy.
gain: The telescope's gain, in K/Jy.
tsys: The observing system's temperature, in K.
Outputs:
snr: The target signal-to-noise ratio.
"""
dutycycle = prof.get_equivalent_width() # in phase
# Because width is reported in phase,
# it is actually the duty cycle
# Characterise the recipient filterbank file
tint = fil.nspec*fil.tsamp
bw = np.abs(fil.foff*fil.nchans)
# Target SNR
warnings.warn("Assuming 2 (summed) polarizations.")
snr = smean*gain*np.sqrt(2*tint*bw)/tsys*np.sqrt(1/dutycycle-1)
print("Expected SNR of injected pulsar signal (after folding " \
"and integrating over frequency): %s" % snr)
return snr
def inject(infile, outfn, prof, period, dm, nbitsout=None,
block_size=BLOCKSIZE, pulsar_only=False, inplace=False):
if isinstance(infile, filterbank.FilterbankFile):
fil = infile
elif inplace:
fil = filterbank.FilterbankFile(infile, 'readwrite')
else:
fil = filterbank.FilterbankFile(infile, 'read')
print("Injecting pulsar signal into: %s" % fil.filename)
if False:
delays = psr_utils.delay_from_DM(dm, fil.frequencies)
delays -= delays[np.argmax(fil.frequencies)]
get_phases = lambda times: (times-delays)/period % 1
else:
get_phases = lambda times: times/period % 1
# Create the output filterbank file
if nbitsout is None:
nbitsout = fil.nbits
if inplace:
warnings.warn("Injecting pulsar signal *in-place*")
outfil = fil
else:
# Start an output file
print("Creating out file: %s" % outfn)
outfil = filterbank.create_filterbank_file(outfn, fil.header, \
nbits=nbitsout, mode='append')
if outfil.nbits == 8:
raise NotImplementedError("This code is out of date. 'delays' is not " \
"done in this way anymore..")
# Read the first second of data to get the global scaling to use
onesec = fil.get_timeslice(0, 1).copy()
onesec_nspec = onesec.shape[0]
times = np.atleast_2d(np.arange(onesec_nspec)*fil.tsamp).T+delays
phases = times/period % 1
onesec += prof(phases)
minimum = np.min(onesec)
median = np.median(onesec)
# Set median to 1/3 of dynamic range
global_scale = (256.0/3.0) / median
del onesec
else:
# No scaling to be performed
# These values will cause scaling to keep data unchanged
minimum = 0
global_scale = 1
sys.stdout.write(" %3.0f %%\r" % 0)
sys.stdout.flush()
oldprogress = -1
# Loop over data
lobin = 0
spectra = fil.get_spectra(0, block_size).data.T
numread = spectra.shape[0]
while numread:
if pulsar_only:
# Do not write out data from input file
# zero it out
spectra *= 0
hibin = lobin+numread
# Sample at middle of time bin
times = (np.arange(lobin, hibin, 1.0/NINTEG_PER_BIN)+0.5/NINTEG_PER_BIN)*fil.dt
#times = (np.arange(lobin, hibin)+0.5)*fil.dt
phases = get_phases(times)
profvals = prof(phases)
shape = list(profvals.shape)
shape[1:1] = [NINTEG_PER_BIN] # these next lines add a new axis=1
shape[0] //= NINTEG_PER_BIN
profvals.shape = shape
toinject = profvals.mean(axis=1)
#toinject = profvals
if np.ndim(toinject) > 1:
injected = spectra+toinject
else:
injected = spectra+toinject[:,np.newaxis]
scaled = (injected-minimum)*global_scale
if inplace:
outfil.write_spectra(scaled, lobin)
else:
outfil.append_spectra(scaled)
# Print progress to screen
progress = int(100.0*hibin/fil.nspec)
if progress > oldprogress:
sys.stdout.write(" %3.0f %%\r" % progress)
sys.stdout.flush()
oldprogress = progress
# Prepare for next iteration
lobin = hibin
spectra = fil.get_spectra(lobin, block_size).data.T
numread = spectra.shape[0]
sys.stdout.write("Done \n")
sys.stdout.flush()
def load_profile(infn, verbose=True):
if verbose:
print("Loading profile from file (%s)" % infn)
data = np.load(infn)
profiles = []
for key in sorted(data.keys()):
profiles.append(SplineProfile(data[key]))
prof = VectorProfile(profiles)
data.close()
return prof
def save_profile(prof, outfn, verbose=True):
if verbose:
print("Writing %s instance to file (%s)" % \
(type(prof).__name__, outfn))
outfile = open(outfn, 'wb')
profvals = {}
for ii, pp in enumerate(prof.profiles):
profvals['chan%d' % ii] = pp.profvals
np.savez(outfile, **profvals)
outfile.close()
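# Sketch of a save/load round trip (not called anywhere): 'prof' is assumed to
# be a VectorProfile of SplineProfiles and the filename is hypothetical.
def _example_profile_roundtrip(prof, fn="prof_cache.npz"):
    save_profile(prof, fn)
    return load_profile(fn)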
def parse_cfgstr(cfgstrs):
cfgs = {}
for cfgstr in cfgstrs:
for cfg in cfgstr.split(','):
key, val = cfg.split('=')
cfgs[key] = val
return cfgs
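# Minimal sketch (hypothetical values; not called anywhere): configuration
# strings are comma-separated key=value pairs, possibly spread over several
# -c options on the command line.
def _example_parse_cfgstr():
    return parse_cfgstr(["snr=10,rms=35.0"])  # -> {'snr': '10', 'rms': '35.0'}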
def get_scaling(fil, prof, cfgstrs):
"""Given a target filterbank file, a profile, and
a configuration string return the corresponding
scaling factor.
Inputs:
fil: A filterbank.FilterbankFile object.
prof: A Profile object.
cfgstrs: A list of strings containing configurations.
Output:
scaling: The corresponding scaling.
"""
cfgs = parse_cfgstr(cfgstrs)
return float(cfgs['scale'])
def get_scaling_from_snr(fil, prof, cfgstrs):
"""Given a target filterbank file, a profile, and
a configuration string compute the scaling factor
given the SNR.
Inputs:
fil: A filterbank.FilterbankFile object.
prof: A Profile object.
cfgstrs: A list of strings containing configurations.
Output:
scaling: The corresponding scaling.
"""
cfgs = parse_cfgstr(cfgstrs)
snr = float(cfgs['snr'])
rms = float(cfgs['rms'])
scale = scale_from_snr(fil, prof, snr=snr, rms=rms)
return scale
def get_scaling_from_smean(fil, prof, cfgstrs):
"""Given a target filterbank file, a profile, and
a configuration string compute the scaling factor
given the target mean flux density.
Inputs:
fil: A filterbank.FilterbankFile object.
prof: A Profile object.
cfgstrs: A list of strings containing configurations.
Output:
scaling: The corresponding scaling.
"""
cfgs = parse_cfgstr(cfgstrs)
smean = float(cfgs['smean'])
rms = float(cfgs['rms'])
gain = float(cfgs['gain'])
tsys = float(cfgs['tsys'])
snr = snr_from_smean(fil, prof, smean=smean, \
gain=gain, tsys=tsys)
scale = scale_from_snr(fil, prof, snr, rms=rms)
return scale
def get_scaling_from_file(fil, prof, cfgstrs):
"""Given a target filterbank file, a profile, and
a configuration string read scaling factors from
a text file. The file should have one floating point
number per line. There should be as many lines as there
are frequency channels in the filterbank file.
Inputs:
fil: A filterbank.FilterbankFile object.
prof: A Profile object.
cfgstrs: A list of strings containing configurations.
Output:
scaling: The corresponding scaling.
"""
cfgs = parse_cfgstr(cfgstrs)
fn = cfgs['file']
smean = float(cfgs['smean'])
col = int(cfgs.get('col', 0))
area = prof.get_area()
# 'conversion_factors' contains the per-channel conversion between mJy
# and receiver units (in units of mJy/rcvr)
conversion_factors = np.loadtxt(fn, usecols=(col,))
scales = smean/conversion_factors/area
return scales
SCALE_METHODS = {'scale': get_scaling, \
'snr': get_scaling_from_snr, \
'radiometer': get_scaling_from_smean, \
'scalefile': get_scaling_from_file}
def scale_profile(prof, scale_name, scale_cfgstrs, fil, verbose=True):
"""Scale the profile.
Inputs:
prof: The profile object to scale.
scale_name: The name of the type of scaler to use.
scale_cfgstrs: A list of configuration strings for the scaler.
fil: A FilterbankFile object.
verbose: Print extra information. (Default: True)
Outputs:
None - the scalings are applied directly to the input profile.
"""
# Determine scaling
scale_getter = SCALE_METHODS[scale_name]
scaling = scale_getter(fil, prof, scale_cfgstrs)
if verbose:
print("Band-averaged scale-factor: %g" % np.ma.masked_invalid(scaling).mean())
prof.set_scaling(scaling)
def make_profile(vonmises, verbose=True):
"""Create the profile object to use.
Inputs:
vonmises: A list of strings defining von mises components.
verbose: Print extra information. (Default: True)
"""
comps = create_vonmises_components(vonmises)
prof = MultiComponentProfile(comps)
if verbose:
print("Creating profile. Number of components: %d" % len(comps))
print("Profile area (intensity x phase): %g" % prof.get_area())
print("Equivalent width (phase): %g" % prof.get_equivalent_width())
print("FWHM (phase): %g" % prof.get_fwhm())
print("Profile maximum: %g" % prof.get_max())
prof = get_spline_profile(prof)
return prof
def main():
fn = args.infile
if args.inplace:
fil = filterbank.FilterbankFile(fn, mode='readwrite')
else:
fil = filterbank.FilterbankFile(fn, mode='read')
if args.inprof is not None:
warnings.warn("Saved profiles already may be tuned to a particular " \
"DM, period and filterbank file (freq, nchans, " \
"tsamp, etc).")
prof = load_profile(args.inprof)
else:
prof = make_profile(args.vonmises)
prof = apply_dm(prof, args.period, args.dm, \
fil.foff, fil.frequencies, fil.tsamp)
scale_profile(prof, args.scale_name, args.scale_cfgstrs, fil)
if args.outprof is not None:
save_profile(prof, args.outprof)
outfn = args.outname % fil.header
print("Showing plot of profile to be injected...")
plt.figure()
plt.clf()
prof.plot(dedisp=True)
plt.xlim(0,1)
plt.savefig(outfn+".ps")
if args.dryrun:
sys.exit()
inject(fil, outfn, prof, args.period, args.dm, \
nbitsout=args.output_nbits, block_size=args.block_size, \
pulsar_only=args.pulsar_only, inplace=args.inplace)
def parse_model_file(modelfn):
"""Parse a pass model file (*.m) written by paas.
Return a list of parameters describing each component.
In particular (amplitude, shape, phase).
Input:
modelfn: The name of the model file.
Outputs:
params: List of parameters for each component.
(i.e. "amplitude shape phase")
"""
mfile = open(modelfn, 'r')
return [" ".join(reversed(line.split())) \
for line in mfile.readlines()]
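# Sketch (not called anywhere): the on-disk ordering is assumed from the
# reversed() call above, i.e. paas is taken to write "phase shape amplitude"
# per line, and this returns the "amplitude shape phase" strings that
# create_vonmises_components() expects.
def _example_parse_model_file():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.m', delete=False) as tf:
        tf.write("0.5 50 1.0\n0.0 30 0.3\n")  # hypothetical 2-component model
    try:
        return parse_model_file(tf.name)  # -> ['1.0 50 0.5', '0.3 30 0.0']
    finally:
        os.unlink(tf.name)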
class ParseMfileAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
vonmises = getattr(namespace, self.dest)
vonmises.extend(parse_model_file(values))
class ScaleHelpAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
helptext = "Scaling methods:\n\n" + \
"scale - Multiply injected signal by a scaling factor\n" + \
"Configs: 'scale' - (float) the scaling factor\n\n" + \
"snr - Scale the injected signal so the integrated \n" + \
" profile has the given SNR\n" + \
"Configs: 'snr' - (float) the target SNR\n" + \
" 'rms' - (float) the RMS of the cleaned DM=0 timeseries\n\n" + \
"radiometer - Scale the injected signal so the integrated \n" + \
" profile has the given mean flux density\n" + \
"Configs: 'smean' - (float) the target mean flux density (in mJy)\n" + \
" 'gain' - (float) the telescope's gain (in K/Jy)\n" + \
" 'tsys' - (float) the observing system's temperature (in K)\n" + \
" 'rms' - (float) the RMS of the cleaned DM=0 timeseries\n\n" + \
"scalefile - Scale the signal in each channel independently\n" + \
" according to the scaling factors in the file\n" + \
"Configs: 'smean' - (float) the target mean flux density\n" + \
" 'file' - (string) a text file containing per-channel \n" + \
" conversion factors from flux-density to 'receiver' \n" + \
" units (in mJy/rcvr).\n" + \
" Each row should have a single floating-point \n" + \
" number. The number and order of the rows should \n" + \
" correspond to the input filterbank file.\n" + \
" 'col' - (int) The column number to read conversion factors \n" +\
" from. Columns are numbered starting at 0.\n" + \
" (Default: first column)\n\n"
sys.stderr.write(helptext)
sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='injectpsr.py', \
description="v0.7 Patrick Lazarus (Jan 16, 2014)")
parser.add_argument("--dm", dest='dm', type=float, \
help="The DM of the (fake) injected pulsar signal. " \
"(This argument is required.", \
default=None)
parser.add_argument("-p", "--period", dest='period', \
default=None, type=float, \
help="The *topocentric* period (in seconds) of the (fake) injected " \
"pulsar signal. (This argument is required.)")
parser.add_argument("-c", "--scale-configs", dest='scale_cfgstrs', type=str, \
required=True, default=[], action='append', \
help="A string of comma-separated parameters to " \
"configure how the injected signal is scaled. " \
"Format is '<param-name>=<value>,...'")
parser.add_argument("-s", "--scale-method", dest='scale_name', type=str, \
required=True, \
help="Name of a method for determining how the " \
"injected signal is scaled.")
parser.add_argument("--scale-help", dest='show_scale_help', \
nargs=0, action=ScaleHelpAction, \
help="Show help text for scaling methods, parameters.")
parser.add_argument("-v", "--vonmises", dest='vonmises', action='append', \
help="A string of 3 parameters defining a vonmises " \
"component to be injected. Be sure to quote the " \
"3 parameters together. The params are: 'amplitude " \
"shape phase'. Amplitude is not related to SNR in " \
"any way. Also, phase should be between 0 and 1. " \
"(Default: if no compoments are provided " \
"a von Mises with amplitude=1.0, shape=5, and " \
"phase=0.5 will be used.)", \
default=[])
parser.add_argument("-m", "--model-file", dest="vonmises", \
type=str, action=ParseMfileAction, \
help="A model file (*.m) as written by 'paas'.")
parser.add_argument("--block-size", dest='block_size', default=BLOCKSIZE, \
type=float, \
help="Number of spectra per block. This is the amount " \
"of data manipulated/written at a time. (Default: " \
" %d spectra)" % BLOCKSIZE)
parser.add_argument("--nbits", dest='output_nbits', default=None, type=int, \
help="Number of bits per same to use in output " \
"filterbank file. (Default: same as input file)")
parser.add_argument("-n", "--dryrun", dest="dryrun", action="store_true", \
help="Show the pulse profile to be injected and exit. " \
"(Default: do not show profile, inject it)")
parser.add_argument("--no-apply-dm", dest='apply_dm', action='store_false', \
default=True, \
help="Do not apply the DM (i.e. do not delay or smear " \
"the pulse; Default: Apply DM)")
parser.add_argument("--load-prof", dest="inprof", default=None, \
help="Load a profile object from file. (Default: " \
"create a fresh profile object.)")
parser.add_argument("--save-prof", dest='outprof', default=None, \
help="Save a profile object to file. (Default: " \
"do not save profile object.)")
parser.add_argument("-o", "--outname", dest='outname', \
default="injected.fil", \
help="The name of the output file.")
parser.add_argument("--write-pulsar-only", dest='pulsar_only', \
action='store_true', \
help="Only write the pulsar signal to the output file. "
"That is, do not include the data from the input "
"file. This is useful for debugging. (Default: "
"write data from input file _and_ pulsar signal.)")
parser.add_argument("--in-place", dest='inplace', \
action='store_true', \
help="Inject the pulsar signal in-place. " \
"THIS WILL OVERWRITE THE INPUT DATAFILE!" \
"(Default: Do _not_ inject in-place)")
parser.add_argument("infile", \
help="File that will receive synthetic pulses.")
args = parser.parse_args()
if args.period is None or args.dm is None:
raise ValueError("Both a period and a DM _must_ be provided!")
if args.scale_name is not None and args.inprof is not None:
raise ValueError("Loading a saved profile via the " \
"'--load-prof' args is incompatible " \
"with scaling the profile.")
main()
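    # A hedged example invocation (values and file names are purely illustrative;
    # "scales.txt" and "input.fil" are hypothetical files):
    #
    #   injectpsr.py --dm 100.0 -p 0.005 -s scalefile \
    #       -c "smean=1.0,file=scales.txt,col=0" \
    #       -v "1.0 5 0.5" -o injected.fil input.fil
    #
    # Use --scale-help to list the scaling methods and parameters actually available.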
| 48,866 | 37.387274 | 94 | py |
presto | presto-master/bin/psrfits2fil.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import range
import numpy as np
from presto import psrfits
from presto import filterbank
from presto import sigproc
import optparse
import sys
import os
import time
def translate_header(psrfits_file):
fits_hdr = psrfits_file.header
subint_hdr = psrfits_file.fits['SUBINT'].header
subint_data = psrfits_file.fits['SUBINT'].data
fil_header = {}
if fits_hdr['TELESCOP'] in sigproc.telescope_ids:
fil_header["telescope_id"] = \
sigproc.telescope_ids[fits_hdr['TELESCOP']]
else:
fil_header["telescope_id"] = -1
if fits_hdr['BACKEND'] in sigproc.machine_ids:
fil_header["machine_id"] = \
sigproc.machine_ids[fits_hdr['BACKEND']]
else:
fil_header["machine_id"] = -1
fil_header["data_type"] = 1 # filterbank
fn = psrfits_file.filename
fil_header["rawdatafile"] = os.path.basename(fn)
fil_header["source_name"] = fits_hdr['SRC_NAME']
fil_header["barycentric"] = 0 # always not barycentered?
fil_header["pulsarcentric"] = 0 # whats pulsarcentric?
fil_header["az_start"] = subint_data[0]['TEL_AZ']
fil_header["za_start"] = subint_data[0]['TEL_ZEN']
fil_header["src_raj"] = float(fits_hdr['RA'].replace(':',''))
fil_header["src_dej"] = float(fits_hdr['DEC'].replace(':',''))
fil_header["tstart"] = fits_hdr['STT_IMJD'] + \
fits_hdr['STT_SMJD']/86400.0 + \
fits_hdr['STT_OFFS']/86400.0
fil_header["tsamp"] = subint_hdr['TBIN']
fil_header["nbits"] = None # set by user. Input should always be 4-bit.
# first channel (fch1) in sigproc is the highest freq
# foff is negative to signify this
fil_header["fch1"] = fits_hdr['OBSFREQ'] + \
np.abs(fits_hdr['OBSBW'])/2.0 - \
np.abs(subint_hdr['CHAN_BW'])/2.0
fil_header["foff"] = -1.0*np.abs(subint_hdr['CHAN_BW'])
fil_header["nchans"] = subint_hdr['NCHAN']
fil_header["nifs"] = subint_hdr['NPOL']
return fil_header
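# A minimal usage sketch of translate_header() (assuming "example.fits" is a
# PSRFITS search-mode file; the filenames are hypothetical):
#
#   pf = psrfits.PsrfitsFile("example.fits")
#   hdr = translate_header(pf)
#   hdr['nbits'] = 8   # must be filled in before writing, as done in main() below
#   outfil = filterbank.create_filterbank_file("example.fil", hdr, nbits=8)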
def main(fits_fn, outfn, nbits, \
apply_weights, apply_scales, apply_offsets):
start = time.time()
psrfits_file = psrfits.PsrfitsFile(fits_fn)
fil_header = translate_header(psrfits_file)
fil_header['nbits'] = nbits
outfil = filterbank.create_filterbank_file(outfn, fil_header, \
nbits=nbits)
# if frequency channels are in ascending order
# band will need to be flipped
if psrfits_file.fits['SUBINT'].header['CHAN_BW'] > 0:
flip_band=True
print("\nFits file frequencies in ascending order.")
print("\tFlipping frequency band.\n")
else:
flip_band=False
# check nbits for input
if psrfits_file.nbits < 4:
raise ValueError('Does not support %d-bit data' % \
psrfits_file.nbits)
if nbits != 32:
print("\nCalculating statistics on first subintegration...")
subint0 = psrfits_file.read_subint(0, \
apply_weights, apply_scales, apply_offsets)
#new_max = np.mean(subint0) + 3*np.std(subint0)
new_max = 3 * np.median(subint0)
print("\t3*median =",new_max)
if new_max > 2.0**nbits:
scale = True
scale_fac = new_max / ( 2.0**nbits )
print("\tScaling data by",1/scale_fac)
print("\tValues larger than",new_max,"(pre-scaling) "\
"will be set to",2.0**nbits - 1,"\n")
else:
scale = False
scale_fac = 1
print("\tNo scaling necessary")
print("\tValues larger than",2.0**nbits-1,"(2^nbits) will "\
"be set to ",2.0**nbits-1,"\n")
else:
scale_fac = 1
print("\nNo scaling necessary for 32-bit float output file.")
print("Writing data...")
sys.stdout.flush()
oldpcnt = ""
for isub in range(int(psrfits_file.nsubints)):
subint = psrfits_file.read_subint(isub, apply_weights, apply_scales, apply_offsets)
if flip_band:
subint = np.fliplr(subint)
subint /= scale_fac
outfil.append_spectra(subint)
pcnt = "%d" % (isub*100.0/psrfits_file.nsubints)
if pcnt != oldpcnt:
sys.stdout.write("% 4s%% complete\r" % pcnt)
sys.stdout.flush()
print("Done ")
outfil.close()
print("Runtime:",time.time() - start)
if __name__=='__main__':
parser = optparse.OptionParser(prog='psrfits2fil.py',
version="v0.2 Paul Scholz, Patrick Lazarus (Sept 2012)",
usage = "usage: %prog [options] input_fits")
parser.add_option("-n",dest='nbits', action='store',
default=8, type='int',
help="The number of bits in the output .fil file. " +
"Default=8")
parser.add_option("-o",dest='outfn',action='store',
default=None, type='string',
help="The filename of the output filterbank file. " +
"Default: same as .fits input but with .fil extn")
parser.add_option("--noweights", dest='apply_weights',
default=True, action="store_false",
help="Do not apply weights when converting data.")
parser.add_option("--noscales", dest='apply_scales',
default=True, action="store_false",
help="Do not apply scales when converting data.")
parser.add_option("--nooffsets", dest='apply_offsets',
default=True, action="store_false",
help="Do not apply offsets when converting data.")
(options, args) = parser.parse_args()
fits_fn = args[0]
if options.outfn:
outfn = options.outfn
else:
outfn = '.'.join(fits_fn.split('.')[:-1]) + '.fil'
main(fits_fn, outfn, options.nbits, options.apply_weights,
options.apply_scales, options.apply_offsets)
| 6,102 | 37.626582 | 91 | py |
presto | presto-master/bin/GBNCC_search.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
from operator import attrgetter
import glob, os, os.path, shutil, socket, tarfile, stat
import numpy, sys, time
from presto import presto
from presto import sifting
import astropy.io.fits as pyfits
institution = "NRAO"
base_tmp_dir = "." # "/dev/shm/" is a good choice
# This is where the output will be archived.
base_output_dir = "."
#-------------------------------------------------------------------
# Tunable parameters for searching and folding
# (you probably don't need to tune any of them)
raw_N = 1440000 # Number of samples to analyze (~118 secs)
rfifind_chunk_time = 25600 * 0.00008192 # ~2.1 sec
singlepulse_threshold = 5.0 # threshold SNR for candidate determination
singlepulse_plot_SNR = 5.5 # threshold SNR for singlepulse plot
singlepulse_maxwidth = 0.1 # max pulse width in seconds
to_prepfold_sigma = 6.0 # incoherent sum significance to fold candidates
max_lo_cands_to_fold = 20 # Never fold more than this many lo-accel candidates
max_hi_cands_to_fold = 10 # Never fold more than this many hi-accel candidates
numhits_to_fold = 2 # Number of DMs with a detection needed to fold
low_DM_cutoff = 1.0 # Lowest DM to consider as a "real" pulsar
lo_accel_numharm = 16 # max harmonics
lo_accel_sigma = 2.0 # threshold gaussian significance
lo_accel_zmax = 0 # bins
lo_accel_flo = 2.0 # Hz
hi_accel_numharm = 8 # max harmonics
hi_accel_sigma = 3.0 # threshold gaussian significance
hi_accel_zmax = 50 # bins
hi_accel_flo = 1.0 # Hz
low_T_to_search = 50.0 # sec
# Sifting specific parameters (don't touch without good reason!)
sifting.sigma_threshold = to_prepfold_sigma-1.0 # incoherent power threshold (sigma)
sifting.c_pow_threshold = 100.0 # coherent power threshold
sifting.r_err = 1.1 # Fourier bin tolerance for candidate equivalence
sifting.short_period = 0.0005 # Shortest period candidates to consider (s)
sifting.long_period = 15.0 # Longest period candidates to consider (s)
sifting.harm_pow_cutoff = 8.0 # Power required in at least one harmonic
foldnsubs = 128 # Number of subbands to use when folding
#-------------------------------------------------------------------
def find_masked_fraction(obs):
"""
find_masked_fraction(obs):
Parse the output file from an rfifind run and return the
fraction of the data that was suggested to be masked.
"""
rfifind_out = obs.basefilenm + "_rfifind.out"
for line in open(rfifind_out):
if "Number of bad intervals" in line:
return float(line.split("(")[1].split("%")[0])/100.0
# If there is a problem reading the file, return 100%
return 100.0
def timed_execute(cmd, run_cmd=1):
"""
timed_execute(cmd):
Execute the command 'cmd' after logging the command
to STDOUT. Return the wall-clock amount of time
the command took to execute.
"""
sys.stdout.write("\n'"+cmd+"'\n")
sys.stdout.flush()
start = time.time()
if run_cmd: os.system(cmd)
end = time.time()
return end - start
def get_folding_command(cand, obs, ddplans, maskfile):
"""
get_folding_command(cand, obs, ddplans, maskfile):
Return a command for prepfold for folding the subbands using
an obs_info instance, a list of the ddplans, and a candidate
instance that describes the observations and searches.
"""
# Folding rules are based on the facts that we want:
# 1. Between 24 and 200 bins in the profiles
# 2. For most candidates, we want to search length = 101 p/pd/DM cubes
# (The side of the cube is always 2*M*N+1 where M is the "factor",
# either -npfact (for p and pd) or -ndmfact, and N is the number of bins
# in the profile). A search of 101^3 points is pretty fast.
# 3. For slow pulsars (where N=100 or 200), since we'll have to search
# many points, we'll use fewer intervals in time (-npart 30)
# 4. For the slowest pulsars, in order to avoid RFI, we'll
# not search in period-derivative.
zmax = cand.filename.split("_")[-1]
outfilenm = obs.basefilenm+"_DM%s_Z%s"%(cand.DMstr, zmax)
hidms = [x.lodm for x in ddplans[1:]] + [2000]
dfacts = [x.downsamp for x in ddplans]
for hidm, dfact in zip(hidms, dfacts):
if cand.DM < hidm:
downsamp = dfact
break
if downsamp==1:
fitsfile = obs.fits_filenm
else:
fitsfile = obs.dsbasefilenm+"_DS%d%s"%(downsamp,obs.fits_filenm[obs.fits_filenm.rfind("_"):])
p = 1.0 / cand.f
if (p < 0.002):
Mp, Mdm, N = 2, 2, 24
otheropts = "-npart 50 -ndmfact 3"
elif p < 0.05:
Mp, Mdm, N = 2, 1, 50
otheropts = "-npart 40 -pstep 1 -pdstep 2 -dmstep 3"
elif p < 0.5:
Mp, Mdm, N = 1, 1, 100
otheropts = "-npart 30 -pstep 1 -pdstep 2 -dmstep 1"
else:
Mp, Mdm, N = 1, 1, 200
otheropts = "-npart 30 -nopdsearch -pstep 1 -pdstep 2 -dmstep 1"
return "prepfold -mask %s -noxwin -accelcand %d -accelfile %s.cand -dm %.2f -o %s %s -n %d -npfact %d -ndmfact %d -nsub %d %s" % \
(maskfile, cand.candnum, cand.filename, cand.DM, outfilenm,
otheropts, N, Mp, Mdm, foldnsubs, fitsfile)
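# For illustration, a P ~ 0.3 s candidate at DM 50 from the zmax=0 search would
# be folded with a command of roughly this form (file names are schematic):
#
#   prepfold -mask <base>_rfifind.mask -noxwin -accelcand <n> \
#       -accelfile <base>_DM50.00_ACCEL_0.cand -dm 50.00 -o <base>_DM50.00_Z0 \
#       -npart 30 -pstep 1 -pdstep 2 -dmstep 1 -n 100 -npfact 1 -ndmfact 1 \
#       -nsub 128 <base>.fits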
class obs_info(object):
"""
class obs_info(fits_filenm)
A class describing the observation and the analysis.
"""
def __init__(self, fits_filenm):
self.fits_filenm = fits_filenm
self.basefilenm = fits_filenm[:fits_filenm.find(".fits")]
self.dsbasefilenm = fits_filenm[:fits_filenm.rfind("_")]
fitshandle=pyfits.open(fits_filenm)
self.MJD = fitshandle[0].header['STT_IMJD']+fitshandle[0].header['STT_SMJD']/86400.0+fitshandle[0].header['STT_OFFS']/86400.0
self.nchans = fitshandle[0].header['OBSNCHAN']
self.ra_string = fitshandle[0].header['RA']
self.dec_string = fitshandle[0].header['DEC']
self.str_coords = "J"+"".join(self.ra_string.split(":")[:2])
self.str_coords += "".join(self.dec_string.split(":")[:2])
self.nbits=fitshandle[0].header['BITPIX']
self.raw_N=fitshandle[1].header['NAXIS2']*fitshandle[1].header['NSBLK']
self.dt=fitshandle[1].header['TBIN']*1000000
self.raw_T = self.raw_N * self.dt
self.N = raw_N
if self.dt == 163.84:
self.N=self.N/2
self.T = self.N * self.dt
self.srcname=fitshandle[0].header['SRC_NAME']
# Determine the average barycentric velocity of the observation
self.baryv = presto.get_baryv(self.ra_string, self.dec_string,
self.MJD, self.T, obs="GB")
# Where to dump all the results
# Directory structure is under the base_output_directory
# according to base/MJD/filenmbase/beam
self.outputdir = os.path.join(base_output_dir,
str(int(self.MJD)),
self.srcname)
# Figure out which host we are processing on
self.hostname = socket.gethostname()
# The fraction of the data recommended to be masked by rfifind
self.masked_fraction = 0.0
# Initialize our timers
self.rfifind_time = 0.0
self.downsample_time = 0.0
self.dedispersing_time = 0.0
self.FFT_time = 0.0
self.lo_accelsearch_time = 0.0
self.hi_accelsearch_time = 0.0
self.singlepulse_time = 0.0
self.sifting_time = 0.0
self.folding_time = 0.0
self.total_time = 0.0
        # Initialize some candidate counters
self.num_sifted_cands = 0
self.num_folded_cands = 0
self.num_single_cands = 0
def write_report(self, filenm):
report_file = open(filenm, "w")
report_file.write("---------------------------------------------------------\n")
report_file.write("%s was processed on %s\n"%(self.fits_filenm, self.hostname))
report_file.write("Ending UTC time: %s\n"%(time.asctime(time.gmtime())))
report_file.write("Total wall time: %.1f s (%.2f hrs)\n"%\
(self.total_time, self.total_time/3600.0))
report_file.write("Fraction of data masked: %.2f%%\n"%\
(self.masked_fraction*100.0))
report_file.write("---------------------------------------------------------\n")
report_file.write(" rfifind time = %7.1f sec (%5.2f%%)\n"%\
(self.rfifind_time, self.rfifind_time/self.total_time*100.0))
report_file.write(" dedispersing time = %7.1f sec (%5.2f%%)\n"%\
(self.dedispersing_time, self.dedispersing_time/self.total_time*100.0))
report_file.write(" single-pulse time = %7.1f sec (%5.2f%%)\n"%\
(self.singlepulse_time, self.singlepulse_time/self.total_time*100.0))
report_file.write(" FFT time = %7.1f sec (%5.2f%%)\n"%\
(self.FFT_time, self.FFT_time/self.total_time*100.0))
report_file.write(" lo-accelsearch time = %7.1f sec (%5.2f%%)\n"%\
(self.lo_accelsearch_time, self.lo_accelsearch_time/self.total_time*100.0))
report_file.write(" hi-accelsearch time = %7.1f sec (%5.2f%%)\n"%\
(self.hi_accelsearch_time, self.hi_accelsearch_time/self.total_time*100.0))
report_file.write(" sifting time = %7.1f sec (%5.2f%%)\n"%\
(self.sifting_time, self.sifting_time/self.total_time*100.0))
report_file.write(" folding time = %7.1f sec (%5.2f%%)\n"%\
(self.folding_time, self.folding_time/self.total_time*100.0))
report_file.write("---------------------------------------------------------\n")
report_file.close()
class dedisp_plan(object):
"""
class dedisp_plan(lodm, dmstep, dmsperpass, numpasses, numsub, downsamp)
A class describing a de-dispersion plan for prepsubband in detail.
"""
def __init__(self, lodm, dmstep, dmsperpass, numpasses, numsub, downsamp):
self.lodm = float(lodm)
self.dmstep = float(dmstep)
self.dmsperpass = int(dmsperpass)
self.numpasses = int(numpasses)
self.numsub = int(numsub)
self.downsamp = int(downsamp)
self.sub_dmstep = self.dmsperpass * self.dmstep
self.dmlist = [] # These are strings for comparison with filenames
self.subdmlist = []
for ii in range(self.numpasses):
self.subdmlist.append("%.2f"%(self.lodm + (ii+0.5)*self.sub_dmstep))
lodm = self.lodm + ii * self.sub_dmstep
dmlist = ["%.2f"%dm for dm in \
numpy.arange(self.dmsperpass)*self.dmstep + lodm]
self.dmlist.append(dmlist)
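# For example, the first '4096fast' plan below, dedisp_plan(0.0, 0.01, 86, 81, 128, 1),
# describes 81 prepsubband calls of 86 DMs each in 0.01 pc/cm^3 steps starting at
# DM 0.0, with 128 subbands and no downsampling; its dmlist[0] then holds the
# strings "0.00" through "0.85" for the first pass.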
def remove_crosslist_duplicate_candidates(candlist1,candlist2):
n1 = len(candlist1)
n2 = len(candlist2)
removelist1 = []
removelist2 = []
candlist2.sort(key=attrgetter('r'))
candlist1.sort(key=attrgetter('r'))
print(" Searching for crosslist dupes...")
ii = 0
while ii < n1:
jj=0
while jj < n2:
if numpy.fabs(candlist1[ii].r-candlist2[jj].r) < sifting.r_err:
if candlist1[ii].sigma > candlist2[jj].sigma:
print("Crosslist remove from candlist 2, %f > %f, %d:%f~%f" % \
(candlist1[ii].sigma, candlist2[jj].sigma, jj,
candlist1[ii].r, candlist2[jj].r))
if jj not in removelist2:
removelist2.append(jj)
else:
print("Crosslist remove from candlist 1, %f > %f, %d:%f~%f" % \
(candlist2[jj].sigma, candlist1[ii].sigma, ii,
candlist1[ii].r, candlist2[jj].r))
if ii not in removelist1:
removelist1.append(ii)
jj += 1
ii += 1
for ii in range(len(removelist2)-1,-1,-1):
print("Removing %d from candlist2" % removelist2[ii])
del(candlist2[removelist2[ii]])
for ii in range(len(removelist1)-1,-1,-1):
print("Removing %d from candlist1" % removelist1[ii])
del(candlist1[removelist1[ii]])
print("Removed %d crosslist candidates\n" % (len(removelist1)+len(removelist2)))
print("Found %d candidates. Sorting them by significance...\n" % (len(candlist1)+len(candlist2)))
candlist1.sort(key=attrgetter('sigma'), reverse=True)
candlist2.sort(key=attrgetter('sigma'), reverse=True)
return candlist1,candlist2
def main(fits_filenm, workdir, ddplans):
# Change to the specified working directory
os.chdir(workdir)
# Get information on the observation and the job
job = obs_info(fits_filenm)
if job.raw_T < low_T_to_search:
print("The observation is too short (%.2f s) to search."%job.raw_T)
sys.exit()
job.total_time = time.time()
if job.dt == 163.84:
ddplans = ddplans[str(job.nchans)+"slow"]
else:
ddplans = ddplans[str(job.nchans)+"fast"]
# Use whatever .zaplist is found in the current directory
default_zaplist = glob.glob("*.zaplist")[0]
# Make sure the output directory (and parent directories) exist
try:
os.makedirs(job.outputdir)
os.chmod(job.outputdir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)
except: pass
# Make sure the tmp directory (in a tmpfs mount) exists
tmpdir = os.path.join(base_tmp_dir, job.basefilenm)
try:
os.makedirs(tmpdir)
except: pass
print("\nBeginning GBNCC search of '%s'"%job.fits_filenm)
print("UTC time is: %s"%(time.asctime(time.gmtime())))
rfifindout=job.basefilenm+"_rfifind.out"
rfifindmask=job.basefilenm+"_rfifind.mask"
if not os.path.exists(rfifindout) or not os.path.exists(rfifindmask):
# rfifind the filterbank file
cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out"%\
(rfifind_chunk_time, job.basefilenm,
job.fits_filenm, job.basefilenm)
job.rfifind_time += timed_execute(cmd)
maskfilenm = job.basefilenm + "_rfifind.mask"
# Find the fraction that was suggested to be masked
# Note: Should we stop processing if the fraction is
# above some large value? Maybe 30%?
job.masked_fraction = find_masked_fraction(job)
# Iterate over the stages of the overall de-dispersion plan
dmstrs = []
for ddplan in ddplans:
# Make a downsampled filterbank file
if ddplan.downsamp > 1:
cmd = "psrfits_subband -dstime %d -nsub %d -o %s_DS%d %s"%\
(ddplan.downsamp, job.nchans, job.dsbasefilenm, ddplan.downsamp, job.dsbasefilenm )
job.downsample_time += timed_execute(cmd)
fits_filenm = job.dsbasefilenm + "_DS%d%s"%\
(ddplan.downsamp,job.fits_filenm[job.fits_filenm.rfind("_"):])
else:
fits_filenm = job.fits_filenm
# Iterate over the individual passes through the .fil file
for passnum in range(ddplan.numpasses):
subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])
# Now de-disperse
cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -nsub %d -numdms %d -numout %d -o %s/%s %s"%\
(maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep,
ddplan.dmstep, ddplan.numsub,
ddplan.dmsperpass, job.N/ddplan.downsamp,
tmpdir, job.basefilenm, fits_filenm)
job.dedispersing_time += timed_execute(cmd)
# Do the single-pulse search
cmd = "single_pulse_search.py -p -m %f -t %f %s/*.dat"%\
(singlepulse_maxwidth, singlepulse_threshold, tmpdir)
job.singlepulse_time += timed_execute(cmd)
spfiles = glob.glob("%s/*.singlepulse"%tmpdir)
for spfile in spfiles:
try:
shutil.move(spfile, workdir)
except: pass
# Iterate over all the new DMs
for dmstr in ddplan.dmlist[passnum]:
dmstrs.append(dmstr)
basenm = os.path.join(tmpdir, job.basefilenm+"_DM"+dmstr)
datnm = basenm+".dat"
fftnm = basenm+".fft"
infnm = basenm+".inf"
# FFT, zap, and de-redden
cmd = "realfft %s"%datnm
job.FFT_time += timed_execute(cmd)
cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
(default_zaplist, job.baryv, fftnm)
job.FFT_time += timed_execute(cmd)
cmd = "rednoise %s"%fftnm
job.FFT_time += timed_execute(cmd)
try:
os.rename(basenm+"_red.fft", fftnm)
except: pass
# Do the low-acceleration search
cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
(lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
job.lo_accelsearch_time += timed_execute(cmd)
try:
os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
except: pass
try: # This prevents errors if there are no cand files to copy
shutil.move(basenm+"_ACCEL_%d.cand"%lo_accel_zmax, workdir)
shutil.move(basenm+"_ACCEL_%d"%lo_accel_zmax, workdir)
except: pass
# Do the high-acceleration search
cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
(hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
job.hi_accelsearch_time += timed_execute(cmd)
try:
os.remove(basenm+"_ACCEL_%d.txtcand"%hi_accel_zmax)
except: pass
try: # This prevents errors if there are no cand files to copy
shutil.move(basenm+"_ACCEL_%d.cand"%hi_accel_zmax, workdir)
shutil.move(basenm+"_ACCEL_%d"%hi_accel_zmax, workdir)
except: pass
# Move the .inf files
try:
shutil.move(infnm, workdir)
except: pass
# Remove the .dat and .fft files
try:
os.remove(datnm)
except: pass
try:
os.remove(fftnm)
except: pass
# Make the single-pulse plots
basedmb = job.basefilenm+"_DM"
basedme = ".singlepulse "
# The following will make plots for DM ranges:
# 0-30, 20-110, 100-310, 300-1000+
dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
basedmb+"[012][0-9].[0-9][0-9]"+basedme,
basedmb+"[2-9][0-9].[0-9][0-9]"+basedme +
basedmb+"10[0-9].[0-9][0-9]"+basedme,
basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"30[0-9].[0-9][0-9]"+basedme,
basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
dmrangestrs = ["0-30", "20-110", "100-310", "300-1000+"]
psname = job.basefilenm+"_singlepulse.ps"
for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
cmd = 'single_pulse_search.py -t %f -g "%s"' % \
(singlepulse_plot_SNR, dmglob)
job.singlepulse_time += timed_execute(cmd)
try:
os.rename(psname,
job.basefilenm+"_DMs%s_singlepulse.ps"%dmrangestr)
except: pass
# Sift through the candidates to choose the best to fold
job.sifting_time = time.time()
lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%lo_accel_zmax))
if len(lo_accel_cands):
lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
if len(lo_accel_cands):
lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
dmstrs, low_DM_cutoff)
hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
if len(hi_accel_cands):
hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
if len(hi_accel_cands):
hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
dmstrs, low_DM_cutoff)
if len(lo_accel_cands) and len(hi_accel_cands):
lo_accel_cands, hi_accel_cands = remove_crosslist_duplicate_candidates(lo_accel_cands, hi_accel_cands)
if len(lo_accel_cands):
lo_accel_cands.sort(key=attrgetter('sigma'), reverse=True)
sifting.write_candlist(lo_accel_cands,
job.basefilenm+".accelcands_Z%d"%lo_accel_zmax)
if len(hi_accel_cands):
hi_accel_cands.sort(key=attrgetter('sigma'), reverse=True)
sifting.write_candlist(hi_accel_cands,
job.basefilenm+".accelcands_Z%d"%hi_accel_zmax)
try:
cmd = "mv *.accelcands* "+job.outputdir
os.system(cmd)
except: pass
job.sifting_time = time.time() - job.sifting_time
# Fold the best candidates
cands_folded = 0
for cand in lo_accel_cands:
if cands_folded == max_lo_cands_to_fold:
break
elif cand.sigma > to_prepfold_sigma:
job.folding_time += timed_execute(get_folding_command(cand, job, ddplans, maskfilenm))
cands_folded += 1
cands_folded = 0
for cand in hi_accel_cands:
if cands_folded == max_hi_cands_to_fold:
break
elif cand.sigma > to_prepfold_sigma:
job.folding_time += timed_execute(get_folding_command(cand, job, ddplans, maskfilenm))
cands_folded += 1
# Remove the bestprof files
bpfiles = glob.glob("*.pfd.bestprof")
for bpfile in bpfiles:
os.remove(bpfile)
# Now step through the .ps files and convert them to .png and gzip them
psfiles = glob.glob("*.ps")
for psfile in psfiles:
if "singlepulse" in psfile:
os.system("pstoimg -density 200 -antialias -crop a "+psfile)
try:
os.remove(psfile)
except: pass
else:
os.system("pstoimg -density 200 -antialias -flip cw "+psfile)
os.system("gzip "+psfile)
# Tar up the results files
tar_suffixes = ["_ACCEL_%d.tgz"%lo_accel_zmax,
"_ACCEL_%d.tgz"%hi_accel_zmax,
"_ACCEL_%d.cand.tgz"%lo_accel_zmax,
"_ACCEL_%d.cand.tgz"%hi_accel_zmax,
"_singlepulse.tgz",
"_inf.tgz",
"_pfd.tgz"]
tar_globs = ["*_ACCEL_%d"%lo_accel_zmax,
"*_ACCEL_%d"%hi_accel_zmax,
"*_ACCEL_%d.cand"%lo_accel_zmax,
"*_ACCEL_%d.cand"%hi_accel_zmax,
"*.singlepulse",
"*_DM[0-9]*.inf",
"*.pfd"]
for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
tf = tarfile.open(job.basefilenm+tar_suffix, "w:gz")
for infile in glob.glob(tar_glob):
tf.add(infile)
os.remove(infile)
tf.close()
# Remove all the downsampled .fits files
fitsfiles = glob.glob("*_DS?*.fits") + glob.glob("*_DS??*.fits")
for fitsfile in fitsfiles:
os.remove(fitsfile)
# Remove the tmp directory (in a tmpfs mount)
try:
os.rmdir(tmpdir)
except: pass
# And finish up
job.total_time = time.time() - job.total_time
print("\nFinished")
print("UTC time is: %s"%(time.asctime(time.gmtime())))
# Write the job report
job.write_report(job.basefilenm+".report")
job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))
# Move all the important stuff to the output directory
cmd = "mv *rfifind.[bimors]* *.tgz *.ps.gz *.png *.report "+\
job.outputdir
os.system(cmd)
if __name__ == "__main__":
# Create our de-dispersion plans
# All GBNCC data have 4096 channels, but the earliest data is sampled
# at 163.84us rather than 81.92 us...
ddplans = {'4096slow':[], '4096fast':[]}
if (1):
#
# If there is <=1GB of RAM per CPU core, the following are preferred
#
# For 4096slow chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans['4096slow'].append(dedisp_plan( 0.0, 0.02, 86, 81, 128, 1))
ddplans['4096slow'].append(dedisp_plan(139.32, 0.03, 102, 27, 128, 2))
ddplans['4096slow'].append(dedisp_plan(221.94, 0.05, 102, 33, 128, 4))
ddplans['4096slow'].append(dedisp_plan(390.24, 0.10, 102, 11, 128, 8))
# For 4096fast chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans['4096fast'].append(dedisp_plan( 0.0, 0.01, 86, 81, 128, 1))
ddplans['4096fast'].append(dedisp_plan( 69.66, 0.02, 86, 33, 128, 2))
ddplans['4096fast'].append(dedisp_plan(126.42, 0.03, 102, 29, 128, 4))
ddplans['4096fast'].append(dedisp_plan(215.16, 0.05, 102, 33, 128, 8))
ddplans['4096fast'].append(dedisp_plan(383.46, 0.10, 102, 12, 128, 16))
else:
# If there is >2GB of RAM per CPU core, the following are preferred
#
# For 4096slow chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans['4096slow'].append(dedisp_plan( 0.0, 0.02, 172, 41, 256, 1))
ddplans['4096slow'].append(dedisp_plan(141.04, 0.03, 204, 14, 256, 2))
ddplans['4096slow'].append(dedisp_plan(226.72, 0.05, 204, 16, 256, 4))
ddplans['4096slow'].append(dedisp_plan(389.92, 0.10, 204, 6, 256, 8))
# For 4096fast chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans['4096fast'].append(dedisp_plan( 0.0, 0.01, 172, 41, 256, 1))
ddplans['4096fast'].append(dedisp_plan( 70.52, 0.02, 172, 16, 256, 2))
ddplans['4096fast'].append(dedisp_plan(125.56, 0.03, 204, 15, 256, 4))
ddplans['4096fast'].append(dedisp_plan(217.36, 0.05, 204, 17, 256, 8))
ddplans['4096fast'].append(dedisp_plan(390.76, 0.10, 204, 6, 256, 16))
# Arguments to the search program are
# sys.argv[1] = PSRFITS file name
# sys.argv[2] = working directory name
if len(sys.argv) >= 3:
workdir = sys.argv[2]
fits_filenm = sys.argv[1]
main(fits_filenm, workdir, ddplans)
elif len(sys.argv) == 2:
fits_filenm = sys.argv[1]
main(fits_filenm, '.', ddplans)
else:
print("GBNCC_search.py fits_filenm [workdir]")
| 27,528 | 44.729236 | 134 | py |
presto | presto-master/bin/detrend_dat.py | #!/usr/bin/env python
import os, sys
import numpy as np
from astropy.stats import sigma_clip
import scipy.signal
if len(sys.argv) != 2:
print("\nusage: {} file\n".format(sys.argv[0]))
sys.exit(1)
os.rename(sys.argv[1], sys.argv[1]+".bak")
data = np.fromfile(sys.argv[1]+".bak", dtype=np.float32)
N = len(data)
nblocks = 10000
data.shape = (nblocks, N//nblocks)  # integer division so the reshape works under Python 3
block_stds = np.std(data, axis=1)
good_stds = sigma_clip(block_stds, sigma=4.0)
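# Note: sigma_clip returns a masked array whose mask is True for blocks whose
# standard deviation lies more than 4 sigma from the clipped mean, i.e. the bad blocks.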
stds_inds = np.arange(nblocks)[~good_stds.mask]
# zero-out the bad blocks
data[good_stds.mask,:] *= 0.0
print("Found %d bad blocks out of %d" % (good_stds.mask.sum(), nblocks))
# Now detrend the good blocks
for ii in stds_inds:
data[ii] = scipy.signal.detrend(data[ii], type='linear')
data.ravel().tofile(sys.argv[1])
| 779 | 22.636364 | 72 | py |
presto | presto-master/bin/pygaussfit.py | #!/usr/bin/env python
import os
import sys
from presto.psr_utils import gaussian_profile, read_profile
from matplotlib.patches import Rectangle
from presto.bestprof import bestprof
import matplotlib.pyplot as plt
import numpy as np
from presto import mpfit
import subprocess
class GaussianSelector(object):
def __init__(self, ax, profile, errs, profnm, minspanx=None,
minspany=None, useblit=True):
self.ax = ax.axes
self.profile = profile
self.proflen = len(profile)
self.profnm = profnm
self.phases = np.arange(self.proflen, dtype='d')/self.proflen
self.errs = errs
self.visible = True
self.DCguess = sorted(profile)[len(profile) // 10 + 1]
self.init_params = [self.DCguess]
self.numgaussians = 0
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.background = None
self.rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.5, fill=False)
self.to_draw = Rectangle((0,0), 0, 1, visible=False, **self.rectprops)
self.ax.add_patch(self.to_draw)
self.useblit = useblit
self.minspanx = minspanx
self.minspany = minspany
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
self.plot_gaussians(self.init_params)
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress == None:
return event.inaxes!= self.ax
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes != self.ax or
event.button != self.eventpress.button)
def press(self, event):
'on button press event'
# Is the correct button pressed within the correct axes?
if self.ignore(event): return
        # make the drawn box/line visible and get the click-coordinates,
        # button, ...
self.eventpress = event
if event.button==1:
self.to_draw.set_visible(self.visible)
self.eventpress.ydata = self.DCguess
def release(self, event):
'on button release event'
if self.eventpress is None or self.ignore(event): return
# release coordinates, button, ...
self.eventrelease = event
if event.button==1:
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box
if xmin>xmax: xmin, xmax = xmax, xmin
if ymin>ymax: ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx<self.minspanx
yproblems = self.minspany is not None and spany<self.minspany
# call desired function
self.onselect()
self.eventpress = None # reset the variables to their
        self.eventrelease = None # initial values
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
def onmove(self, event):
if self.eventpress is None or self.ignore(event): return
x,y = event.xdata, event.ydata # actual position with button still pressed
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx>maxx: minx, maxx = maxx, minx # get them in the right order
if miny>maxy: miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx-minx) # set width and height of box
self.to_draw.set_height(maxy-miny)
self.update()
def plot_gaussians(self, params):
plt.subplot(211)
plt.cla()
# Re-plot the original profile
plt.plot(self.phases, self.profile, c='black', lw=3, alpha=0.3)
plt.xlabel('Pulse Phase')
plt.ylabel('Pulse Amplitude')
DC = params[0]
# Plot the individual gaussians
for ii in range(self.numgaussians):
phase, FWHM, amp = params[1+ii*3:4+ii*3]
plt.plot(self.phases, DC + amp*gaussian_profile(self.proflen, phase, FWHM))
def onselect(self):
event1 = self.eventpress
event2 = self.eventrelease
# Left mouse button = add a gaussian
if event1.button == event2.button == 1:
x1, y1 = event1.xdata, event1.ydata
x2, y2 = event2.xdata, event2.ydata
phase = 0.5*(x1+x2)
FWHM = np.fabs(x2-x1)
amp = np.fabs(1.05*(y2-self.init_params[0])*(x2-x1))
self.init_params += [phase, FWHM, amp]
self.numgaussians += 1
self.plot_gaussians(self.init_params)
plt.draw()
# Middle mouse button = fit the gaussians
elif event1.button == event2.button == 2:
fit_params, fit_errs, chi_sq, dof = \
fit_gaussians(self.profile, self.init_params,
np.zeros(self.proflen)+self.errs,
self.profnm)
# Save the fit parameters so the caller can retrieve them if needed
self.fit_params = fit_params
self.fit_errs = fit_errs
# scaled uncertainties
#scaled_fit_errs = fit_errs * np.sqrt(chi_sq / dof)
# Plot the best-fit profile
self.plot_gaussians(fit_params)
fitprof = gen_gaussians(fit_params, self.proflen)
self.fitprof = fitprof
plt.plot(self.phases, fitprof, c='black', lw=1)
plt.draw()
# Plot the residuals
plt.subplot(212)
plt.cla()
residuals = self.profile - fitprof
plt.errorbar(self.phases, residuals, self.errs,fmt='.')
plt.grid(True)
plt.xlabel('Pulse Phase')
plt.ylabel('Data-Fit Residuals')
plt.draw()
# Right mouse button = remove last gaussian
elif event1.button == event2.button == 3:
if self.numgaussians:
self.init_params = self.init_params[:-3]
self.numgaussians -= 1
self.plot_gaussians(self.init_params)
plt.draw()
plt.subplot(212)
plt.cla()
plt.xlabel('Pulse Phase')
plt.ylabel('Data-Fit Residuals')
plt.draw()
def gen_gaussians(params, N):
"""
gen_gaussians(params, N):
Return a model of a DC-component + M gaussians
params is a sequence of 1+M*3 values
the first value is the DC component. Each remaining
        group of three represents a gaussian's phase (0-1),
FWHM (0-1), and amplitude (>0.0).
N is the number of points in the model.
"""
numgaussians = (len(params)-1) // 3
model = np.zeros(N, dtype='d') + params[0]
for ii in range(numgaussians):
phase, FWHM, amp = params[1+ii*3:4+ii*3]
model += amp * gaussian_profile(N, phase, FWHM)
return model
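# A small sketch of the parameter layout (numbers taken from the test case in
# __main__ below): a DC level of 600 plus one gaussian centred at phase 0.25
# with FWHM 0.1 and amplitude 30, evaluated over 128 bins, is
#
#   model = gen_gaussians([600.0, 0.25, 0.1, 30.0], 128)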
def fit_function(params, fjac=None, data=None, errs=None):
return [0, (data - gen_gaussians(params, len(data))) / errs]
def fit_gaussians(data, initial_params, errs, profnm):
numparams = len(initial_params)
numgaussians = (len(initial_params)-1) // 3
# Generate the parameter structure
parinfo = []
params0 = []
for ii in range(numparams):
params0.append(initial_params[ii])
parinfo.append({'value':initial_params[ii], 'fixed':0,
'limited':[0,0], 'limits':[0.,0.]})
other_args = {'data':data, 'errs':errs}
# Now fit it
mpfit_out = mpfit.mpfit(fit_function, params0, functkw=other_args,
parinfo=parinfo, quiet=1)
fit_params = mpfit_out.params
fit_errs = mpfit_out.perror
# degrees of freedom
dof = len(data) - len(fit_params)
# chi-squared for the model fit
chi_sq = mpfit_out.fnorm
print("------------------------------------------------------------------")
print("Multi-Gaussian Fit by pygaussfit.py of '%s'"%profnm)
print("------------------------------------------------------------------")
print("mpfit status:", mpfit_out.status)
print("gaussians:", numgaussians)
print("DOF:", dof)
print("chi-sq: %.2f" % chi_sq)
print("reduced chi-sq: %.2f" % (chi_sq/dof))
residuals = data - gen_gaussians(fit_params, len(data))
print("residuals mean: %.3g" % np.mean(residuals))
print("residuals stdev: %.3g" % np.std(residuals))
print("--------------------------------------")
print(" const = %.5f +/- %.5f" % (fit_params[0], fit_errs[0]))
for ii in range(numgaussians):
print(" phas%d = %.5f +/- %.5f" % (ii+1, fit_params[1+ii*3], fit_errs[1+ii*3]))
print(" fwhm%d = %.5f +/- %.5f" % (ii+1, fit_params[2+ii*3], fit_errs[2+ii*3]))
print(" ampl%d = %.5f +/- %.5f" % (ii+1, fit_params[3+ii*3], fit_errs[3+ii*3]))
print("--------------------------------------")
return fit_params, fit_errs, chi_sq, dof
if __name__ == '__main__':
noise_stdev = 0.0
if len(sys.argv)==1:
from numpy.random import normal
print("""usage: python pygaussfit.py input_file [prof_stdev]
Left mouse draws a region roughly boxing where you'll place a gaussian.
Draw several to fit multiple gaussians.
Middle mouse performs the fit.
Right mouse removes the last gaussian from the fit.
The input_file should simply be an ASCII file with columns for pulse phase
and amplitude, or, a PSRCHIVE archive works as long as you have the PSRCHIVE
python interface installed. Comments in the text file starting with "#" are
allowed. *.bestprof files work.
Paste the full resulting STDOUT to a '.gaussians' file for use
in get_TOAs.py or sum_profiles.py with the '-g' parameter as a template.""")
N = 128
DC = 600.0
noise_stdev = 8.0
params = [DC]
params += [0.25, 0.1, 30.0]
params += [0.3, 0.2, 15.0]
params += [0.8, 0.05, 20.0]
prof = normal(0.0, noise_stdev, N) + gen_gaussians(params, N)
filenm = "test"
else:
if sys.argv[1].endswith(".pfd"):
print("Input is PFD")
# Input is pfd file
pfdfn = sys.argv[1]
# Check for bestprof
if not os.path.exists(pfdfn+".bestprof"):
print("Creating bestprof file")
# Create bestprof file with show_pfd
devnull = open(os.devnull, 'w')
subprocess.call(['show_pfd', '-noxwin', pfdfn],
stdout=devnull)
devnull.close()
filenm = pfdfn+".bestprof"
prof = read_profile(filenm, normalize=0)
else:
filenm = sys.argv[1]
try:
prof = read_profile(filenm, normalize=0)
except:
import psrchive
arch = psrchive.Archive_load(filenm)
#arch.bscrunch_to_nbin(256)
arch.dedisperse()
arch.fscrunch()
arch.tscrunch()
arch.convert_state('Stokes')
subint = arch.get_Integration(0)
(b_mean, b_var) = subint.baseline_stats()
noise_stdev = np.sqrt(b_var[0][0])
sprof = subint.get_Profile(0,0)
prof = sprof.get_amps()
if len(sys.argv)>=3:
noise_stdev = float(sys.argv[2])
elif noise_stdev == 0.0:
try:
bprof = bestprof(sys.argv[1])
noise_stdev = bprof.prof_std
except:
# Use the std of the smallest 25% of the bins
n = len(prof)//4
# The 2.07 compensates for the bias of this method if the data
# is pure gaussian noise
noise_stdev = np.partition(prof, n)[:n].std() * 2.07
print("Using stdev of lowest 25% of bins as noise level: ", noise_stdev)
fig = plt.figure()
dataplot = fig.add_subplot(211)
interactor = GaussianSelector(dataplot, prof, noise_stdev, filenm)
plt.show()
| 13,567 | 40.240122 | 88 | py |
presto | presto-master/bin/filter_zerolags.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import range
import numpy as N
import sys, scipy.io, scipy.signal
if len(sys.argv) != 2:
print("\nusage: {} file\n".format(sys.argv[0]))
sys.exit(1)
plot=0
infilenm = sys.argv[1]
basename = infilenm[:infilenm.find(".zerolags")]
dt = 0.00008192
flo = 2.0 # cutoff freq in Hz
passband = 0.8 # Fractional freq where passband ends
stopband = 1.2 # Fractional freq where stopband starts
max_pass_atten = 3.0 # dB
min_stop_atten = 30.0 # dB
zls = N.fromfile(infilenm, 'f')
numpts = len(zls)
if (plot):
from presto.Pgplot import *
plotxy(zls)
T = numpts*dt
fnyq = numpts/(T*2)
cutoff = flo/fnyq
# Determine an "average" standard deviation
stds = []
for ii in range(100):
loind = int(N.random.rand() * (numpts-1001))
hiind = loind + 1000
stds.append(N.std(zls[loind:hiind]))
goodstd = N.median(stds)
# First, detrend the data in a piecewise linear fashion
# where the pieces are defined by jumps in the data
num_bps = 0
max_num_break_points = 100
break_points = N.zeros(max_num_break_points, dtype=int)  # breakpoint indices for detrending
dzls = N.fabs(zls[1:] - zls[:-1])
argsort_jumps = dzls.argsort()[::-1]
ii = 0
dind = 200
while (ii < numpts-1):
index = argsort_jumps[ii]
# Don't allow the breakpoints to be within 100 of each other
if num_bps and min(N.fabs(break_points[:num_bps] - index)) < 100:
ii += 1
continue
if index > 100:
lomean = N.mean(zls[index-dind:index-20])
else:
lomean = N.mean(zls[:index])
if index < numpts-1-dind:
himean = N.mean(zls[index+20:index+dind])
else:
himean = N.mean(zls[index:])
if N.fabs(himean - lomean) > goodstd:
break_points[num_bps] = index
num_bps += 1
if num_bps == max_num_break_points: break
ii += 1
if dzls[index] < 3.0 * goodstd: break
if (num_bps):
break_points = break_points[:num_bps]
break_points.sort()
detrend_zls = scipy.signal.detrend(zls, bp=break_points)
print("%s: Found %d breakpoints for detrending "%(basename, num_bps), break_points)
else:
detrend_zls = scipy.signal.detrend(zls)
print("%s: Found 0 breakpoints for detrending"%basename)
# Now high-pass filter the data to get rid of the not-so-drastic
# power fluctuations
logood = numpts//5  # integer number of samples used for edge padding
newzls = N.zeros(numpts+logood, 'f')
newzls[:logood] += detrend_zls[:logood][::-1]
newzls[logood:] += detrend_zls
b, a = scipy.signal.iirdesign(passband*cutoff, stopband*cutoff,
max_pass_atten, min_stop_atten,
analog=0, ftype='ellip', output='ba')
filtered_zls = scipy.signal.lfilter(b, a, newzls)[logood:]
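# The first numpts/5 samples are mirror-padded onto the front of the series
# before filtering and trimmed off again ([logood:]) so that the IIR filter's
# startup transient does not corrupt the beginning of the zero-lag data.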
if (plot):
plotxy(filtered_zls, color='red')
closeplot()
# This is the total offset that we will apply to the data
offset = (zls - detrend_zls) + filtered_zls
outfilenm = basename+".offset"
outfile = open(outfilenm, 'wb')
# scipy.io.fwrite() no longer exists in modern SciPy; write 32-bit floats directly
offset.astype(N.float32).tofile(outfile)
outfile.close()
| 2,984 | 26.638889 | 87 | py |
presto | presto-master/bin/fitorb.py | #!/usr/bin/env python
"""
fitorb: A non-linear optimizer for solving pulsar orbits by Ryan Lynch
"""
from __future__ import print_function
from builtins import range
from numpy import *
from presto.mpfit import mpfit
from presto.psr_constants import SECPERDAY,TWOPI,DEGTORAD,SOL
from presto import psr_utils
from presto import parfile
from pylab import *
import sys
cspeed = 299792458.0 # m/s
# begin function definitions
def print_usage():
print("""
A non-linear least-squares optimizer for solving pulsar orbits
Usage: fitorb.py [-p p] [-pb pb] [-x x] [-T0 T0] [-e e] [-w w] [-par par_file] [-nofit const_params] bestprof_files
-p: Initial guess for pulsar spin period (s; required without -par)
-pb: Initial guess for orbital period (days; required without -par)
-x: Initial guess for projected semi-major axis (s; required
without -par)
-T0: Initial guess for time of periastron passage (MJD; required
without -par)
-e: Initial guess for orbital eccentricity (default = 0)
-w: Initial guess for longitude of periastron (deg; default = 0)
-par: A tempo .par file with values of the spin and orbital
parameters to use as initial guesses (if given, you don't need
to specify the above parameters)
-nofit: A comma separated string of parameters to hold constant
(e.g. "-nofit p,pb,x")
-o: Root of the output file name(s) (default = "fitorb")
bestprof_files: prepfold .bestprof files containing measurements of p and p-dot
(and possibly p-ddot)
""")
return None
def parse_cmd_line(args):
"""
Parse command line argumentss
Input
-----
args - a list of command line aruments and values
Output
------
user-supplied values for the given arguments
"""
# create a list of valid command line arguments
valid_args = ["-p", "-pb", "-x", "-T0", "-e", "-w", "-par", "-nofit","-o"]
if len(args) == 0:
print_usage()
exit(0)
for arg in args:
# check to make sure all arguments are valid
if (arg.startswith("-")) and (arg not in valid_args) and \
not arg.strip("-").replace(".","").isdigit():
print("ERROR: Unknown arg %s"%arg)
print_usage()
exit(0)
# go through the given arguments and store user-supplied values
try:
const_params = args.pop(args.index("-nofit")+1)
args.remove("-nofit")
except ValueError:
const_params = ""
pass
if "-par" in args:
try:
par_file_name = args.pop(args.index("-par")+1)
args.remove("-par")
par = parfile.psr_par(par_file_name)
p = par.P0
pb_days = par.PB
x = par.A1
T0 = par.T0
e = par.E
w = par.OM
except IOError:
print("ERROR: %s not found\n"%par_file_name)
exit(0)
except AttributeError:
print("ERROR: %s does not appear to be a valid binary .par file\n" \
%par_file_name)
exit(0)
else:
try:
p = float(args.pop(args.index("-p")+1))
args.remove("-p")
except ValueError:
print("ERROR: You must specify a spin period\n")
exit(0)
try:
pb_days = float(args.pop(args.index("-pb")+1))
args.remove("-pb")
except ValueError:
print("ERROR: You must specify an orbital period\n")
exit(0)
try:
x = float(args.pop(args.index("-x")+1))
args.remove("-x")
except ValueError:
print("ERROR: You must specify a projected semi-major axis\n")
exit(0)
try:
T0 = float(args.pop(args.index("-T0")+1))
args.remove("-T0")
except ValueError:
print("ERROR: You must specify a time of periastron passage\n")
exit(0)
try:
e = float(args.pop(args.index("-e")+1))
args.remove("-e")
except ValueError:
print("WARNING: Orbital eccentricity not specified, assuming e = 0\n")
e = 0.0
const_params = const_params + ",e"
pass
try:
w = float(args.pop(args.index("-w")+1))
args.remove("-w")
except ValueError:
print("WARNING: Longitude of periastron not specified, assuming w = 0\n")
w = 0.0
const_params = const_params + ",w"
pass
try:
out_file_root = args.pop(args.index("-o")+1)
args.remove("-o")
except ValueError:
out_file_root = "fitorb"
pass
in_files = args
return p,pb_days,x,T0,e,w,const_params,out_file_root,in_files
def read_bestprof(file_name):
"""
Read relevant information from prepfold .bestprof files (written by
    Scott Ransom)
Input
-----
file_name - string containing the path to a .bestprof file
Output
------
epoch - the barycentric epoch (MJD) of the observation
N*dt - length of observation (number of data points * sampling time)
p0,p1,p2 - observed spin period and higher-order period derivatives
"""
in_file = open(file_name)
bary = N = 0
epoch = dt = p0 = p1 = p2 = 0.0
for line in in_file.readlines():
if line[0] == "#":
if line.startswith("# T_sample"):
dt = float(line.split("=")[-1])
continue
if line.startswith("# Data Folded"):
N = float(line.split("=")[-1])
continue
if line.startswith("# Epoch_topo"):
try:
epochi = float(line.split("=")[-1].split(".")[0])
epochf = float("0."+line.split("=")[-1].split(".")[1])
epoch = epochi+epochf
except ValueError:
pass
continue
if line.startswith("# Epoch_bary"):
try:
epochi = float(line.split("=")[-1].split(".")[0])
epochf = float("0."+line.split("=")[-1].split(".")[1])
epoch = epochi+epochf
bary = 1
except ValueError:
pass
if ((bary and line.startswith("# P_bary")) or
(not bary and line.startswith("# P_topo"))):
p0 = float(line.split("=")[-1].split("+")[0])/1000.0
continue
if ((bary and line.startswith("# P'_bary")) or
(not bary and line.startswith("# P'_topo"))):
p1 = float(line.split("=")[-1].split("+")[0])
continue
if ((bary and line.startswith("# P''_bary")) or
(not bary and line.startswith("# P''_topo"))):
p2 = float(line.split("=")[-1].split("+")[0])
continue
else:
break
return (epoch, N*dt, p0, p1, p2)
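# The returned quantities are used later in this script as the polynomial
# p(t) = p0 + t*(p1 + 0.5*t*p2), with t in seconds since the epoch, evaluated
# minute by minute to build the period-vs-time points that get fitted.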
def read_par(pfname,f1errmax=999.0):
pf = parfile.psr_par(pfname)
# Try to see how many freq derivs we have
fs = [pf.F0]
for ii in range(1, 20): # hopefully 20 is an upper limit!
attrib = "F%d"%ii
if hasattr(pf, attrib):
fs.append(getattr(pf, attrib))
else:
break
epoch = pf.PEPOCH
Tobs = (pf.FINISH - pf.START) * 86400.0
return epoch,Tobs,fs
def get_params_info(params_start, const_params):
"""
Build a list of dictionaries with information about spin and orbital
parameters to be passed to mpfit
Input
-----
params_start - a list of initial guesses for parameter values
const_params - a string containing the parameters to hold constant
during fit
Output
------
params_info - a list of dictionaries with information on each
parameter
"""
params_info = []
    # check to see if each parameter should be held constant
if "p" in const_params.split(","):
params_info.append({"parname":"p", # parameter name
"value":params_start[0], # initial guess
"limited":[True,True], # bounded above and below?
                            # upper and lower limits (used if "limited" is True)
"limits":[0.9*params_start[0],1.1*params_start[0]],
"fixed":True}) # parameter fixed?
print("Holding spin period constant")
else:
params_info.append({"parname":"p",
"value":params_start[0],
"limited":[True,True],
"limits":[0.9*params_start[0],1.1*params_start[0]],
"fixed":False})
if "pb" in const_params.split(","):
params_info.append({"parname":"pb",
"value":params_start[1],
"limited":[True,False],
"limits":[0.0,0.0],
"fixed":True})
print("Holding orbital period constant")
else:
params_info.append({"parname":"pb",
"value":params_start[1],
"limited":[True,False],
"limits":[0.0,0.0],
"fixed":False})
if "x" in const_params.split(","):
params_info.append({"parname":"x",
"value":params_start[2],
"limited":[True,False],
"limits":[0.0,0.0],
"fixed":True})
print("Holding projected semi-major axis constant")
else:
params_info.append({"parname":"x",
"value":params_start[2],
"limited":[True,False],
"limits":[0.0,0.0],
"fixed":False})
if "T0" in const_params.split(","):
params_info.append({"parname":"T0",
"value":params_start[3],
"limited":[True,True],
"limits":[params_start[3] - params_start[1]/SECPERDAY,
params_start[3] + params_start[1]/SECPERDAY],
"fixed":True})
print("Holding time of periastron passage constant")
else:
params_info.append({"parname":"T0",
"value":params_start[3],
"limited":[True,True],
"limits":[params_start[3] - params_start[1]/SECPERDAY,
params_start[3] + params_start[1]/SECPERDAY],
"fixed":False})
if "e" in const_params.split(","):
params_info.append({"parname":"e",
"value":params_start[4],
"limited":[True,True],
"limits":[0.0,1.0],
"fixed":True})
print("Holding eccentricity constant")
else:
params_info.append({"parname":"e",
"value":params_start[4],
"limited":[True,True],
"limits":[0.0,1.0],
"fixed":False})
if "w" in const_params.split(","):
params_info.append({"parname":"w",
"value":params_start[5],
"limited":[True,True],
"limits":[0.0,360.0],
"fixed":True})
print("Holding longitude of periastron constant")
else:
params_info.append({"parname":"w",
"value":params_start[5],
"limited":[True,True],
"limits":[0.0,360.0],
"fixed":False})
return params_info
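# Each entry above follows the mpfit "parinfo" convention: 'value' is the starting
# guess, 'fixed' freezes the parameter, and 'limited'/'limits' give optional
# lower/upper bounds, e.g. for the eccentricity (illustrative value):
#
#   {"parname": "e", "value": 0.1, "limited": [True, True],
#    "limits": [0.0, 1.0], "fixed": False}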
def myasarray(a):
"""
Properly format array (written by Scott Ransom)
Input
-----
a - python array
Output
------
a - modified python array
"""
if type(a) in [type(1.0),type(1),type(1),type(1j)]:
a = asarray([a])
if len(a) == 0:
a = asarray([a])
return a
def calc_omega(params, MJD):
"""
    Calculate w at the barycentric epoch MJD (written by Scott Ransom)
Input
-----
params - a list of parameter values
MJD - barycentric epoch MJD
Output
------
w in radians
"""
return params[5]*DEGTORAD
def eccentric_anomaly(params, mean_anomaly):
"""
    Calculate the eccentric anomaly using a simple iteration to solve
    Kepler's Equation (written by Scott Ransom)
Input
-----
params - a list of parameter values
mean_anomaly - the mean anomaly
Output
------
the eccentric anomaly in radians
"""
ma = fmod(mean_anomaly, TWOPI)
ma = where(ma < 0.0, ma+TWOPI, ma)
eccentricity = params[4]
ecc_anom_old = ma
ecc_anom = ma + eccentricity*sin(ecc_anom_old)
# This is a simple iteration to solve Kepler's Equation
while (maximum.reduce(fabs(ecc_anom-ecc_anom_old)) > 5e-15):
ecc_anom_old = ecc_anom[:]
ecc_anom = ma + eccentricity*sin(ecc_anom_old)
return ecc_anom
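# The fixed-point iteration above solves Kepler's equation E = M + e*sin(E);
# for example, M = 1 rad and e = 0.1 converge to E ~ 1.089 rad after a few
# passes (illustrative values only).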
def calc_anoms(params, MJD):
"""
Calculate the mean, eccentric, and true anomalies at the barycentric
epoch MJD (written by Scott Ransom)
Input
-----
params - a list of parameter values
MJD - the barycentric epoch MJD
Output
------
mean_anom - mean anomaly in radians
    ecc_anom - eccentric anomaly in radians
true_anom - the true anomaly in radians
"""
MJD = myasarray(MJD)
difft = (MJD - params[3])*SECPERDAY
sec_since_peri = fmod(difft, params[1])
sec_since_peri[sec_since_peri < 0.0] += params[1]
mean_anom = sec_since_peri/params[1]*TWOPI
ecc_anom = eccentric_anomaly([params[0],params[1],
params[2],params[3],params[4],
params[5]], mean_anom)
true_anom = psr_utils.true_anomaly(ecc_anom, params[4])
return (mean_anom, ecc_anom, true_anom)
def radial_velocity(params, MJD):
"""
Calculate the radial velocity of the pulsar at the given MJD
(written by Scott Ransom)
Input
-----
params - a list of parameter values
MJD - the barycentric epoch MJD
Output
------
the radial velocity in km/s
"""
ma, ea, ta = calc_anoms([params[0],params[1],params[2],
params[3],params[4],params[5]], MJD)
ws = calc_omega([params[0],params[1],params[2],
params[3],params[4],params[5]], MJD)
c1 = TWOPI*params[2]/params[1];
c2 = cos(ws)*sqrt(1-params[4]*params[4]);
sws = sin(ws);
cea = cos(ea)
return SOL/1000.0*c1*(c2*cea - sws*sin(ea)) / (1.0 - params[4]*cea)
def plot_file_panel(in_file,params):
period = []
time = []
if in_file.endswith('.par'):
(epoch,T,fs) = read_par(in_file)
for minute in arange(int(T/60.0+0.5)):
t = minute/1440.0
time.append(t)
period.append(1.0/psr_utils.calc_freq(epoch+t,epoch,*fs))
else:
(epoch, T, p0, p1, p2) = read_bestprof(in_file)
for minute in arange(int(T/60.0+0.5)):
t = minute*60.0
time.append(minute/1440.0)
period.append(p0 + t*(p1 + 0.5*t*p2))
print("Plotting: file, epoch, Tobs",in_file,epoch,T)
period = asarray(period)
time = asarray(time)
plot(time,period*1000.0,'o')
xlabel('Time (s)')
ylabel('Period (ms)')
title("%.3f" % (epoch,))
model_time = arange(epoch-0.1, epoch+max(time)+0.1, 0.001)
plot( model_time-epoch,doppler_period(params, model_time)*1000.0,'r')
def doppler_period(params, MJD):
"""
Calculate the doppler modulated pulse period (written by Scott Ransom)
Input
-----
params - list of parameter values
MJD - barycentric epoch MJD
Output
------
observed pulse period in seconds
"""
vs = radial_velocity([params[0],params[1],params[2],
params[3],params[4],params[5]], MJD) \
*1000.0 # m/s
return params[0]*(1.0+vs/SOL)
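# This is the non-relativistic Doppler relation p_obs = p0*(1 + v_r/c), with v_r
# the line-of-sight velocity from radial_velocity() above; e.g. a p0 = 5 ms pulsar
# receding at 100 km/s would appear at ~5.0017 ms (illustrative numbers).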
def funct(params, fjac=None, times=None, measured=None):
"""
Calculate the difference between the modeled and observed period
Input
-----
params - list of parameter values
fjac - function for calculating the Jacobian (if None mpfit
will use a default method)
times - array of MJDs when period observations were made
measured - array of observed periods (in seconds)
Output
------
a list containing the exit status (used by mpfit) and the
differences between the model and data
"""
status = 0 # this will probably always be zero
return [status,doppler_period([params[0],params[1],params[2],
params[3],params[4],params[5]],
times) - measured]
# parse the command line
p_start,pb_days_start,x_start,T0_start,e_start,w_start,const_params,out_file_root, \
in_files = parse_cmd_line(sys.argv[1:])
pb_sec_start = pb_days_start * SECPERDAY # need orbital period in seconds
# store user-supplied initial guesses for parameter values
params_start = [p_start, pb_sec_start, x_start, T0_start, e_start, w_start]
# build the dictionary of parameter information
params_info = get_params_info(params_start, const_params)
period = []
time = []
pepochs = []
p0s = []
p1s = []
# get the observed periods and times from the .bestprof files
for in_file in in_files:
if in_file.endswith('.par'):
(epoch,T,fs) = read_par(in_file)
if (fs[1] != 0.0):
p0tmp,p1tmp = psr_utils.p_to_f(fs[0],fs[1])
p0s.append(p0tmp)
p1s.append(p1tmp)
pepochs.append(epoch)
for minute in arange(int(T/60.0+0.5)):
t = epoch + minute/1440.0
time.append(t)
period.append(1.0/psr_utils.calc_freq(t,epoch,*fs))
else:
(epoch, T, p0, p1, p2) = read_bestprof(in_file)
for minute in arange(int(T/60.0+0.5)):
t = minute*60.0
time.append(epoch + minute/1440.0)
period.append(p0 + t*(p1 + 0.5*t*p2))
if p1 != 0.0:
p0s.append(p0)
p1s.append(p1)
pepochs.append(epoch)
Torb = min(time)
period = asarray(period)
time = asarray(time)
p0s = asarray(p0s)
p1s = asarray(p1s)
pepochs = asarray(pepochs)
accs = cspeed*p1s/p0s
# Plot ellipse figure
figure(1)
plot(p0s*1000.0,accs,"o")
grid(True)
title('Acceleration vs Period')
xlabel('Period (ms)')
ylabel('Acceleration (m/s^2)')
figure(2)
if len(in_files) < 5:
nsubrows = 1
nsubcols = len(in_files)
else:
nsubrows = (len(in_files)-1)//5 + 1
nsubcols = 5
ip = 1
for in_file in in_files:
subplot(nsubrows,nsubcols,ip)
plot_file_panel(in_file,params_start)
ip+=1
# do the actual fitting
ret = mpfit(funct, functkw={"times":time, "measured":period}, parinfo=params_info, iterfunct=None)
print("\nmpfit exited with status %i (1--4 is OK)\n"%ret.status)
# print the parameters in a tempo .par file format
print("PEPOCH %17.15g"%epoch)
print("P0 %17.15g"%ret.params[0])
print("BINARY BT")
print("A1 %17.15g"%ret.params[2])
print("E %17.15g"%ret.params[4])
print("T0 %17.15g"%(ret.params[3]+0.5*ret.params[0]/SECPERDAY))
print("PB %17.15g"%(ret.params[1]/SECPERDAY))
print("OM %17.15g"%ret.params[5])
print()
print("Generating plots...")
# make the plots...
amp = TWOPI*abs(ret.params[2])/ret.params[1] # 2 pi x / Porb
# plot the model for the full range of observations
if (max(time)-min(time) < 500*(ret.params[1]/SECPERDAY)): # but don't plot too many orbits...
figure(5)
model_time = arange(min(time)-0.1, max(time)+0.1, 0.001)
plot(model_time-model_time[0],doppler_period(ret.params, model_time)*1000.0,'r',alpha=0.5)
plot(time-model_time[0],period*1000.0,'o')
xlabel("Days + %.7f"%model_time[0])
ylabel("Pulsar Period (ms)")
# make more detailed plots around each observation
figure(3)
ip=1
for in_file in in_files:
subplot(nsubrows,nsubcols,ip)
plot_file_panel(in_file,ret.params)
ip+=1
show()
| 20,742 | 31.563579 | 115 | py |
presto | presto-master/bin/powerstats.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import input
from presto.events import *
def answer_yes(question):
yes = ['', 'Y', 'y', 'Yes', 'yes', 'YES',
'T', 't', 'True', 'true', 'TRUE']
return input('\n'+question) in yes
def ask_float(question, default=None):
while 1:
ans = input('\n'+question)
if not ans:
ans = default
try:
return float(ans)
except (ValueError, TypeError):
print("\nThat was not a valid number. Try again...\n")
def ask_int(question, default=None):
while 1:
ans = input('\n'+question)
if not ans:
ans = default
try:
return int(ans)
except (ValueError, TypeError):
print("\nThat was not a valid number. Try again...\n")
if __name__ == '__main__':
print("\nPower Statistics Calculation Routine")
conf = ask_float(\
"What confidence level would you like to use? [0.99] ", 0.99)
Ntot = ask_int(\
"How many data points were FFTd (N)? ")
dt = ask_float("What was the length in time (s) of each bin? ")
T = Ntot * dt
P_max = ask_float(\
"What was the maximum normalized power found? ")
rlo = 1
rhi = Ntot / 2
if answer_yes(\
"Was this an RZW acceleration search (y/n)? [y] "):
rlo = T * ask_float(\
"What was the lowest freq searched (Hz)? [1.0] ", 1.0)
rhi = T * ask_float(\
"What was the highest freq searched (Hz)? [%.2f] " %
((Ntot/2.0)/T), (Ntot/2.0)/T)
zlo = ask_float(\
"What was the lowest 'z' value searched? [-100] ", -100.0)
zhi = ask_float(\
"What was the highest 'z' value searched? [100] ", 100.0)
Nsearch = (rhi - rlo) * (zhi - zlo + 1.0) / 6.95
else:
Nsearch = ask_int(\
"How many independent bins were searched? [N/2] ", Ntot/2)
if answer_yes(\
"Was the data composed of binned counts (y/n)? [y] "):
numphot = ask_int("How many counts (photons) were there? ")
lofreq, hifreq = rlo / T, rhi / T
trial_freqs = (10.0**(Num.arange(7.0)-2.0)).tolist()
trial_freqs = [x for x in trial_freqs if x > lofreq and x < hifreq]
print("\nThe trial frequencies (Hz) are:", trial_freqs)
if answer_yes(\
"Would you like to add any more? [y] "):
new_freq = ask_float(\
"Enter a frequency (Hz) or '0' to stop. ")
while (new_freq):
trial_freqs.append(new_freq)
new_freq = ask_float(\
"Enter a frequency (Hz) or '0' to stop. ")
trial_freqs.sort()
print("\n\nCalculating...\n\n")
print("")
print(" Power Stats for Binned Data")
print(" -----------------------------------")
print(" Number of data points = %.0f" % Ntot)
print(" Time per sample (s) = %g" % dt)
print(" Total number of photons = %.0f" % numphot)
print(" Confidence Level = %g%%" % (100 * conf))
print(" Number of independent bins = %.2e" % Nsearch)
print(" Threshold Power (P_detect) > %.2f" % \
max_noise_power(Nsearch, 1, conf))
ulim = required_signal_power(P_max, 1, conf)
print(" Max Power Found (P_max) = %.2f" % P_max)
print(" Max Signal Power (P_limit) < %.2f" % ulim)
print(" Pulsed Fraction (P_limit) < %.3g" % \
pulsed_fraction_limit(numphot, ulim))
print("")
sens = []
ulim = []
for f in trial_freqs:
sens.append(binned_fft_sensitivity(Ntot, dt, f, Nsearch, 1, conf))
ulim.append(required_signal_power(P_max, 1, conf))
print(" Freq (Hz) = ", end=' ')
for f in trial_freqs:
print(" f=%-7g" % (f), end=' ')
print('\n '+'-'*len(trial_freqs)*11)
print(" Power Sensitivity > ", end=' ')
for s in sens:
print(" %-8.2f " % (s), end=' ')
print('')
pfract = []
for s in sens:
pfract.append(pulsed_fraction_limit(numphot, s))
print(" Pulsed Fraction < ", end=' ')
for p in pfract:
print(" %-8.3g " % (p), end=' ')
print('\n')
else:
print("\n\nCalculating...\n\n")
print("")
print(" Power Stats for Normal Data")
print(" -----------------------------------")
print(" Number of data points = %.0f" % Ntot)
print(" Confidence Level = %g%%" % (100 * conf))
print(" Number of independent bins = %.2e" % Nsearch)
print(" Threshold Power (P_detect) > %.2f" % \
max_noise_power(Nsearch/2, 1, conf))
sens = fft_sensitivity(Ntot, Nsearch, 1, conf)
print(" Power Sensitivity > %.2f" % sens)
ulim = required_signal_power(P_max, 1, conf)
print(" Max Power Found (P_max) = %.2f" % P_max)
print(" Max Signal Power (P_limit) < %.2f" % ulim)
print("")
| 5,220 | 39.161538 | 78 | py |
presto | presto-master/bin/orbellipsefit.py | #!/usr/bin/env python
# Fit an ellipse to a set of measured Periods and Accelerations to get an initial orbit estimate
# This uses the methods of Freire et al. (2001)
# This code was written by Paul Ray <[email protected]>
# Inputs are a set of .bestprof files or .par files from which the P0 and P1 (or F0 and F1) values
# and their errors are read. It can ignore points with too large an F1 error
#
from __future__ import print_function
from numpy import *
from pylab import *
from presto.psr_utils import *
import sys
from sys import argv
from presto import parfile
from matplotlib.patches import Ellipse
from scipy.optimize import leastsq
verbose = True
cspeed = 299792458.0 # m/s
def read_bestprof(filename,f1errmax=999.0):
infile = open(filename)
bary = N = 0
epoch = dt = p0 = p1 = p2 = 0.0
for line in infile.readlines():
if line[0]=="#":
if line.startswith("# T_sample"):
dt = float(line.split("=")[-1])
continue
if line.startswith("# Data Folded"):
N = float(line.split("=")[-1])
continue
if line.startswith("# Epoch_topo"):
try:
epochi = float(line.split("=")[-1].split(".")[0])
epochf = float("0."+line.split("=")[-1].split(".")[1])
epoch = epochi+epochf
except ValueError:
pass
continue
if line.startswith("# Epoch_bary"):
try:
epochi = float(line.split("=")[-1].split(".")[0])
epochf = float("0."+line.split("=")[-1].split(".")[1])
epoch = epochi+epochf
bary = 1
except ValueError:
pass
if ((bary and line.startswith("# P_bary")) or
(not bary and line.startswith("# P_topo"))):
valstr = line.split("=")[-1]
p0 = float(valstr.split("+")[0])/1000.0
p0err = float(valstr.split("+")[1][3:])/1000.0
continue
if ((bary and line.startswith("# P'_bary")) or
(not bary and line.startswith("# P'_topo"))):
valstr = line.split("=")[-1]
p1 = float(valstr.split("+")[0])
p1err = float(valstr.split("+")[1][3:])
continue
if ((bary and line.startswith("# P''_bary")) or
(not bary and line.startswith("# P''_topo"))):
p2 = float(line.split("=")[-1].split("+")[0])
continue
else:
break
f0,f0err,f1,f1err = pferrs(p0,p0err,p1,p1err)
print("%.4f %10.9g %8.3g %10.5g %8.3g" % (epoch,f0,f0err,f1,f1err), end=' ')
if (f1err > f1errmax):
print(" * Ignored *")
else:
print()
#print "----- ",filename
#print "PEPOCH ",epoch
#print "F0 ", f0
#print "F1 ", f1
return (epoch, N*dt, f0, f0err, f1, f1err)
def read_par(pfname,f1errmax=999.0):
pf = parfile.psr_par(pfname)
f0 = pf.F0
p0 = pf.P0
try:
f0err = pf.F0_ERR
except:
f0err = 2.0e-5
if not isfinite(f0err):
f0err = 3.0e-5
f1 = pf.F1
try:
p1 = pf.P1
except:
p1 = 0.0
try:
f1err = pf.F1_ERR
except:
f1err = 10.0e-8
mjd = pf.PEPOCH
if (verbose):
# print "%6s: %.4f F0 %10.9g +/- %8.03g F1 %10.5g +/- %8.03g" % (pfname,mjd,f0,f0err,f1,f1err)
print("%.4f %10.9g %8.3g %10.5g %8.3g" % (mjd,f0,f0err,f1,f1err), end=' ')
if (f1err > f1errmax):
print(" * Ignored *")
else:
print()
# print " P0 = %g, P1 = %g" % (p0,p1)
print("----- ",pfname)
print("PEPOCH ",mjd)
print("F0 ", f0)
print("F1 ", f1)
return mjd,f0,f0err,f1,f1err
def readPeriodAccelFromPars(parfilelist,f1errmax=3.0e-6):
mjds = []
f0s = []
f0errs = []
f1s = []
f1errs = []
accs = []
if (verbose):
print("MJD F0 F0_err F1 F1_err")
    for fn in parfilelist:
if fn.endswith('.bestprof'):
mjd,Tobs,f0,f0err,f1,f1err = read_bestprof(fn,f1errmax)
else:
mjd,f0,f0err,f1,f1err = read_par(fn,f1errmax)
mjds.append(mjd)
f0s.append(f0)
f0errs.append(f0err)
f1s.append(f1)
f1errs.append(f1err)
del f0,f1,f0err,f1err,mjd
mjds = array(mjds)
f0s = array(f0s)
f0errs = array(f0errs)
f1s = array(f1s)
f1errs = array(f1errs)
# Select only the values where Fdot is measured with some accuracy
idx = where(f1errs < f1errmax)
ps, perrs, p1s, p1errs = pferrs(f0s[idx],f0errs[idx],f1s[idx],f1errs[idx])
selmjds = mjds[idx]
accs = cspeed*p1s/ps
accerrs = absolute(accs*sqrt((p1errs/p1s)**2 + (perrs/ps)**2))
accfreqs = f0s[idx]
accfreqerrs = f0errs[idx]
return selmjds, ps, perrs, accs, accerrs
def parabola_funct(pars, x, y_measured, y_err):
'''Generic parabola fitting function.
pars is the array of parameters [p0, p1, p2].
Fit function is y = p2*x**2 + p1*x + p0
x, y_measured and y_err must all be same length'''
return ((pars[2]*x**2 + pars[1]*x + pars[0]) - y_measured)/y_err
def funct(pars, ps, Asq_measured, Asq_errs):
'''Fitting function from Eqn A1 of Freire et al. 2001.
pars[i] is the array of 3 parameters [a_0, a_1, a_2]
Asq_measured is the array of measures accelerations SQUARED
ps is the array of measured pulse periods.'''
return ((pars[2]*ps**2 + pars[1]*ps + pars[0]) - Asq_measured)/Asq_errs
def fitellipse(mjds, accs,accerrs,ps,P0_init,Porb_init,X_init):
'''Fit an orbit using Eqn A1 of Freire et al. 2001, MNRAS.
Period errors are assumed to be negligible.'''
asq = accs**2
asq_errs = 2*asq*(accerrs/accs)
apar_init = array([0.0, 0.0, 0.0])
# Init parameters based on initial orbit, using Eqs A2-A4.
A1 = 4.0*pi**2*X_init*cspeed/(Porb_init**2)
P1 = 2.0*pi*X_init*P0_init/Porb_init
apar_init[2] = -A1**2/(P1**2)
apar_init[1] = 2.0*P0_init*A1**2/(P1**2)
apar_init[0] = A1**2 - A1**2*P0_init**2/(P1**2)
#print "apar init = ",apar_init
out = leastsq(funct,apar_init,args=(ps,asq,asq_errs),full_output=1)
apar = out[0]
covar = out[1]
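    # Recover the orbital parameters by inverting Eqs A2-A4 of Freire et al.
    # (2001): the fitted parabola A^2 = a2*P^2 + a1*P + a0 opens downward
    # (a2 = -(A1/P1)^2) and peaks at the intrinsic period P0 = -a1/(2*a2);
    # the coefficients then give the orbital period Porb and the projected
    # semi-major axis X below.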
P0 = -apar[1]/(2.0*apar[2])
Porb = (2.0*pi*cspeed)/(P0*sqrt(-apar[2]))
X = Porb*sqrt(P0**2-apar[0]/apar[2])/(2.0*pi*P0)
A1 = 4.0*pi**2*X*cspeed/Porb**2
P1 = 2.0*pi*X*P0/Porb
figure(7)
errorbar(ps,asq,np.fabs(asq_errs),fmt='o')
xs = linspace(ps.min(),ps.max(),100)
plot(xs,apar_init[2]*xs**2 + apar_init[1]*xs + apar_init[0],'b--')
plot(xs,apar[2]*xs**2 + apar[1]*xs + apar[0],'r--')
title('Eqn A1 fit')
ylabel('Acc Squared')
xlabel('Period')
grid(1)
return P0, Porb, X, A1, P1
if __name__ == '__main__':
# First read the periods and accelerations from the parfiles
parfilelist = argv[1:]
if len(parfilelist)<1:
print("No par files specified")
sys.exit(1)
mjds,ps,perrs,accs,accerrs = readPeriodAccelFromPars(parfilelist,
f1errmax=3.0e-7)
print()
print("MJD :",mjds)
print("accs :",accs)
print("accerrs :",accerrs)
# Now setup initial parameter values based on observed periods and accs
P0_init = ps.mean()
P1_init = (ps.max()-ps.min())/2.0
A1_init = abs(accs).max()
Porb_init = 2.0*pi*cspeed*P1_init/(P0_init*A1_init)
X_init = P1_init**2*cspeed/(P0_init**2*A1_init)
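    # These initial guesses invert the circular-orbit relations used in
    # fitellipse(): A1 = 4*pi^2*X*c/Porb^2 (peak line-of-sight acceleration)
    # and P1 = 2*pi*X*P0/Porb (peak period excursion).  Their ratio gives
    # Porb = 2*pi*c*P1/(P0*A1), and substituting back gives X in light-seconds.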
vmin = cspeed*(ps.min()/P0_init - 1)
vmax = cspeed*(ps.max()/P0_init - 1)
print("vmin = %.2f km/s" % (vmin/1000.0,))
print("vmax = %.2f km/s" % (vmax/1000.0,))
print("amin = %.4f m/s^2" % (accs.min(),))
print("amax = %.4f m/s^2" % (accs.max(),))
print("pmin = %.6f ms" % (1000.0*ps.min(),))
print("pmax = %.6f ms" % (1000.0*ps.max(),))
print("Initial Values:")
print(" P0 = ",P0_init)
print(" Porb = %g s (%.3f days)" % (Porb_init,Porb_init/86400.0))
print(" X = ",X_init)
print(" A1 = ",A1_init)
print(" P1 = ",P1_init)
print()
# If enough points, do the ellipse fit
if len(mjds)>=3:
P0, Porb, X, A1, P1 = fitellipse(mjds,accs,accerrs,ps,P0_init,Porb_init,X_init)
print("Fitted Values:")
print(" P0 = ",P0)
print(" Porb = %g s (%.3f days)" % (Porb, Porb/86400.0))
print(" X = ",X)
print(" A1 = ",A1)
print(" P1 = ",P1)
#print "Mcomp,min = ",companion_mass_limit(Porb/86400.0,X)
else:
A1 = 0.0
P0 = 0.0
P1 = 0.0
# Plot initial and final ellipses with points
figure(1)
errorbar(1000.0*ps,accs,xerr=1000.0*perrs,yerr=accerrs,fmt='.')
grid(True)
ax = gca()
# Current initial ellipse is commented out
#ax.add_artist(Ellipse((P0_init*1000.0,0.0), 2*1000.0*P1_init, 2*A1_init,ls='dashed',color='blue',fc='none',lw=2))
if (A1>0.0 and P0 > 0.0 and P1 > 0.0):
ax.add_artist(Ellipse((P0*1000.0,0.0), 2*1000.0*P1, 2*A1,ls='dashed',color='red',fc='none',lw=2))
title('Acceleration vs. Period')
xlabel('Period (ms)')
ylabel('Acceleration (m/s^2)')
# With Porb and X determined, should look at Tasc
# This needs to be done...
if (A1 > 0.0) :
Porb_days = Porb/86400.0
phis = arctan(-accs*P1/(A1*(ps-P0)))
Tascs = mjds - phis*Porb_days/(2.0*pi)
# Make this do some kind of fit!
T0 = Tascs[0]
#print "phis = ",phis
#print "Tascs = ",Tascs
#print "Tascs in orbits = ",(Tascs-Tascs[0])/Porb_days
#figure(3)
#resids = (Tascs-Tascs[0])/Porb_days - floor((Tascs-Tascs[0])/Porb_days)
#plot(mjds,resids,'s')
#title('Tasc residuals')
#xlabel('Time (MJD)')
#grid(1)
print()
print("PAR file of fit: ")
print("P0 %.15f" % P0)
print("BINARY BT")
print("PB %.8f" % Porb_days)
print("A1 %.6f" % X)
print("T0 %.6f" % T0)
print("OM 0.0")
print("E 0.0")
show()
| 10,203 | 31.393651 | 118 | py |
presto | presto-master/bin/weights_to_ignorechan.py | #!/usr/bin/env python
import numpy as np
import sys
#import optparse
def read_weights(filename):
"""
    read_weights(filename):
Read the channels and weights from a .weights text file.
"""
chans, weights = np.loadtxt(filename, unpack=True)
return chans.astype(np.int64), weights.astype(np.float64)
def build_chanline(weights):
"""
build_chanline(weights):
Build an ignorechans line from a .weights file written by hand or rfifind.py
The ignorechans line can be used to ignore channels with prepdata, prepfold,
prepsubband, rfifind, and mpiprepsubband.
"""
N = len(weights)
outstr = ''
if N==0:
return outstr
lastbad = -2
ii = 0
while ii < len(weights):
if weights[ii] == 0.0:
if lastbad == ii-1:
outstr += '' if outstr[-1]==':' else ':'
else:
outstr += str(ii)
lastbad = ii
else:
if lastbad == ii-1:
outstr += str(ii-1)+',' if outstr[-1]==':' else ','
ii += 1
if outstr[-1]==':':
outstr += str(N-1)
return outstr if outstr[-1]!=',' else outstr[:-1]
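# Example (illustrative): build_chanline([1, 1, 0, 0, 0, 1, 0, 1]) returns
# "2:4,6", i.e. the zero-weight channels 2-4 and 6 are the ones to pass to
# the -ignorechan options of the PRESTO tools listed above.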
def build_pazline(chanline):
"""
build_pazline(chanline):
Build a command line for paz from an ignorechan line
"""
outstr = 'paz -e zap '
for part in chanline.split(','):
outstr += "-Z '"+part.replace(":", " ")+"' " if ":" \
in part else "-z "+part+" "
return outstr
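# Example (illustrative): build_pazline("2:4,6") returns
# "paz -e zap -Z '2 4' -z 6 ", i.e. channel ranges become -Z 'lo hi'
# arguments and single channels become -z arguments.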
if __name__=="__main__":
if len(sys.argv) < 2:
print("\nusage: {} file\n".format(sys.argv[0]))
print('\nOptionally, save the ignore channel list to a text file with,\n')
print("\nusage: {} file <output fname>\n".format(sys.argv[0]))
sys.exit(1)
# Read the channels and weights
chans, weights = read_weights(sys.argv[1])
# Get the chanline
chanline = build_chanline(weights)
print(chanline)
# Convert it to a paz command
pazline = build_pazline(chanline)
print("\n"+pazline)
# if output file name provided store the channel list to a file
if len(sys.argv) == 3:
with open(sys.argv[2], 'w') as ignorechan:
ignorechan.write(chanline)
| 2,250 | 27.1375 | 89 | py |
presto | presto-master/bin/rfifind_stats.py | #!/usr/bin/env python
import sys
import argparse
from presto import rfifind
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("statsfile", type=str, help="rfifind .stats file to compute the channel mask")
parser.add_argument("--fpower", type=float,
default=200,
help="Fourier power threshold above which entire channel will be ignored, default = 200")
parser.add_argument("--band_edge_frac", type=float,
default=0.01, help="Fraction of band edge channels to ignore, default = 0.01")
parser.add_argument("--avgsigma", type=float,
default=2.,
help="Channel mean threshold above which entire channel will be ignored, default = 2")
parser.add_argument("--stdsigma", type=float,
default=2.,
help="Channel std threshold above which entire channel will be ignored, default = 2")
parser.add_argument("--invert", default=False, dest="invert", action="store_true",
help="Flag to invert weights in case raw data has decreasing freq channels")
args = parser.parse_args()
a = rfifind.rfifind(args.statsfile)
sys.stderr.write("\nWARNING!: If raw data have channels in decreasing freq\n")
sys.stderr.write(" order, the channel ordering as given will be\n")
sys.stderr.write(" inverted! Use 'invertband=True' in \n")
sys.stderr.write(" write_weights() in that case!\n")
if (a.idata.telescope == 'GBT' and a.idata.lofreq < 1000.0):
sys.stderr.write("Data is from GBT Prime Focus, auto-flipping the weights/offsets...\n\n")
invert = True
else:
invert = args.invert
# Write the bandpass before we zap things
a.write_bandpass(invertband=invert)
# Now do the zapping and set the weights
a.set_zap_chans(power=args.fpower,
edges=args.band_edge_frac,
asigma=args.avgsigma,
ssigma=args.stdsigma,
usemask=True,
plot=True,
chans=[])
a.write_zap_chans()
a.set_weights_and_offsets()
a.write_weights(invertband=invert)
#a.write_weights_and_offsets(invertband=invert)
| 2,335 | 47.666667 | 113 | py |
presto | presto-master/bin/single_pulse_search.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import str, zip
from optparse import OptionParser
import bisect, os, sys, glob
import numpy as np
import scipy, scipy.signal, scipy.stats
from presto.presto import rfft, next2_to_n
from presto import infodata
from presto.Pgplot import *
# This is for Python 2/3 compatibility
def mycmp(a, b):
return ((a > b) - (a < b))
class candidate(object):
def __init__(self, DM, sigma, time, bin, downfact):
self.DM = DM
self.sigma = sigma
self.time = time
self.bin = bin
self.downfact = downfact
def __str__(self):
return "%7.2f %7.2f %13.6f %10d %3d\n"%\
(self.DM, self.sigma, self.time, self.bin, self.downfact)
def __eq__(self, other):
return (self.bin == other.bin)
def __ne__(self, other):
return (self.bin != other.bin)
def __lt__(self, other):
return (self.bin < other.bin)
def __le__(self, other):
return (self.bin <= other.bin)
def __gt__(self, other):
return (self.bin > other.bin)
def __ge__(self, other):
return (self.bin >= other.bin)
def __cmp__(self, other):
# Sort by time (i.e. bin) by default)
return mycmp(self.bin, other.bin)
def cmp_sigma(self, other):
#Comparison function to sort candidates by significance
retval = -mycmp(self.sigma, other.sigma)
return retval
def fft_convolve(fftd_data, fftd_kern, lo, hi):
"""
fft_convolve(fftd_data, fftd_kern, lo, hi):
Perform a convolution with the complex floating point vectors
'fftd_data' and 'fftd_kern'. The returned vector will start at
at bin 'lo' (must be an integer), and go up to but not
include bin 'hi' (also an integer).
"""
# Note: The initial FFTs should be done like:
# fftd_kern = rfft(kernel, -1)
# fftd_data = rfft(data, -1)
prod = np.multiply(fftd_data, fftd_kern)
prod.real[0] = fftd_kern.real[0] * fftd_data.real[0]
prod.imag[0] = fftd_kern.imag[0] * fftd_data.imag[0]
return rfft(prod, 1)[lo:hi].astype(np.float32)
def make_fftd_kerns(downfacts, fftlen):
fftd_kerns = []
for downfact in downfacts:
kern = np.zeros(fftlen, dtype=np.float32)
# These offsets produce kernels that give results
# equal to scipy.signal.convolve
if downfact % 2: # Odd number
kern[:downfact//2+1] += 1.0
kern[-(downfact//2):] += 1.0
else: # Even number
kern[:downfact//2+1] += 1.0
if (downfact > 2):
kern[-(downfact//2-1):] += 1.0
# The following normalization preserves the
# RMS=1 characteristic of the data
fftd_kerns.append(rfft(kern / np.sqrt(downfact), -1))
return fftd_kerns
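# Note on the sqrt(downfact) normalization above: convolving unit-variance
# noise with a width-N boxcar sums N roughly independent samples, which has
# variance N; dividing the kernel by sqrt(N) restores RMS=1 in the smoothed
# series, keeping the detection "sigma" comparable across boxcar widths.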
def prune_related1(hibins, hivals, downfact):
# Remove candidates that are close to other candidates
# but less significant. This one works on the raw
# candidate arrays and uses the single downfact
# that they were selected with.
toremove = set()
for ii in range(0, len(hibins)-1):
if ii in toremove: continue
xbin, xsigma = hibins[ii], hivals[ii]
for jj in range(ii+1, len(hibins)):
ybin, ysigma = hibins[jj], hivals[jj]
if (abs(ybin-xbin) > downfact//2):
break
else:
if jj in toremove:
continue
if (xsigma > ysigma):
toremove.add(jj)
else:
toremove.add(ii)
# Now zap them starting from the end
toremove = sorted(toremove, reverse=True)
for bin in toremove:
del(hibins[bin])
del(hivals[bin])
return hibins, hivals
def prune_related2(dm_candlist, downfacts):
# Remove candidates that are close to other candidates
# but less significant. This one works on the candidate
# instances and looks at the different downfacts of the
# the different candidates.
toremove = set()
for ii in range(0, len(dm_candlist)-1):
if ii in toremove: continue
xx = dm_candlist[ii]
xbin, xsigma = xx.bin, xx.sigma
for jj in range(ii+1, len(dm_candlist)):
yy = dm_candlist[jj]
ybin, ysigma = yy.bin, yy.sigma
if (abs(ybin-xbin) > max(downfacts)//2):
break
else:
if jj in toremove:
continue
prox = max([xx.downfact//2, yy.downfact//2, 1])
if (abs(ybin-xbin) <= prox):
if (xsigma > ysigma):
toremove.add(jj)
else:
toremove.add(ii)
# Now zap them starting from the end
toremove = sorted(toremove, reverse=True)
for bin in toremove:
del(dm_candlist[bin])
return dm_candlist
def prune_border_cases(dm_candlist, offregions):
# Ignore those that are located within a half-width
# of the boundary between data and padding
#print offregions
toremove = set()
for ii in range(len(dm_candlist)-1, -1, -1):
cand = dm_candlist[ii]
loside = cand.bin-cand.downfact/2
hiside = cand.bin+cand.downfact/2
if hiside < offregions[0][0]: break
for off, on in offregions:
if (hiside > off and loside < on):
toremove.add(ii)
# Now zap them starting from the end
toremove = sorted(toremove, reverse=True)
for ii in toremove:
del(dm_candlist[ii])
return dm_candlist
full_usage = """
usage: single_pulse_search.py [options] .dat files _or_ .singlepulse files
[-h, --help] : Display this help
[-m, --maxwidth] : Set the max downsampling in sec (see below for default)
[-p, --noplot] : Look for pulses but do not generate a plot
[-t, --threshold] : Set a different threshold SNR (default=5.0)
[-x, --xwin] : Don't make a postscript plot, just use an X-window
  [-s, --start]     : Only plot events occurring after this time (s)
  [-e, --end]       : Only plot events occurring before this time (s)
[-g, --glob] : Use the files from these glob expressions (in quotes)
[-f, --fast] : Use a less-accurate but much faster method of detrending
[-b, --nobadblocks] : Don't check for bad-blocks (may save strong pulses)
[-d, --detrendlen] : Chunksize for detrending (pow-of-2 in 1000s, default=1)
Perform a single-pulse search (or simply re-plot the results of a
single-pulse search) on a set of de-dispersed time series (.dat
files).
The search attempts to find pulses by matched-filtering the data with
a series of different width boxcar functions. The possible boxcar
sizes are [1, 2, 3, 4, 6, 9, 14, 20, 30, 45, 70, 100, 150, 220, 300]
bins. By default the boxcars <= 30 are used. You can specify
that the larger boxcars are used with the -m (or --maxwidth) option.
The matched filtering (and accounting for all the possible 'phase'
offsets of each boxcar) is accomplished by convolving the boxcars
with the full resolution data. 'Duplicate' candidates from this
process are filtered, leaving only the most significant. The time
series are initially smoothed (by default) using a piecewise linear
fit to the data where each piece is 1000 data points long.
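For example (illustrative): with 0.5 ms sampling, '-m 0.01' keeps only the
smoothing boxcars of 20 bins or fewer (2, 3, 4, 6, 9, 14, and 20), since a
30-bin boxcar would span 15 ms.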
If the input files are .singlepulse files, we won't actually perform
a search, we'll only read in the output .singlepulse files and make
a plot using the information they contain (along with the
corresponding .inf files).
Notes on usage and performance:
-- single_pulse_search.py is tuned for finding *narrow* pulses
(i.e. those of only a few bins width). Because of this, you
should always search appropriately downsampled data (as
recommended by DDplan.py, for instance) where dispersion
smearing is <~ 1 time series bin.
-- the linear-piecewise detrending is very useful in long
observations with modern instrumentation where you can see
long timescale power fluctuations. Strong pulses can skew the
statistics of the 1000-bin chunks, though, and caused some
suppression in the detection levels of bright pulses (weak
pulses are mostly unaffected since they don't strongly change
the statistics). If your data have no long-timescale
fluctuations (for instance, if you are processing old 1-bit
analog filterbank data which is AC-coupled or if you remove
rednoise via realfft/rednoise/(inverse-)realfft), I recommend
using the -f/--fast flag. And if you want to find wide
pulses, it might be worth making the chunksize bigger (i.e.
4000 or 8000).
-- The bad-block detection and removal code can and does remove
blocks that have very strong, and particularly strong and broad,
pulses in them. It can also quite effectively remove RFI-
infused portions of the data. Whether to turn it on or off
depends on your data. Note that if there are multiple pulses,
only the brightest will usually be "bad-blocked" and removed.
-- The fourier-domain matched filtering used here has no phase-
dependent effects. So a 15-bin pulse can be found with equal
significance no matter which bin it starts in in the time series.
-- The definition of "sigma" used is possibly slightly different
from that used in other codes for S/N:
sigma = sum(signal-bkgd_level)/RMS/sqrt(boxcar_width)
where the bkgd_level is typically 0 after detrending and RMS=1
after normalization. This definition has the advantage that
you will get (basically) the same sigma for any pulse no
matter how much the input time series has been downsampled as
long as the pulse is still resolved.
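       As a rough illustrative example: a pulse whose detrended and
       normalized samples sum to 20 over a 4-bin boxcar would have
       sigma = 20 / 1 / sqrt(4) = 10.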
Copyright Scott Ransom <[email protected]>, 2015
"""
usage = "usage: %prog [options] .dat files _or_ .singlepulse files"
def read_singlepulse_files(infiles, threshold, T_start, T_end):
DMs = []
candlist = []
num_v_DMstr = {}
for ii, infile in enumerate(infiles):
if infile.endswith(".singlepulse"):
filenmbase = infile[:infile.rfind(".singlepulse")]
else:
filenmbase = infile
info = infodata.infodata(filenmbase+".inf")
DMstr = "%.2f"%info.DM
DMs.append(info.DM)
num_v_DMstr[DMstr] = 0
if ii==0:
info0 = info
if os.stat(infile)[6]:
try:
cands = np.loadtxt(infile)
if len(cands.shape)==1:
cands = np.asarray([cands])
for cand in cands:
if cand[2] < T_start: continue
if cand[2] > T_end: break
if cand[1] >= threshold:
candlist.append(candidate(*cand))
num_v_DMstr[DMstr] += 1
            except IndexError: # No candidates in the file
                pass
DMs.sort()
return info0, DMs, candlist, num_v_DMstr
def main():
parser = OptionParser(usage)
parser.add_option("-x", "--xwin", action="store_true", dest="xwin",
default=False, help="Don't make a postscript plot, just use an X-window")
parser.add_option("-p", "--noplot", action="store_false", dest="makeplot",
default=True, help="Look for pulses but do not generate a plot")
parser.add_option("-m", "--maxwidth", type="float", dest="maxwidth", default=0.0,
help="Set the max downsampling in sec (see below for default)")
parser.add_option("-t", "--threshold", type="float", dest="threshold", default=5.0,
help="Set a different threshold SNR (default=5.0)")
parser.add_option("-s", "--start", type="float", dest="T_start", default=0.0,
help="Only plot events occuring after this time (s)")
parser.add_option("-e", "--end", type="float", dest="T_end", default=1e9,
help="Only plot events occuring before this time (s)")
parser.add_option("-g", "--glob", type="string", dest="globexp", default=None,
help="Process the files from this glob expression")
parser.add_option("-f", "--fast", action="store_true", dest="fast",
default=False, help="Use a faster method of de-trending (2x speedup)")
parser.add_option("-b", "--nobadblocks", action="store_false", dest="badblocks",
default=True, help="Don't check for bad-blocks (may save strong pulses)")
parser.add_option("-d", "--detrendlen", type="int", dest="detrendfact", default=1,
help="Chunksize for detrending (pow-of-2 in 1000s)")
(opts, args) = parser.parse_args()
if len(args)==0:
if opts.globexp==None:
print(full_usage)
sys.exit(0)
else:
args = []
for globexp in opts.globexp.split():
args += glob.glob(globexp)
useffts = True
dosearch = True
if opts.xwin:
pgplot_device = "/XWIN"
else:
pgplot_device = ""
fftlen = 8192 # Should be a power-of-two for best speed
chunklen = 8000 # Must be at least max_downfact less than fftlen
assert(opts.detrendfact in [1,2,4,8,16,32])
detrendlen = opts.detrendfact*1000
if (detrendlen > chunklen):
chunklen = detrendlen
fftlen = int(next2_to_n(chunklen))
blocks_per_chunk = chunklen // detrendlen
overlap = (fftlen - chunklen) // 2
worklen = chunklen + 2*overlap # currently it is fftlen...
max_downfact = 30
default_downfacts = [2, 3, 4, 6, 9, 14, 20, 30, 45, 70, 100, 150, 220, 300]
if args[0].endswith(".singlepulse"):
filenmbase = args[0][:args[0].rfind(".singlepulse")]
dosearch = False
elif args[0].endswith(".dat"):
filenmbase = args[0][:args[0].rfind(".dat")]
else:
filenmbase = args[0]
# Don't do a search, just read results and plot
if not dosearch:
info, DMs, candlist, num_v_DMstr = \
read_singlepulse_files(args, opts.threshold, opts.T_start, opts.T_end)
orig_N, orig_dt = int(info.N), info.dt
obstime = orig_N * orig_dt
else:
DMs = []
candlist = []
num_v_DMstr = {}
# Loop over the input files
for filenm in args:
if filenm.endswith(".dat"):
filenmbase = filenm[:filenm.rfind(".dat")]
else:
filenmbase = filenm
info = infodata.infodata(filenmbase+".inf")
DMstr = "%.2f"%info.DM
DMs.append(info.DM)
N, dt = int(info.N), info.dt
obstime = N * dt
# Choose the maximum width to search based on time instead
# of bins. This helps prevent increased S/N when the downsampling
# changes as the DM gets larger.
if opts.maxwidth > 0.0:
downfacts = [x for x in default_downfacts if x*dt <= opts.maxwidth]
else:
downfacts = [x for x in default_downfacts if x <= max_downfact]
if len(downfacts) == 0:
downfacts = [default_downfacts[0]]
if (filenm == args[0]):
orig_N = N
orig_dt = dt
if useffts:
fftd_kerns = make_fftd_kerns(default_downfacts, fftlen)
if info.breaks:
offregions = list(zip([x[1] for x in info.onoff[:-1]],
[x[0] for x in info.onoff[1:]]))
# If last break spans to end of file, don't read it in (its just padding)
if offregions[-1][1] == N - 1:
N = offregions[-1][0] + 1
outfile = open(filenmbase+'.singlepulse', mode='w')
# Compute the file length in detrendlens
roundN = N // detrendlen * detrendlen
numchunks = roundN // chunklen
# Read in the file
print('Reading "%s"...'%filenm)
timeseries = np.fromfile(filenm, dtype=np.float32, count=roundN)
# Split the timeseries into chunks for detrending
numblocks = roundN // detrendlen
timeseries.shape = (numblocks, detrendlen)
stds = np.zeros(numblocks, dtype=np.float64)
# de-trend the data one chunk at a time
print(' De-trending the data and computing statistics...')
for ii, chunk in enumerate(timeseries):
if opts.fast: # use median removal instead of detrending (2x speedup)
tmpchunk = chunk.copy()
tmpchunk.sort()
med = tmpchunk[detrendlen//2]
chunk -= med
tmpchunk -= med
else:
# The detrend calls are the most expensive in the program
timeseries[ii] = scipy.signal.detrend(chunk, type='linear')
tmpchunk = timeseries[ii].copy()
tmpchunk.sort()
# The following gets rid of (hopefully) most of the
# outlying values (i.e. power dropouts and single pulses)
# If you throw out 5% (2.5% at bottom and 2.5% at top)
# of random gaussian deviates, the measured stdev is ~0.871
# of the true stdev. Thus the 1.0/0.871=1.148 correction below.
# The following is roughly .std() since we already removed the median
stds[ii] = np.sqrt((tmpchunk[detrendlen//40:-detrendlen//40]**2.0).sum() /
(0.95*detrendlen))
stds *= 1.148
# sort the standard deviations and separate those with
# very low or very high values
sort_stds = stds.copy()
sort_stds.sort()
        # identify the differences with the largest values (this
        # will split off the chunks with very low and very high stds)
locut = (sort_stds[1:numblocks//2+1] -
sort_stds[:numblocks//2]).argmax() + 1
hicut = (sort_stds[numblocks//2+1:] -
sort_stds[numblocks//2:-1]).argmax() + numblocks//2 - 2
std_stds = np.std(sort_stds[locut:hicut])
median_stds = sort_stds[(locut+hicut)//2]
print(" pseudo-median block standard deviation = %.2f" % (median_stds))
if (opts.badblocks):
lo_std = median_stds - 4.0 * std_stds
hi_std = median_stds + 4.0 * std_stds
# Determine a list of "bad" chunks. We will not search these.
bad_blocks = np.nonzero((stds < lo_std) | (stds > hi_std))[0]
print(" identified %d bad blocks out of %d (i.e. %.2f%%)" % \
(len(bad_blocks), len(stds),
100.0*float(len(bad_blocks))/float(len(stds))))
stds[bad_blocks] = median_stds
else:
bad_blocks = []
print(" Now searching...")
# Now normalize all of the data and reshape it to 1-D
timeseries /= stds[:,np.newaxis]
timeseries.shape = (roundN,)
# And set the data in the bad blocks to zeros
# Even though we don't search these parts, it is important
# because of the overlaps for the convolutions
for bad_block in bad_blocks:
loind, hiind = bad_block*detrendlen, (bad_block+1)*detrendlen
timeseries[loind:hiind] = 0.0
# Convert to a set for faster lookups below
bad_blocks = set(bad_blocks)
# Step through the data
dm_candlist = []
for chunknum in range(numchunks):
loind = chunknum*chunklen-overlap
hiind = (chunknum+1)*chunklen+overlap
# Take care of beginning and end of file overlap issues
if (chunknum==0): # Beginning of file
chunk = np.zeros(worklen, dtype=np.float32)
chunk[overlap:] = timeseries[loind+overlap:hiind]
elif (chunknum==numchunks-1): # end of the timeseries
chunk = np.zeros(worklen, dtype=np.float32)
chunk[:-overlap] = timeseries[loind:hiind-overlap]
else:
chunk = timeseries[loind:hiind]
# Make a set with the current block numbers
lowblock = blocks_per_chunk * chunknum
currentblocks = set(np.arange(blocks_per_chunk) + lowblock)
localgoodblocks = np.asarray(list(currentblocks -
bad_blocks)) - lowblock
# Search this chunk if it is not all bad
if len(localgoodblocks):
# This is the good part of the data (end effects removed)
goodchunk = chunk[overlap:-overlap]
# need to pass blocks/chunklen, localgoodblocks
# dm_candlist, dt, opts.threshold to cython routine
# Search non-downsampled data first
# NOTE: these nonzero() calls are some of the most
# expensive calls in the program. Best bet would
# probably be to simply iterate over the goodchunk
# in C and append to the candlist there.
hibins = np.flatnonzero(goodchunk>opts.threshold)
hivals = goodchunk[hibins]
hibins += chunknum * chunklen
hiblocks = hibins // detrendlen
# Add the candidates (which are sorted by bin)
for bin, val, block in zip(hibins, hivals, hiblocks):
if block not in bad_blocks:
time = bin * dt
dm_candlist.append(candidate(info.DM, val, time, bin, 1))
# Prepare our data for the convolution
if useffts: fftd_chunk = rfft(chunk, -1)
# Now do the downsampling...
for ii, downfact in enumerate(downfacts):
if useffts:
# Note: FFT convolution is faster for _all_ downfacts, even 2
goodchunk = fft_convolve(fftd_chunk, fftd_kerns[ii],
overlap, -overlap)
else:
# The normalization of this kernel keeps the post-smoothing RMS = 1
kernel = np.ones(downfact, dtype=np.float32) / \
np.sqrt(downfact)
smoothed_chunk = scipy.signal.convolve(chunk, kernel, 1)
goodchunk = smoothed_chunk[overlap:-overlap]
#hibins = np.nonzero(goodchunk>opts.threshold)[0]
hibins = np.flatnonzero(goodchunk>opts.threshold)
hivals = goodchunk[hibins]
hibins += chunknum * chunklen
hiblocks = hibins // detrendlen
hibins = hibins.tolist()
hivals = hivals.tolist()
# Now walk through the new candidates and remove those
# that are not the highest but are within downfact/2
# bins of a higher signal pulse
hibins, hivals = prune_related1(hibins, hivals, downfact)
# Insert the new candidates into the candlist, but
# keep it sorted...
for bin, val, block in zip(hibins, hivals, hiblocks):
if block not in bad_blocks:
time = bin * dt
bisect.insort(dm_candlist,
candidate(info.DM, val, time, bin, downfact))
# Now walk through the dm_candlist and remove the ones that
# are within the downsample proximity of a higher
# signal-to-noise pulse
dm_candlist = prune_related2(dm_candlist, downfacts)
print(" Found %d pulse candidates"%len(dm_candlist))
# Get rid of those near padding regions
if info.breaks: prune_border_cases(dm_candlist, offregions)
# Write the pulses to an ASCII output file
if len(dm_candlist):
#dm_candlist.sort(cmp_sigma)
outfile.write("# DM Sigma Time (s) Sample Downfact\n")
for cand in dm_candlist:
outfile.write(str(cand))
outfile.close()
# Add these candidates to the overall candidate list
for cand in dm_candlist:
candlist.append(cand)
num_v_DMstr[DMstr] = len(dm_candlist)
if (opts.makeplot):
# Step through the candidates to make a SNR list
DMs.sort()
snrs = []
for cand in candlist:
if not np.isinf(cand.sigma):
snrs.append(cand.sigma)
if snrs:
maxsnr = max(int(max(snrs)), int(opts.threshold)) + 3
else:
maxsnr = int(opts.threshold) + 3
# Generate the SNR histogram
snrs = np.asarray(snrs)
(num_v_snr, edges) = np.histogram(snrs,
int(maxsnr-opts.threshold+1),
[opts.threshold, maxsnr])
snrs = edges[:-1] + 0.5*(edges[1]-edges[0])
num_v_snr = num_v_snr.astype(np.float32)
num_v_snr[num_v_snr==0.0] = 0.001
# Generate the DM histogram
num_v_DM = np.zeros(len(DMs))
for ii, DM in enumerate(DMs):
num_v_DM[ii] = num_v_DMstr["%.2f"%DM]
DMs = np.asarray(DMs)
# open the plot device
short_filenmbase = filenmbase[:filenmbase.find("_DM")]
if opts.T_end > obstime:
opts.T_end = obstime
if pgplot_device:
ppgplot.pgopen(pgplot_device)
else:
if (opts.T_start > 0.0 or opts.T_end < obstime):
ppgplot.pgopen(short_filenmbase+'_%.0f-%.0fs_singlepulse.ps/VPS'%
(opts.T_start, opts.T_end))
else:
ppgplot.pgopen(short_filenmbase+'_singlepulse.ps/VPS')
ppgplot.pgpap(7.5, 1.0) # Width in inches, aspect
# plot the SNR histogram
ppgplot.pgsvp(0.06, 0.31, 0.6, 0.87)
ppgplot.pgswin(opts.threshold, maxsnr,
np.log10(0.5), np.log10(2*max(num_v_snr)))
ppgplot.pgsch(0.8)
ppgplot.pgbox("BCNST", 0, 0, "BCLNST", 0, 0)
ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Signal-to-Noise")
ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Number of Pulses")
ppgplot.pgsch(1.0)
ppgplot.pgbin(snrs, np.log10(num_v_snr), 1)
# plot the DM histogram
ppgplot.pgsvp(0.39, 0.64, 0.6, 0.87)
# Add [1] to num_v_DM in YMAX below so that YMIN != YMAX when max(num_v_DM)==0
ppgplot.pgswin(min(DMs)-0.5, max(DMs)+0.5, 0.0, 1.1*max(num_v_DM+[1]))
ppgplot.pgsch(0.8)
ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, r"DM (pc cm\u-3\d)")
ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Number of Pulses")
ppgplot.pgsch(1.0)
ppgplot.pgbin(DMs, num_v_DM, 1)
# plot the SNR vs DM plot
ppgplot.pgsvp(0.72, 0.97, 0.6, 0.87)
ppgplot.pgswin(min(DMs)-0.5, max(DMs)+0.5, opts.threshold, maxsnr)
ppgplot.pgsch(0.8)
ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, r"DM (pc cm\u-3\d)")
ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Signal-to-Noise")
ppgplot.pgsch(1.0)
cand_ts = np.zeros(len(candlist), dtype=np.float32)
cand_SNRs = np.zeros(len(candlist), dtype=np.float32)
cand_DMs = np.zeros(len(candlist), dtype=np.float32)
for ii, cand in enumerate(candlist):
cand_ts[ii], cand_SNRs[ii], cand_DMs[ii] = \
cand.time, cand.sigma, cand.DM
ppgplot.pgpt(cand_DMs, cand_SNRs, 20)
# plot the DM vs Time plot
ppgplot.pgsvp(0.06, 0.97, 0.08, 0.52)
ppgplot.pgswin(opts.T_start, opts.T_end, min(DMs)-0.5, max(DMs)+0.5)
ppgplot.pgsch(0.8)
ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time (s)")
ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, r"DM (pc cm\u-3\d)")
# Circles are symbols 20-26 in increasing order
snr_range = 12.0
cand_symbols = (cand_SNRs-opts.threshold)/snr_range * 6.0 + 20.5
cand_symbols = cand_symbols.astype(np.int32)
cand_symbols[cand_symbols>26] = 26
for ii in [26, 25, 24, 23, 22, 21, 20]:
inds = np.nonzero(cand_symbols==ii)[0]
ppgplot.pgpt(cand_ts[inds], cand_DMs[inds], ii)
# Now fill the infomation area
ppgplot.pgsvp(0.05, 0.95, 0.87, 0.97)
ppgplot.pgsch(1.0)
ppgplot.pgmtxt('T', 0.5, 0.0, 0.0,
"Single pulse results for '%s'"%short_filenmbase)
ppgplot.pgsch(0.8)
# first row
ppgplot.pgmtxt('T', -1.1, 0.02, 0.0, 'Source: %s'%\
info.object)
ppgplot.pgmtxt('T', -1.1, 0.33, 0.0, 'RA (J2000):')
ppgplot.pgmtxt('T', -1.1, 0.5, 0.0, info.RA)
ppgplot.pgmtxt('T', -1.1, 0.73, 0.0, 'N samples: %.0f'%orig_N)
# second row
ppgplot.pgmtxt('T', -2.4, 0.02, 0.0, 'Telescope: %s'%\
info.telescope)
ppgplot.pgmtxt('T', -2.4, 0.33, 0.0, 'DEC (J2000):')
ppgplot.pgmtxt('T', -2.4, 0.5, 0.0, info.DEC)
ppgplot.pgmtxt('T', -2.4, 0.73, 0.0, 'Sampling time: %.2f \gms'%\
(orig_dt*1e6))
# third row
if info.instrument.find("pigot") >= 0:
instrument = "Spigot"
else:
instrument = info.instrument
ppgplot.pgmtxt('T', -3.7, 0.02, 0.0, 'Instrument: %s'%instrument)
if (info.bary):
ppgplot.pgmtxt('T', -3.7, 0.33, 0.0, r'MJD\dbary\u: %.12f'%info.epoch)
else:
ppgplot.pgmtxt('T', -3.7, 0.33, 0.0, r'MJD\dtopo\u: %.12f'%info.epoch)
ppgplot.pgmtxt('T', -3.7, 0.73, 0.0, r'Freq\dctr\u: %.1f MHz'%\
((info.numchan/2-0.5)*info.chan_width+info.lofreq))
ppgplot.pgiden()
ppgplot.pgend()
if __name__ == '__main__':
if (0):
# The following is for profiling
import hotshot
prof = hotshot.Profile("hotshot_edi_stats")
prof.runcall(main)
prof.close()
# To see the results:
if (0):
        from hotshot import stats
s = stats.load("hotshot_edi_stats")
s.sort_stats("time").print_stats()
else:
# print(sys.version)
main()
| 31,275 | 44.261939 | 95 | py |
presto | presto-master/bin/tim2dat.py | #!/usr/bin/env python
from __future__ import print_function
import astropy.coordinates as coords
from builtins import str
import os
import os.path
import argparse
import sys
import getpass
import numpy as np
from presto import sigproc
BLOCKSIZE = 10000 # Amount of data to copy at a time
# from input file to output file (in samples)
def write_inf_file(datfn, hdr, hdrlen):
"""Write a PRESTO .inf file given a .dat file and
a dictionary of SIGPROC-style header values.
Inputs:
datfn: The PRESTO .dat file to write a .inf file for.
hdr: A dictionary of SIGPROC header values, as produced
by PRESTO's sigproc.read_header.
hdrlen: Length (in bytes) of SIGPROC file's header.
Output:
inffn: The corresponding .inf file that is created.
"""
if not datfn.endswith(".dat"):
raise ValueError("Was expecting a file name ending with '.dat'. "
"Got: %s" % datfn)
size = os.path.getsize(datfn)
if size % 4:
raise ValueError("Bad size (%d bytes) for PRESTO .dat file (%s)"
"Should be multiple of 4 because samples are "
"32-bit floats." % (size, datfn))
    N = size // 4 # Number of samples
pos = coords.SkyCoord(sigproc.ra2radians(hdr['src_raj']),
sigproc.dec2radians(hdr['src_dej']),
frame='icrs', unit='rad')
rastr, decstr = pos.to_string('hmsdms', sep=':',
precision=4, pad=True).split()
inffn = datfn[:-4]+".inf"
with open(inffn, 'w') as ff:
ff.write(" Data file name without suffix = %s\n" %
os.path.basename(datfn))
ff.write(" Telescope used = %s\n" %
sigproc.ids_to_telescope[hdr['telescope_id']])
ff.write(" Instrument used = %s\n" %
sigproc.ids_to_machine.get('machine_id', 'UNKNOWN'))
ff.write(" Object being observed = %s\n" %
hdr['source_name'])
ff.write(" J2000 Right Ascension (hh:mm:ss.ssss) = %s\n" %
rastr)
ff.write(" J2000 Declination (dd:mm:ss.ssss) = %s\n" %
decstr)
ff.write(" Data observed by = UNKNOWN\n")
ff.write(" Epoch of observation (MJD) = %05.15f\n" %
hdr['tstart'])
ff.write(" Barycentered? (1=yes, 0=no) = %d\n" %
hdr['barycentric'])
ff.write(" Number of bins in the time series = %d\n" % N)
ff.write(" Width of each time series bin (sec) = %.15g\n" %
hdr['tsamp'])
ff.write(" Any breaks in the data? (1 yes, 0 no) = 0\n")
        if 'pulsarcentric' in hdr:
ff.write(" Orbit removed? (1=yes, 0=no) = %d\n" %
hdr['pulsarcentric'])
ff.write(" Dispersion measure (cm-3 pc) = %f\n" %
hdr['refdm'])
ff.write(" Central freq of low channel (Mhz) = %f\n" %
hdr['fch1'])
        if 'foff' in hdr:
ff.write(" Total bandwidth (Mhz) = %f\n" %
(hdr['nchans']*hdr['foff']))
else: # what else can we do?
ff.write(" Total bandwidth (Mhz) = %f\n" %
100.0)
ff.write(" Number of channels = %d\n" %
hdr['nchans'])
        if 'foff' in hdr:
ff.write(" Channel bandwidth (Mhz) = %d\n" %
hdr['foff'])
else: # what else can we do?
ff.write(" Channel bandwidth (Mhz) = %d\n" %
100.0)
ff.write(" Data analyzed by = %s\n" %
getpass.getuser())
ff.write(" Any additional notes:\n"
" File converted from SIGPROC .tim time series\n"
" with PRESTO's tim2dat.py, written by Patrick Lazarus\n")
return inffn
def convert_tim_to_dat(tim):
"""Convert a SIGPROC time series .tim file to a
PRESTO .dat time series
Input:
tim: The SIGPROC .tim time series file to convert.
Output:
datfn: The PRESTO .dat time series file
"""
if not tim.endswith(".tim"):
raise ValueError("Was expecting a file name ending with '.tim'. "
"Got: %s" % tim)
path, fn = os.path.split(tim)
basenm = fn[:-4]
outfn = os.path.join(path, basenm+".dat")
hdr, hdrlen = sigproc.read_header(tim)
N = sigproc.samples_per_file(tim, hdr, hdrlen)
Ndone = 0
status = -1
with open(tim, 'rb') as inff, open(outfn, 'wb') as outff:
inff.seek(hdrlen)
data = np.fromfile(inff, dtype='float32', count=BLOCKSIZE)
while data.size:
data.tofile(outff)
Ndone += data.size
data = np.fromfile(inff, dtype='float32', count=BLOCKSIZE)
newstatus = int(100.0*Ndone/N)
if newstatus > status:
sys.stdout.write(" %d %%\r" % newstatus)
sys.stdout.flush()
status = newstatus
return outfn
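# Note: the copy loop above assumes 32-bit float samples in the .tim file
# (the np.fromfile dtype); since PRESTO .dat files are also raw 32-bit
# floats, the "conversion" only strips the SIGPROC header and pairs the data
# with a matching .inf file (see write_inf_file above).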
def main():
for tim in args.timfiles:
print("Working on %s" % tim)
if args.write_dat:
try:
datfn = convert_tim_to_dat(tim)
print(" Wrote PRESTO time series: %s" % datfn)
except ValueError as e:
sys.stderr.write("Error encountered when converting on %s" % tim)
sys.stderr.write(str(e))
else:
            datfn = tim[:-3]+"dat"
hdr, hdrlen = sigproc.read_header(tim)
inffn = write_inf_file(datfn, hdr, hdrlen)
print(" Wrote info data: %s" % inffn)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("timfiles", nargs="+",
help="SIGPROC .tim time series files to convert to "
"PRESTO *.dat time series files")
parser.add_argument("--inf-only", dest='write_dat', action='store_false',
help="Only produce the .inf file, not the .dat file."
"(Default: create both .dat and .inf files)")
args = parser.parse_args()
main()
| 6,508 | 40.196203 | 81 | py |
presto | presto-master/bin/fb_truncate.py | #!/usr/bin/env python
"""
A script to truncate a filterbank file in time/frequency.
Patrick Lazarus, Aug 27, 2012
"""
from __future__ import print_function
import sys
import copy
from argparse import ArgumentParser
import numpy as np
from presto import filterbank
BLOCKSIZE = 1e5 # Number of spectra to manipulate at once
def main(args):
infn = args[0]
print("Reading filterbank file (%s)" % infn)
fil = filterbank.FilterbankFile(infn)
if options.start_time is None:
startbin = 0
else:
startbin = int(np.round(options.start_time/fil.tsamp))
if options.end_time is None:
endbin = fil.nspec
else:
endbin = int(np.round(options.end_time/fil.tsamp))+1
new_nspec = endbin-startbin
if new_nspec <= 0:
raise ValueError("Bad number of spectra to be written (%d). " \
"Check start/end times." % new_nspec)
# Determine lo/hi channels to write to file
# If high frequencies come first in spectra 'hichan' refers to
# the lo-freq cutoff and 'lochan' refers to the hi-freq cutoff.
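    # Illustrative example: with fch1 = 1500 MHz, foff = -1 MHz and
    # --lo-freq 1400, ichan = round((1400 - 1500)/-1) = 100, so hichan = 101
    # and channel 100 (centered at 1400 MHz) is the last channel kept.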
if options.lo_freq is None:
if fil.foff > 0:
lochan = 0
else:
hichan = fil.nchans
else:
ichan = int(np.round((options.lo_freq-fil.fch1)/fil.foff))
if fil.foff > 0:
lochan = ichan
else:
hichan = ichan+1
if options.hi_freq is None:
if fil.foff > 0:
hichan = fil.nchans
else:
lochan = 0
else:
ichan = int(np.round((options.hi_freq-fil.fch1)/fil.foff))
if fil.foff > 0:
hichan = ichan+1
else:
lochan = ichan
new_nchans = hichan-lochan
if new_nchans <= 0:
raise ValueError("Bad number of channels to be written (%d). " \
"Check lo/hi frequencies." % new_nchans)
print("Will extract")
print(" %d bins (%d to %d incl.)" % (new_nspec, startbin, endbin-1))
print(" (Original num bins: %d)" % fil.nspec)
print(" %d channels (%d to %d incl.)" % (new_nchans, lochan, hichan-1))
print(" (Original num chans: %d)" % fil.nchans)
# Create output file
outfn = options.outname % fil.header
print("Creating out file: %s" % outfn)
outhdr = copy.deepcopy(fil.header)
outhdr['nchans'] = new_nchans
outhdr['fch1'] = fil.frequencies[lochan]
filterbank.create_filterbank_file(outfn, outhdr, nbits=fil.nbits)
outfil = filterbank.FilterbankFile(outfn, mode='write')
# Write data
sys.stdout.write(" %3.0f %%\r" % 0)
sys.stdout.flush()
nblocks = int(new_nspec/options.block_size)
remainder = new_nspec % options.block_size
oldprogress = -1
for iblock in np.arange(nblocks):
lobin = iblock*options.block_size + startbin
hibin = lobin+options.block_size
spectra = fil.get_spectra(lobin, hibin)
spectra = spectra[:,lochan:hichan] # restrict channels
outfil.append_spectra(spectra)
progress = int(100.0*((hibin-startbin)/new_nspec))
if progress > oldprogress:
sys.stdout.write(" %3.0f %%\r" % progress)
sys.stdout.flush()
oldprogress = progress
# Read all remaining spectra
if remainder:
spectra = fil.get_spectra(endbin-remainder, endbin)
spectra = spectra[:,lochan:hichan] # restrict channels
outfil.append_spectra(spectra)
sys.stdout.write("Done \n")
sys.stdout.flush()
if __name__ == '__main__':
parser = ArgumentParser(description="v0.1 Patrick Lazarus (Aug. 28, 2012)")
parser.add_argument("-L", "--lo-freq", dest="lo_freq", type=float,
help="Desired low frequency for output file. Note: "
"actual low frequency will be rounded to the nearest"
"channel (Default: Don't truncate low-freq channels)",
default=None)
parser.add_argument("-H", "--hi-freq", dest="hi_freq", type=float,
help="Desired high frequency for output file. Note: "
"actual high frequency will be rounded to the nearest"
"channel (Default: Don't truncate high-freq channels)",
default=None)
parser.add_argument("-s", "--start-time", dest="start_time", type=float,
help="Start of desired range of input file to write "
"to output file. Note: The actual start time will "
"be rounded to the nearest sample.(Default: Don't "
"truncate from start of file.)", default=None)
parser.add_argument("-e", "--end-time", dest="end_time", type=float,
help="End of desired range of input file to write "
"to output file. Note: The actual end time will "
"be rounded to the nearest sample. (Default: "
"Don't truncate from end of file.)", default=None)
parser.add_argument("--block-size", dest='block_size', default=BLOCKSIZE,
type=float,
help="Number of spectra per block. This is the amount "
"of data manipulated/written at a time. (Default: "
" %d spectra)" % BLOCKSIZE)
parser.add_argument("-o", "--outname", dest='outname', action='store', required=True,
help="The name of the output file.")
    parser.add_argument("infile", nargs=1,
                        help="The filterbank file to truncate.")
    options = parser.parse_args()
    main(options.infile)
| 5,544 | 38.049296 | 89 | py |
presto | presto-master/bin/downsample_filterbank.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import range
import sys
import numpy as num
from presto import sigproc
if __name__ == "__main__":
if len(sys.argv)==1:
print("\nusage: downsample_filterbank_hdr.py DS_fact infile.fil\n")
sys.exit()
DS_fact = int(sys.argv[1])
basefilenm = sys.argv[2][:sys.argv[2].find(".fil")]
filhdr = {}
newhdr = "".encode('utf-8')
infile = open(sys.argv[2], 'rb')
# Determine the full size of the file
infile.seek(0, 2)
infilelen = infile.tell()
infile.seek(0, 0)
outfile = open(basefilenm+"_DS%d.fil"%DS_fact, 'wb')
# Loop over the values in the .fil file
while 1:
param, val = sigproc.read_hdr_val(infile, stdout=False)
filhdr[param] = val
if param=="tsamp":
val *= DS_fact
# Append to the new hdr string
newhdr += sigproc.addto_hdr(param, val)
# Break out of the loop if the header is over
if param=="HEADER_END":
break
# Write the new header to the output file
outfile.write(newhdr)
nchans = filhdr['nchans']
# Remove the header length from infilelen and then
# determine how many spectra are in the file
infilelen -= infile.tell()
numspec = infilelen // nchans
if infilelen % nchans:
print("Whoops! File length calculation is not right...")
# Now loop over the spectra
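    # Each output spectrum is the mean of DS_fact consecutive input spectra;
    # the +0.5 added before the ubyte cast below rounds to the nearest
    # integer rather than truncating.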
for ii in range(numspec // DS_fact):
try:
x = num.fromfile(infile, dtype=num.ubyte, count=DS_fact*nchans)
x.shape = (DS_fact, nchans)
dsx = (x.mean(0)+0.5).astype(num.ubyte)
dsx.tofile(outfile)
except:
break
infile.close()
outfile.close()
| 1,777 | 25.939394 | 76 | py |
presto | presto-master/bin/pulsestack.py | #!/usr/bin/env python
#
# Script to fold the profile from the dat-file and plot it
# or to plot the time series, or plot the stack of individual
# pulses or subintegrations in different modes
# Also added functionality to use the tim-file in Sigproc-style
# format, and any other raw binary (float) data files
# if corresponding info is provided with --mjd (or --chandra),
# --tsamp and --pulsar options.
# Also, possible to give ascii file with events times (in seconds).
# This is useful to make plots for the X-ray data.
#
# List of options with short help: -h or --help
# Full list of options without help: -l or --list
#
# Vlad, Nov 26, 2010 (c)
#
# Last major update - Dec 5, 2010
#
######################################################################
from __future__ import print_function
from builtins import str
from builtins import range
import numpy as np
import scipy
import scipy.signal
import array as ar
import os.path, stat, sys
import math
import optparse as opt
import presto.infodata as inf
import presto.polycos as poly
from presto import sigproc
# importing VariableColormap from kapteyn module of it exists
try:
from kapteyn.mplutil import VariableColormap
kapteyn_loaded = True
except ImportError:
kapteyn_loaded = False
detrendlen = 1000 # length of a linear piecewise chunk of data for detrending
# get the period from the polyco-file with the fileid=pid
# using the best record in the polyco-file for the mjd=mjd
def get_period(id, mjd):
return (
1.0
/ id.get_phs_and_freq(
float(str(mjd).split(".")[0]), float("0." + str(mjd).split(".")[1])
)[1]
)
# I took this part from the Scott's single_pulse_search.py
def detrending(data, is_fast):
# Compute the file length in detrendlens
    roundN = np.size(data) // detrendlen * detrendlen
    data = data[:roundN]  # here we are redefining the array and losing some samples
# Split the data into chunks for detrending
    numblocks = roundN // detrendlen
data.shape = (numblocks, detrendlen)
stds = np.zeros(numblocks, dtype=np.float64)
# de-trend the data one chunk at a time and compute statistics
for ii, chunk in enumerate(data):
if is_fast: # use median removal instead of detrending (2x speedup)
tmpchunk = chunk.copy()
tmpchunk.sort()
            med = tmpchunk[detrendlen // 2]
chunk -= med
tmpchunk -= med
else:
# The detrend calls are the most expensive in the program
data[ii] = scipy.signal.detrend(chunk, type="linear")
tmpchunk = data[ii].copy()
tmpchunk.sort()
# The following gets rid of (hopefully) most of the
# outlying values (i.e. power dropouts and single pulses)
# If you throw out 5% (2.5% at bottom and 2.5% at top)
# of random gaussian deviates, the measured stdev is ~0.871
# of the true stdev. Thus the 1.0/0.871=1.148 correction below.
# The following is roughly .std() since we already removed the median
stds[ii] = np.sqrt(
            (tmpchunk[detrendlen // 40 : -detrendlen // 40] ** 2.0).sum()
/ (0.95 * detrendlen)
)
stds *= 1.148
# sort the standard deviations and separate those with
# very low or very high values
sort_stds = stds.copy()
sort_stds.sort()
    # identify the differences with the largest values (this
    # will split off the chunks with very low and very high stds)
    locut = (sort_stds[1 : numblocks // 2 + 1] - sort_stds[: numblocks // 2]).argmax() + 1
    hicut = (
        (sort_stds[numblocks // 2 + 1 :] - sort_stds[numblocks // 2 : -1]).argmax()
        + numblocks // 2
        - 2
    )
std_stds = np.std(sort_stds[locut:hicut])
    median_stds = sort_stds[(locut + hicut) // 2]
lo_std = median_stds - 4.0 * std_stds
hi_std = median_stds + 4.0 * std_stds
# Determine a list of "bad" chunks. We will not search these.
bad_blocks = np.nonzero((stds < lo_std) | (stds > hi_std))[0]
stds[bad_blocks] = median_stds
# Now normalize all of the data and reshape it to 1-D
data /= stds[:, np.newaxis]
data.shape = (roundN,)
return data
# gives the short list of options without explanations
def list_options(prg):
print("Usage: %s [options] <.dat OR .tim (use --tim option)>" % (prg))
print()
print("Options:")
print(" [-h, --help] [-n, --nbins #BINS] [-p, --pulsar NAME]")
print(" [--polyco FILE] [--period PERIOD] [-b, --block SAMPLES]")
print(" [-s, --startphase PHASE] [-e, --endphase PHASE] [--start TIME]")
print(" [-w, --window TIME] [-f, --fast-detrend] [--no-detrend]")
print(" [-t, --timeseries] [-a, --rebin FACTOR] [-y, --profileonly]")
print(
" [-k, --stacking] [--offset OFFSET] [-d, --dump #PULSES | TIME]"
)
print(" [--saveprof FILE] [--saveonly] [-i, --image FILEEXT]")
print(" [--tim] [--events] [-l, --list]")
print(" [-2, --double] [-m, --mjd MJD] [--tsamp TIME]")
print(" [--chandra]")
print()
print("Graphics Options:")
print(" [--fontsize SIZE] [--color COLOR] [--linestyle STYLE]")
print(" [--linewidth WIDTH] [--marker TYPE] [--markercolor COLOR]")
print(" [--markerwidth WIDTH] [--markersize SIZE] [--facecolor COLOR]")
print(" [--cmap COLORMAP] [-c, --colorbar] [--title STR]")
print(" [--legend STR] [--loc STR] [-g, --grid]")
print(" [--titlepos STR] [--label STR] [--labelpos STR]")
print(" [--no-top-axis] [--no-right-axis]")
if kapteyn_loaded:
print(" [--cmap-scale SCALE]")
###################################################################################################################################
# M A I N
###################################################################################################################################
if __name__ == "__main__":
#
# Parsing the command line options
#
usage = "Usage: %prog [options] <.dat OR .tim (use --tim option)>"
cmdline = opt.OptionParser(usage)
cmdline.add_option(
"-n",
"--nbins",
dest="nbins",
metavar="#BINS",
help="number of phase bins per pulsar period (default: number of samples)",
default=-1,
type="int",
)
cmdline.add_option(
"-p",
"--pulsar",
dest="psrname",
metavar="NAME",
        help="pulsar name to be used for polyco instead of inf-file",
        default="",
        type="str",
)
cmdline.add_option(
"--polyco",
dest="polycofile",
metavar="FILE",
help="polyco file to be used for folding (default: %default)",
default="polyco.dat",
type="str",
)
cmdline.add_option(
"--period",
dest="period",
metavar="PERIOD",
help="period in ms for folding. If not given then will use 'polyco.dat'",
default=-1,
type="float",
)
cmdline.add_option(
"-s",
"--startphase",
dest="phase_start",
metavar="PHASE",
help="start phase to plot",
default=0.0,
type="float",
)
cmdline.add_option(
"-e",
"--endphase",
dest="phase_end",
metavar="PHASE",
help="end phase to plot",
default=1.0,
type="float",
)
cmdline.add_option(
"--start",
dest="start_time",
metavar="TIME",
help="time offset from the start in seconds (default: %default)",
default=0,
type="float",
)
cmdline.add_option(
"-w",
"--window",
dest="window_time",
metavar="TIME",
help="duration of the window in seconds (default: whole file)",
default=-1,
type="float",
)
cmdline.add_option(
"-b",
"--block",
dest="blocksize",
metavar="SAMPLES",
        help="size of the block for reading the dat-file when folding the profile. When using a polyco file "
        "the period is updated for every block, so for very close binaries this block size "
        "should probably be very small. The smallest is 3000. Default: %default",
default=1000000,
type="int",
)
cmdline.add_option(
"-f",
"--fast-detrend",
action="store_true",
dest="fast_detrend",
        help="Use a faster method of de-trending the time-series (2x speedup). "
        "Sometimes (e.g. for strong pulsars, where many strong pulses can affect the calculation "
        "of the linear trend) this method produces even better results, since only the median is subtracted",
default=False,
)
cmdline.add_option(
"--no-detrend",
action="store_true",
dest="is_no_detrend",
help="do not detrend the data",
default=False,
)
cmdline.add_option(
"-a",
"--rebin",
dest="rebin",
metavar="FACTOR",
help="averaging time series by FACTOR (default: %default)",
default=1,
type="int",
)
cmdline.add_option(
"-t",
"--timeseries",
action="store_true",
dest="is_timeseries",
help="no folding. Time series will be plotted",
default=False,
)
cmdline.add_option(
"-y",
"--profileonly",
action="store_true",
dest="is_profileonly",
help="only plot the average profile. No stack of pulses or subints",
default=False,
)
cmdline.add_option(
"-2",
"--double",
action="store_true",
dest="is_period_doubled",
help="plot doubled-period profile (only when options -s and -e are not given)",
default=False,
)
cmdline.add_option(
"-k",
"--stacking",
action="store_true",
dest="is_stacking",
help="Plot series of pulses or subints in stacking mode. Default is grey-scale mode",
default=False,
)
cmdline.add_option(
"-d",
"--dump",
dest="dump",
metavar="#PULSES | TIME",
        help="number of pulses per subintegration, or the subintegration time in seconds (if the value contains a '.')",
default="",
type="str",
)
cmdline.add_option(
"--offset",
dest="offset",
metavar="OFFSET",
help="Offset between individual profiles in stacking mode. Default = %default. "
"Offset is in the same units as for profiles' flux density. "
"Only positive (>=0) offsets are allowed",
default=1.0,
type="float",
)
cmdline.add_option(
"--saveprof",
dest="proffile",
metavar="FILE",
help="save profile to binary file FILE",
default="",
type="str",
)
cmdline.add_option(
"--saveonly",
action="store_true",
dest="is_saveonly",
help="only saves png-file and exits",
default=False,
)
cmdline.add_option(
"-i",
"--image",
dest="imageext",
metavar="FILEEXT",
help="image file extension when used with --saveonly (default: %default)",
default="png",
type="str",
)
cmdline.add_option(
"--tim",
action="store_true",
dest="is_timfile",
        help="input file is a Sigproc-style tim-file. No inf-file is necessary in this case",
default=False,
)
cmdline.add_option(
"-l",
"--list",
action="store_true",
dest="is_printlist",
help="print short list of all options",
default=False,
)
cmdline.add_option(
"--events",
action="store_true",
dest="is_events",
        help="input file is an ascii file with event times in secs. Must use the --mjd option to provide the start MJD. "
        "The number of bins _must_ also be given with the --nbins option unless --timeseries is set",
default=False,
)
cmdline.add_option(
"-m",
"--mjd",
dest="mjd",
metavar="MJD",
        help="start MJD of the data. By default, it is read from the .inf file or tim-file header. "
        "If an events file is used, it _must_ be given unless --chandra is used",
default="",
type="str",
)
cmdline.add_option(
"--chandra",
action="store_true",
dest="is_chandra",
help="events file is Chandra file, so start MJD is set to 50814.0 (Chandra reference MJD)",
default=False,
)
cmdline.add_option(
"--tsamp",
dest="tsamp",
metavar="TIME",
        help="sampling time in sec of the data. By default, it is read from the .inf file or tim-file header",
default="",
type="str",
)
group = opt.OptionGroup(cmdline, "Graphics Options")
group.add_option(
"--fontsize",
dest="fs",
metavar="SIZE",
help="font size for labels (default: %default)",
default=10,
type="int",
)
group.add_option(
"--color",
dest="color",
metavar="COLOR",
help="line and marker color (default: %default)",
default="green",
type="str",
)
group.add_option(
"--linestyle",
dest="linestyle",
metavar="STYLE",
help="set linestyle. Default is '%default' (solid). "
"Other possible values: '--', '-.', ':', 'None', ' ', '' with possible combination "
"with 'steps', 'steps-pre', 'steps-mid', or 'steps-post'. In stacking mode only "
"'-', '--', '-.', ':' are possible, or their aliases: 'solid', 'dashed', 'dashdot', "
"and 'dotted'",
default="-",
type="str",
)
group.add_option(
"--linewidth",
dest="linewidth",
metavar="WIDTH",
help="set linewidth. Default is '%default'",
default="1.0",
type="float",
)
group.add_option(
"--marker",
dest="marker",
metavar="TYPE",
        help="set marker type (default: %default). Use 'None' to not use a marker symbol",
default=",",
type="str",
)
group.add_option(
"--markercolor",
dest="markercolor",
metavar="COLOR",
help="set only marker color (default: %default)",
default="green",
type="str",
)
group.add_option(
"--markerwidth",
dest="markerwidth",
metavar="WIDTH",
help="set marker width (default: %default)",
default="1.0",
type="float",
)
group.add_option(
"--markersize",
dest="markersize",
metavar="SIZE",
help="set marker size (default: %default)",
default="6.0",
type="float",
)
group.add_option(
"--facecolor",
dest="facecolor",
metavar="COLOR",
help="marker facecolor or fill color in stacking mode (default: %default)",
default="white",
type="str",
)
group.add_option(
"--cmap",
dest="colormap",
metavar="COLORMAP",
help="set colormap for plotting pulse stack. Default: %default. "
"Other good colormaps: gray, gist_yarg (reversed gray), "
"gist_stern, hot, jet, pink, gist_heat, gist_gray, copper, ...",
default="gist_earth",
type="str",
)
if kapteyn_loaded:
group.add_option(
"--cmap-scale",
dest="cmap_scaling",
metavar="SCALE",
help="set colormap scaling. Default: %default. "
"Other possible values: SQRT, LOG, EXP, SQUARE",
default="LINEAR",
type="str",
)
group.add_option(
"-c",
"--colorbar",
action="store_true",
dest="is_colorbar",
help="plot colorbar when plotting pulse stack",
default=False,
)
group.add_option(
"--title",
dest="title",
metavar="STR",
help="set title of the plot. Use '\\n' to have several lines",
default="",
type="str",
)
group.add_option(
"--titlepos",
dest="titlepos",
metavar="STR",
help="set the position of the title in plot coordinates from 0 till 1. "
"Default is '%default', i.e. x=0.5, y=1.07 (top of the plot), and horizontal "
"alignment is 'center'",
default="0.5,1.07,center",
type="str",
)
group.add_option(
"--legend",
dest="legend",
metavar="STR",
help="set label for the legend when only used with --timeseries or --profileonly options. "
"Use '\\n' to have several lines",
default="",
type="str",
)
group.add_option(
"--loc",
dest="loc",
metavar="STR",
help="location of the legend. Default is '%default'. Other possible values: 'upper left', "
"'lower left', 'lower right', 'right', 'center left', 'center right', 'lower center', "
"'upper center', 'center'.",
default="upper right",
type="str",
)
group.add_option(
"--label",
dest="label",
metavar="STR",
help="set label to put anywhere in the figure. The position is determined by --labelpos option. "
"Use '\\n' to have several lines",
default="",
type="str",
)
group.add_option(
"--labelpos",
dest="labelpos",
metavar="STR",
help="set the position of the label in plot coordinates from 0 till 1. "
"Default is '%default', i.e. x=0.05, y=0.9, and horizontal "
"alignment is 'left'",
default="0.05,0.9,left",
type="str",
)
group.add_option(
"-g",
"--grid",
action="store_true",
dest="is_grid",
help="turn on the grid lines",
default=False,
)
group.add_option(
"--no-top-axis",
action="store_true",
dest="is_no_top_axis",
help="turn off the different labeling on the top axis",
default=False,
)
group.add_option(
"--no-right-axis",
action="store_true",
dest="is_no_right_axis",
help="turn off the different labeling on the right axis",
default=False,
)
cmdline.add_option_group(group)
# reading cmd options
(opts, args) = cmdline.parse_args()
# print short list of options
if opts.is_printlist:
list_options(sys.argv[0].split("/")[-1])
sys.exit(0)
# check if input file is given
if len(args) != 0:
datfile = args[0]
else:
# cmdline.print_help()
cmdline.print_usage()
sys.exit(0)
# importing matplotlib
if opts.is_saveonly:
import matplotlib
matplotlib.use("Agg")
pngname = datfile.split(".dat")[0] + "." + opts.imageext
else:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.collections as collections
import matplotlib.font_manager as fm
headersize = 0 # size in bytes of the header
# if input file is Sigproc-style tim-file
if opts.is_timfile:
try:
filhdr, headersize = sigproc.read_header(datfile)
startmjd = filhdr["tstart"]
tsamp = filhdr["tsamp"]
source = filhdr["source_name"]
except:
print("Error: Can't open the tim-file '%s'!" % (datfile,))
sys.exit(1)
elif opts.is_events:
opts.is_no_detrend = True # we don't do detrending for events
if opts.mjd == "" and not opts.is_chandra:
print(
"Error: for events' file start MJD _must_ be given with --mjd option or --chandra option!"
)
sys.exit(1)
if opts.nbins == -1:
print("Error: number of bins _must_ be given with --nbins option!")
sys.exit(1)
if opts.rebin != 1:
print("Event data can not be re-binned")
opts.rebin = 1
else:
if (
(opts.mjd == "" and not opts.is_chandra)
or opts.tsamp == ""
or opts.psrname == ""
):
# reading inf-file to get corresponding info
inffile = datfile.split(".dat")[0] + ".inf"
try:
id = inf.infodata(inffile)
tsamp = id.dt # sampling time
startmjd = id.epoch # start MJD
source = id.object # pulsar name
except:
print("Error: Can't read the inf-file '%s'!" % (inffile,))
sys.exit(1)
# overwriting MJD, tsamp, pulsarname from the command line if given
if opts.is_chandra:
opts.mjd = "50814.0"
print("Chandra event file. Reference start MJD is %s" % (opts.mjd))
if opts.mjd != "":
startmjd = float(opts.mjd)
if opts.tsamp != "":
tsamp = float(opts.tsamp)
if opts.psrname != "":
source = opts.psrname
# checking start, end times and adjusting them if necessary
if opts.start_time < 0:
print("Error: start time %.3f is < 0!" % (opts.start_time))
sys.exit(1)
if not opts.is_events:
# getting the size of the file
try:
            size = (os.stat(datfile)[stat.ST_SIZE] - headersize) // 4 # 4 bytes in float
except:
print("Error: Can't open the input file '%s'!" % (datfile,))
sys.exit(1)
# checking start, end times and adjusting them if necessary
start_sample = int(opts.start_time / tsamp)
if start_sample >= size:
print("Start time %.3f is out of range!" % (opts.start_time))
sys.exit(1)
else:
size = size - start_sample
# adjusting start MJD
opts.start_time = start_sample * tsamp
if opts.window_time != -1:
window = int(opts.window_time / tsamp)
opts.window_time = window * tsamp
else:
window = size
opts.window_time = window * tsamp
if start_sample + window > size:
print(
"End time %.3f is out of range. The window duration will be adjusted"
% (opts.start_time + opts.window_time)
)
window = size - start_sample
opts.window_time = window * tsamp
print("New window is %.3f s" % (opts.window_time))
else: # reading the file, and so, we can get the start and end events' time
events = np.loadtxt(
datfile, comments="#", usecols=(0, 0), dtype=float, unpack=True
)[0]
try:
energy = np.loadtxt(
datfile, comments="#", usecols=(1, 1), dtype=float, unpack=True
)[0]
except:
energy = []
if np.size(energy) == 0 and opts.is_timeseries:
            print(
                "No energy column is given in the events' file '%s'. It _must_ be given to plot the timeseries!"
                % (datfile)
            )
sys.exit(1)
duration = events[-1] - events[0]
if opts.start_time >= duration:
print(
"Start time %.3f sec is more than duration of observation of %.3f sec!"
% (opts.start_time, duration)
)
sys.exit(1)
else:
events.compress(events >= opts.start_time + events[0])
if opts.is_timeseries:
energy.compress(events >= opts.start_time + events[0])
if opts.window_time == -1:
opts.window_time = duration
if opts.start_time + opts.window_time > duration:
print(
"End time %.3f is out of range. The window duration will be adjusted"
% (opts.start_time + opts.window_time)
)
opts.window_time = duration - opts.start_time
print("New window is %.3f s" % (opts.window_time))
# checking dump settings
if opts.dump != "":
if "." in opts.dump: # dump time is given
dump_time = float(opts.dump)
dump_pulses = 0
if dump_time > 0:
print("Dump time is %.3f s" % (dump_time))
else:
print("Dump time %.3f should be > 0!" % (dump_time))
sys.exit(1)
else: # number of pulses to dump is given
dump_pulses = int(opts.dump)
dump_time = 0
if dump_pulses > 0:
print("Number of pulses in subintegration is %d" % (dump_pulses))
else:
print(
"Number of pulses in subintegration %d should be > 0!"
% (dump_pulses)
)
sys.exit(1)
else:
dump_pulses = 0
dump_time = 0
# Checking the phases and correct if necessary
if not opts.is_timeseries:
if opts.phase_start < 0 or opts.phase_start >= 1.0:
opts.phase_start = 0.0
print(
"Start phase is out of range. Adjusted value is %.3f"
% (opts.phase_start,)
)
if opts.phase_end <= 0.0 or opts.phase_end > 1.0:
opts.phase_end = 1.0
print(
"End phase is out of range. Adjusted value is %.3f" % (opts.phase_end,)
)
if opts.phase_end <= opts.phase_start:
print(
"End phase %.3f is <= than start phase %.3f! Please adjust."
% (opts.phase_end, opts.phase_start)
)
sys.exit(1)
if not opts.is_no_detrend:
# checking the blocksize
if opts.blocksize < 3 * detrendlen:
print(
"Block size is too small: %d. Will be increased to %d."
% (opts.blocksize, 3 * detrendlen)
)
opts.blocksize = 3 * detrendlen
        # make the blocksize divisible by detrendlen (in order not to lose samples between blocks)
if int(opts.blocksize / detrendlen) * detrendlen != opts.blocksize:
opts.blocksize = detrendlen * (1 + int(opts.blocksize / detrendlen))
print(
"Adjusting block size to %d to be divisible by detrendlen=%d"
% (opts.blocksize, detrendlen)
)
# forming the array of time samples
if not opts.is_events:
try:
infile = open(datfile, "rb")
except:
print("Error: Can't read the dat-file '%s'!" % (datfile,))
sys.exit(1)
dataptr = ar.array("f") # 'f' - for float
infile.seek(
headersize + 4 * start_sample
) # position to the first byte to read; '4' - is the size of float
else:
data = events / 86400.0 # converting events to days
data += startmjd # converting to MJD
events -= events[
0
] # converting events' time relative to the start of observation
# Folding the profile
if not opts.is_timeseries:
if (
opts.period == -1
): # Period is not given in the cmdline, so will use polyco file
pid = poly.polycos(source, opts.polycofile)
try:
if not opts.is_events:
fold_period = get_period(pid, startmjd)
else:
fold_period = get_period(pid, data[0])
except:
print(
"Check the name of the pulsar in polyco file '%s' and inf-file '%s'!"
% (opts.polycofile, inffile)
)
print(
"If different, try --pulsar option to set the name of pulsar the same as in polyco file."
)
sys.exit(1)
is_update_period = True
if fold_period <= 0:
print(
"Computed fold period is bad: %f. Check your polyco and/or MJD!"
% (float(fold_period))
)
sys.exit(1)
else: # period is given
fold_period = opts.period / 1000.0
is_update_period = False
# if number of bins is not specified (should always be specified for events)
if opts.nbins == -1:
opts.nbins = int(fold_period / tsamp)
# if dump_time is given - checking that it is >= fold_period
if dump_time > 0:
if dump_time < fold_period:
print(
"Dump time %.3f s is less than folding period of %f s. Adjusting to match."
% (dump_time, fold_period)
)
dump_time = fold_period
print("Dump time is now %.3f s" % (dump_time))
if dump_time > opts.window_time:
print(
"Dump time %.3f is more than window duration of %f s. Adjusting..."
% (dump_time, opts.window_time)
)
# we make it a one period less than duration, because otherwise plt.imshow
# fails to plot
dump_time = opts.window_time - fold_period
print("Dump time is now %.3f s" % (dump_time))
else: # we need this for plotting purposes
dump_time = dump_pulses * fold_period
if dump_time > opts.window_time:
                print(
                    "Number of pulses per subintegration %d is more than will fit within the window duration of %f s."
                    % (dump_pulses, opts.window_time)
                )
print("Adjusting to match to the closest maximum possible number.")
dump_pulses = int((opts.window_time - fold_period) / fold_period)
dump_time = dump_pulses * fold_period
print("Number of pulses per subintegration is now %d" % (dump_pulses))
bin_start = int(opts.nbins * opts.phase_start)
bin_end = int(math.ceil((opts.nbins - 1) * opts.phase_end))
bin_range = bin_end - bin_start + 1
if "%f" % (opts.phase_start) != "%f" % (float(bin_start) / opts.nbins):
opts.phase_start = float(bin_start) / opts.nbins
print(
"Adjusting the start phase to %.3f to account for integer number of bins"
% (opts.phase_start)
)
if "%f" % (opts.phase_end) != "%f" % (float(bin_end) / (opts.nbins - 1)):
opts.phase_end = float(bin_end) / (opts.nbins - 1)
print(
"Adjusting the end phase to %.3f to account for integer number of bins"
% (opts.phase_end)
)
# initializing some variables and arrays
elapsed_secs = opts.start_time
elapsed_turns = 0.0
ibin = 0
lbin = -1
pulsecount = 0
profile = np.zeros(bin_range, dtype=float) # profile array
counts = np.zeros(
bin_range, dtype=float
) # array that keeps number of counts in each bin of the profile
if (
not opts.is_profileonly
): # initialize the 2D array to keep the stack of pulses
if dump_pulses > 0:
npulses_expect = int(
math.ceil(
int((opts.window_time) / fold_period) / float(dump_pulses)
)
)
elif dump_time > 0:
npulses_expect = int(math.ceil((opts.window_time) / dump_time))
else:
npulses_expect = int(math.ceil((opts.window_time) / fold_period))
# increase that value by 25% (overkill probably) in order to be safe if period is changing significantly
# over the course of observations
npulses_expect += int(0.25 * npulses_expect)
if npulses_expect == 1:
npulses_expect += 1
pulsestack = np.zeros((npulses_expect, bin_range), dtype=float)
if dump_pulses > 0 or dump_time > 0:
dump = np.zeros(bin_range, dtype=float) # current subintegration
dumpcount = 0 # number of pulses in current subintegration
dump_next = elapsed_secs + dump_time # time of the next dump
dumpcounts = np.zeros(
bin_range, dtype=float
) # array that keeps number of counts in each bin of the subintegration
# in case of events' file we always use dump_time
if opts.is_events:
if dump_time == 0: # always use dump_time
dump_time = fold_period
dumpcount = 0 # number of pulses in current subintegration
dump_next = elapsed_secs + dump_time # time of the next dump
dumpcounts = np.zeros(
bin_range, dtype=float
) # array that keeps number of counts in each bin of the subintegration
if not opts.is_events:
# Loop over the chunks of data to read
while 1:
            samplesleft = (
                headersize + (start_sample + window) * 4 - infile.tell()
            ) // 4
if samplesleft <= 0: # leaving from this infinite while loop
break
if samplesleft > opts.blocksize:
dataptr.fromfile(infile, opts.blocksize)
else:
dataptr.fromfile(infile, samplesleft)
data = np.array(dataptr)
dataptr = ar.array("f") # clear the array. This is important!
readsamples = np.size(data) # how many samples we have read
# detrending the data
if not opts.is_no_detrend:
# this check is necessary when reading the very last block and if its size is _very_ small
# then we just have to skip it when detrending
if readsamples < 3 * detrendlen:
break
data = detrending(data, opts.fast_detrend)
size = np.size(data)
# updating the period if necessary
if is_update_period:
fold_period = get_period(pid, startmjd + elapsed_secs / 86400.0)
# main folding loop over the read samples
turns = elapsed_turns
secs = elapsed_secs
for s in range(0, size):
# phase of each sample
phase = turns - math.floor(turns)
if phase >= opts.phase_start and phase <= opts.phase_end:
ibin = int(opts.nbins * phase) - bin_start
if ibin == opts.nbins:
ibin -= 1
if ibin - lbin < 0:
pulsecount += 1
counts[ibin] += 1.0
profile[ibin] += data[s]
if not opts.is_profileonly:
# making the subintegration
if dump_pulses > 0 or dump_time > 0:
# if number of pulses to dump is given
if (
dump_pulses > 0
and ibin - lbin < 0
and pulsecount != 0
and pulsecount % dump_pulses == 0
):
pulsestack[dumpcount] = np.array(
[
dump[i]
/ (
dumpcounts[i] == 0.0
and 1.0
or dumpcounts[i]
)
for i in range(0, bin_range)
],
dtype=float,
)
dumpcount += 1
dumpcounts[:] = 0.0
dump[:] = 0.0
# if dump time is given
if (
dump_time > 0
and dump_pulses <= 0
and dump_next - secs <= tsamp / 2.0
):
pulsestack[dumpcount] = np.array(
[
dump[i]
/ (
dumpcounts[i] == 0.0
and 1.0
or dumpcounts[i]
)
for i in range(0, bin_range)
],
dtype=float,
)
dumpcount += 1
dumpcounts[:] = 0.0
dump[:] = 0.0
dump_next = secs + dump_time
# after the dumps (if happened) we still need to continue with the current sample
# that belongs already to the next dump
dumpcounts[ibin] += 1.0
dump[ibin] += data[s]
else: # no subintegrations
pulsestack[pulsecount][ibin] = data[s]
turns += tsamp / fold_period
secs += tsamp
lbin = ibin
elapsed_secs += readsamples * tsamp
elapsed_turns += readsamples * tsamp / fold_period
else: # if events are given
for tt in range(np.size(data)):
# updating the period if necessary
if is_update_period:
fold_period = get_period(pid, data[tt])
turns = events[tt] / fold_period
# phase of each sample
phase = turns - math.floor(turns)
if phase >= opts.phase_start and phase <= opts.phase_end:
ibin = int(opts.nbins * phase) - bin_start
if ibin == opts.nbins:
ibin -= 1
counts[ibin] += 1.0
if not opts.is_profileonly:
# if dump time is given (always use in case of events file)
if (
tt < np.size(data) - 1 and dump_next < events[tt + 1]
) or tt == np.size(data) - 1:
pulsestack[dumpcount] = dumpcounts
dumpcount += 1
dumpcounts[:] = 0.0
dump_next += dump_time
# after the dumps (if happened) we still need to continue with the current sample
# that belongs already to the next dump
dumpcounts[ibin] += 1.0
# normalizing the profile
if not opts.is_events:
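        # divide each profile bin by its number of counts; empty bins are divided
        # by 1 (the "cond and a or b" construct is an old-style inline conditional)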
profile = np.array(
[
profile[i] / (counts[i] == 0.0 and 1.0 or counts[i])
for i in range(0, bin_range)
],
dtype=float,
)
else:
profile = counts
# saving the profile to binary file
if opts.proffile != "":
outp = open(opts.proffile, "wb")
outbins = ar.array("f")
outbins.fromlist(profile.tolist())
outbins.tofile(outp)
outp.close()
else:
if not opts.is_events:
# if one wants just to plot time series, just read the whole selected block and then plot it
# If the size of window is too large, then probably Python won't be able to read it all at once
dataptr.fromfile(infile, window)
data = np.array(dataptr)
dataptr = ar.array(
"f"
) # clear the array. Though, here it's not important or necessary
# detrending the data
if not opts.is_no_detrend:
data = detrending(data, opts.fast_detrend)
window = np.size(data)
opts.window_time = window * tsamp
#
# Plotting
#
fig = plt.figure()
if opts.is_timeseries: # plotting the time series
if opts.rebin == 1:
if not opts.is_events:
flux = data
else:
flux = energy
else:
window = int(window / opts.rebin)
tsamp *= opts.rebin
flux = [
np.average(data[k * opts.rebin : (k + 1) * opts.rebin])
for k in range(0, window)
]
if not opts.is_events:
time = [opts.start_time + tsamp * t for t in range(0, window)]
else:
time = events
ax = fig.add_subplot(111)
plt.xlabel("Time (s)", fontsize=opts.fs)
if opts.is_events:
plt.ylabel("Energy", fontsize=opts.fs)
elif opts.is_no_detrend:
plt.ylabel("Flux density (arb. units)", fontsize=opts.fs)
else:
            plt.ylabel(r"Flux density ($\sigma$)", fontsize=opts.fs)
ax.plot(
time,
flux,
color="%s" % (opts.color),
marker="%s" % (opts.marker),
markeredgecolor="%s" % (opts.markercolor),
markerfacecolor="%s" % (opts.facecolor),
linestyle="%s" % (opts.linestyle),
linewidth=opts.linewidth,
markeredgewidth=opts.markerwidth,
markersize=opts.markersize,
label="%s" % ("\n".join(opts.legend.split("\\n"))),
)
# turn on grid
if opts.is_grid:
plt.grid(True)
if not opts.is_no_top_axis:
if not opts.is_events:
axtop = plt.twiny()
axtop.xaxis.tick_top()
axtop.xaxis.set_label_position("top")
axtop.set_xlim(xmin=start_sample, xmax=start_sample + window - 1)
for label in axtop.get_xticklabels():
label.set_fontsize(opts.fs)
plt.xlabel("Samples", fontsize=opts.fs)
plt.gca().minorticks_on()
ax.set_xlim(xmin=time[0], xmax=time[-1])
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%g"))
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%g"))
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())
for label in ax.get_xticklabels():
label.set_fontsize(opts.fs)
for label in ax.get_yticklabels():
label.set_fontsize(opts.fs)
elif opts.is_profileonly: # plotting only the profile
flux = profile
phase = [float(n) / opts.nbins for n in range(bin_start, bin_end + 1)]
if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
flux = np.append(flux, profile)
phase = np.append(
phase,
[
float(opts.nbins + n) / opts.nbins
for n in range(bin_start, bin_end + 1)
],
)
ax = fig.add_subplot(111)
plt.xlabel("Pulse phase", fontsize=opts.fs)
if opts.is_events:
plt.ylabel("Counts/bin", fontsize=opts.fs)
elif opts.is_no_detrend:
plt.ylabel("Flux density (arb. units)", fontsize=opts.fs)
else:
            plt.ylabel(r"Flux density ($\sigma$)", fontsize=opts.fs)
ax.plot(
phase,
flux,
color="%s" % (opts.color),
marker="%s" % (opts.marker),
markeredgecolor="%s" % (opts.markercolor),
markerfacecolor="%s" % (opts.facecolor),
linestyle="%s" % (opts.linestyle),
linewidth=opts.linewidth,
markeredgewidth=opts.markerwidth,
markersize=opts.markersize,
label="%s" % ("\n".join(opts.legend.split("\\n"))),
)
# turn on grid
if opts.is_grid == True:
plt.grid(True)
if not opts.is_no_top_axis:
axtop = plt.twiny()
axtop.xaxis.tick_top()
axtop.xaxis.set_label_position("top")
if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
axtop.set_xlim(xmin=bin_start, xmax=opts.nbins + bin_end)
else:
axtop.set_xlim(xmin=bin_start, xmax=bin_end)
for label in axtop.get_xticklabels():
label.set_fontsize(opts.fs)
plt.xlabel("Phase bin", fontsize=opts.fs)
plt.gca().minorticks_on()
if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
ax.set_xlim(xmin=opts.phase_start, xmax=2.0 * opts.phase_end)
else:
ax.set_xlim(xmin=opts.phase_start, xmax=opts.phase_end)
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%g"))
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%g"))
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())
for label in ax.get_xticklabels():
label.set_fontsize(opts.fs)
for label in ax.get_yticklabels():
label.set_fontsize(opts.fs)
elif opts.is_stacking: # plotting the stack of pulses or subints
ax = fig.add_subplot(111)
if not opts.is_no_top_axis:
plt.xlabel("Phase bin", fontsize=opts.fs)
if not opts.is_no_right_axis:
if opts.is_events:
plt.ylabel("Counts/bin", fontsize=opts.fs)
elif opts.is_no_detrend:
plt.ylabel("Flux density (arb. units)", fontsize=opts.fs)
else:
                plt.ylabel(r"Flux density ($\sigma$)", fontsize=opts.fs)
dr = abs(opts.offset)
if dump_pulses > 0 or dump_time > 0:
ncount = dumpcount
else:
ncount = pulsecount
ymin = pulsestack[0:ncount].min()
ymax = pulsestack[0:ncount].max()
dr = abs(opts.offset)
dmins = []
dmaxs = []
t = np.arange(bin_start, bin_end + 1, dtype=float)
if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
t = np.append(t, [opts.nbins + n for n in range(bin_start, bin_end + 1)])
pulseverts = []
for i in np.arange(ncount - 1, -1, -1):
temp = pulsestack[i] + i * dr
if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
temp = np.append(temp, temp)
dmins.append(temp.min())
dmaxs.append(temp.max())
pulseverts.append(
np.vstack(
(
np.hstack((t[:, np.newaxis], temp[:, np.newaxis])),
[t[-1], ymin - 1],
[0, ymin - 1],
)
)
)
ymin = np.array(dmins).min()
ymax = np.array(dmaxs).max()
polys = collections.PolyCollection(pulseverts, closed=True)
polys.set_edgecolor("%s" % (opts.color))
polys.set_facecolor("%s" % (opts.facecolor))
polys.set_linestyle("%s" % (opts.linestyle))
polys.set_linewidth("%s" % (opts.linewidth))
ax.add_collection(polys)
plt.gca().minorticks_on()
# turn on grid
if opts.is_grid:
plt.grid(True)
axbot = plt.twiny()
axbot.xaxis.tick_bottom()
axbot.xaxis.set_label_position("bottom")
if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
axbot.set_xlim(xmin=opts.phase_start, xmax=2.0 * opts.phase_end)
else:
axbot.set_xlim(xmin=opts.phase_start, xmax=opts.phase_end)
for label in axbot.get_xticklabels():
label.set_fontsize(opts.fs)
plt.xlabel("Pulse phase", fontsize=opts.fs)
plt.gca().minorticks_on()
ayleft = plt.twinx()
ayleft.yaxis.tick_left()
ayleft.yaxis.set_label_position("left")
if (dump_pulses > 0 or dump_time > 0) and not opts.is_events:
ayleft.set_ylim(ymin=0.0, ymax=ncount * dump_time)
else:
ayleft.set_ylim(ymin=0.0, ymax=opts.window_time)
for label in ayleft.get_yticklabels():
label.set_fontsize(opts.fs)
plt.ylabel("Observing time (s)", fontsize=opts.fs)
plt.gca().minorticks_on()
# Determining the main (top/right) axes
if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
ax.set_xlim(xmin=bin_start, xmax=opts.nbins + bin_end - 0.1)
else:
ax.set_xlim(xmin=bin_start, xmax=bin_end - 0.1)
ax.set_ylim(ymin=ymin, ymax=ymax)
if not opts.is_no_top_axis:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
for label in ax.get_xticklabels():
label.set_fontsize(opts.fs)
plt.gca().minorticks_on()
        else:  # if we don't want to show it, turn everything off (we cannot duplicate the limits
            # and ticks from the bottom axis because then imshow won't work properly)
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_major_locator(ticker.NullLocator())
ax.xaxis.set_minor_locator(ticker.NullLocator())
if not opts.is_no_right_axis:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
for label in ax.get_yticklabels():
label.set_fontsize(opts.fs)
plt.gca().minorticks_on()
        else:  # if we don't want to show it, turn everything off (we cannot duplicate the limits
            # and ticks from the left axis because then imshow won't work properly)
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_major_locator(ticker.NullLocator())
ax.yaxis.set_minor_locator(ticker.NullLocator())
else: # plotting the stack of pulses or subints in grey-scale mode
ax = fig.add_subplot(111)
if not opts.is_no_top_axis:
plt.xlabel("Phase bin", fontsize=opts.fs)
if not kapteyn_loaded:
colormap = cm.get_cmap(opts.colormap)
else:
colormap = VariableColormap(cm.get_cmap(opts.colormap))
colormap.set_scale(opts.cmap_scaling)
if dump_pulses > 0 or dump_time > 0:
ncount = dumpcount
if not opts.is_no_right_axis:
plt.ylabel("Sub-integration", fontsize=opts.fs)
else:
ncount = pulsecount
if not opts.is_no_right_axis:
plt.ylabel("Pulse number", fontsize=opts.fs)
if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
pulsestack = np.concatenate(
(pulsestack[:ncount], pulsestack[:ncount]), axis=1
)
cax = ax.imshow(
pulsestack[:ncount],
interpolation=None,
aspect="auto",
extent=(bin_start, opts.nbins + bin_end, 0, ncount),
origin="lower",
cmap=colormap,
)
else:
cax = ax.imshow(
pulsestack[:ncount],
interpolation=None,
aspect="auto",
extent=(bin_start, bin_end, 0, ncount),
origin="lower",
cmap=colormap,
)
if opts.is_colorbar:
cbar = fig.colorbar(cax, orientation="horizontal", spacing="proportional")
if opts.is_events:
cbar.ax.set_xlabel("Counts/bin", fontsize=opts.fs)
elif opts.is_no_detrend:
cbar.ax.set_xlabel("Flux density (arb. units)", fontsize=opts.fs)
else:
                cbar.ax.set_xlabel(r"Flux density ($\sigma$)", fontsize=opts.fs)
for label in cbar.ax.get_xticklabels():
label.set_fontsize(opts.fs)
plt.gca().minorticks_on()
# turn on grid
if opts.is_grid == True:
plt.grid(True)
axbot = plt.twiny()
axbot.xaxis.tick_bottom()
axbot.xaxis.set_label_position("bottom")
if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
axbot.set_xlim(xmin=opts.phase_start, xmax=2.0 * opts.phase_end)
else:
axbot.set_xlim(xmin=opts.phase_start, xmax=opts.phase_end)
for label in axbot.get_xticklabels():
label.set_fontsize(opts.fs)
plt.xlabel("Pulse phase", fontsize=opts.fs)
plt.gca().minorticks_on()
ayleft = plt.twinx()
ayleft.yaxis.tick_left()
ayleft.yaxis.set_label_position("left")
if (dump_pulses > 0 or dump_time > 0) and not opts.is_events:
ayleft.set_ylim(ymin=0.0, ymax=ncount * dump_time)
else:
ayleft.set_ylim(ymin=0.0, ymax=opts.window_time)
for label in ayleft.get_yticklabels():
label.set_fontsize(opts.fs)
plt.ylabel("Observing time (s)", fontsize=opts.fs)
plt.gca().minorticks_on()
# Determining the main (top/right) axes
if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1:
ax.set_xlim(xmin=bin_start, xmax=opts.nbins + bin_end)
else:
ax.set_xlim(xmin=bin_start, xmax=bin_end)
ax.set_ylim(ymin=0, ymax=ncount)
if not opts.is_no_top_axis:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
for label in ax.get_xticklabels():
label.set_fontsize(opts.fs)
plt.gca().minorticks_on()
            else:  # if we don't want to show it, turn everything off (we cannot duplicate the limits
                # and ticks from the bottom axis because then imshow won't work properly)
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_major_locator(ticker.NullLocator())
ax.xaxis.set_minor_locator(ticker.NullLocator())
if not opts.is_no_right_axis:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
for label in ax.get_yticklabels():
label.set_fontsize(opts.fs)
plt.gca().minorticks_on()
            else:  # if we don't want to show it, turn everything off (we cannot duplicate the limits
                # and ticks from the left axis because then imshow won't work properly)
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_major_locator(ticker.NullLocator())
ax.yaxis.set_minor_locator(ticker.NullLocator())
# Making the title
if opts.title != "":
ax.set_title(
"\n".join(opts.title.split("\\n")),
fontsize=opts.fs,
            x=float(opts.titlepos.split(",")[0]),
            y=float(opts.titlepos.split(",")[1]),
ha="%s" % (opts.titlepos.split(",")[-1]),
)
# Putting the label
if opts.label != "":
ax.annotate(
"\n".join(opts.label.split("\\n")),
fontsize=opts.fs,
xycoords="axes fraction",
xy=(0, 0),
            xytext=(float(opts.labelpos.split(",")[0]), float(opts.labelpos.split(",")[1])),
ha="%s" % (opts.labelpos.split(",")[-1]),
)
# turn on the Legend (only when is used in profile-only mode or timeseries mode)
if opts.legend != "" and (opts.is_timeseries or opts.is_profileonly):
prop = fm.FontProperties(size=opts.fs)
plt.legend(prop=prop, loc="%s" % (opts.loc))
# end of plotting
if opts.is_saveonly:
plt.savefig(pngname)
else:
plt.show()
# closing input-file
if not opts.is_events:
infile.close()
| 57,353 | 37.236 | 131 | py |
presto | presto-master/bin/plot_spd.py | #! /usr/bin/env python
from presto.singlepulse.plot_spd import main
if __name__ == '__main__':
main()
| 107 | 17 | 44 | py |
presto | presto-master/bin/psrfits_quick_bandpass.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import zip
import numpy as np
import matplotlib.pyplot as plt
import sys
from presto import psrfits
from optparse import OptionParser
usage = """usage: %prog [options] PSRFITS_FILEs
Calculate the average and stdev bandpass of PSRFITS search data"""
def write_bandpass(filenm, freqs, means, stdevs):
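    """Write the per-channel bandpass (channel number, frequency in MHz, mean,
    and standard deviation) as an ASCII table to the file 'filenm'."""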
of = open(filenm, 'w')
of.write("# Chan Freq(MHz) Mean StDev\n")
for ii, (freq, mean, stdev) in enumerate(zip(freqs, means, stdevs)):
of.write("%6d %9.3f %9.3f %9.3f\n" % (ii, freq, mean, stdev))
of.close()
def plot_bandpass(freqs, means, stdevs, outfile=None):
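    """Plot the mean bandpass (black) bracketed by +/- one standard deviation
    (red) versus frequency. Show the plot interactively if 'outfile' is None,
    otherwise save it to 'outfile'."""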
plt.plot(freqs, means, '-k',
freqs, means+stdevs, '-r',
freqs, means-stdevs, '-r')
plt.xlabel("Frequency (MHz)")
plt.ylabel("Relative power or Counts")
if outfile is None:
plt.show()
else:
plt.savefig(outfile)
def main():
parser = OptionParser(usage)
parser.add_option("-x", "--xwin", action="store_true", dest="xwin",
default=False, help="Show the bandpass in an x-window as well")
parser.add_option("-p", "--plot", action="store_true", dest="plot",
default=False, help="Show the bandpass in a .png plot as well")
parser.add_option("-n", "--nomods", action="store_true", dest="nomods",
default=False, help="Do not apply offsets/scales (default applies)")
parser.add_option("-w", "--weights", action="store_true", dest="weights",
default=False, help="Apply weights (default doesn't apply)")
parser.add_option("-f", "--first", type="int", dest="subfirst", default=0,
help="First subint to compute stats for")
parser.add_option("-s", "--skip", type="int", dest="subskip", default=10,
help="Number of subints to skip during stats calculations")
parser.add_option("-o", "--outfile", type="string", dest="outfile", default=None,
help="Output filename (default will be INFILE.bandpass)")
(opts, args) = parser.parse_args()
if len(args)==0:
parser.print_help()
sys.exit(0)
for infile in args:
print("Processing '%s'" % (infile))
pf = psrfits.PsrfitsFile(infile)
if opts.nomods:
# for a bandpass histogram of raw bits
htot = np.zeros(1<<pf.nbits)
subints = np.arange(opts.subfirst, pf.specinfo.num_subint,
opts.subskip).astype(np.int64)
means = np.zeros((len(subints), pf.nchan))
stdevs = np.zeros((len(subints), pf.nchan))
for ii, subint in enumerate(subints):
print("%.0f%%.." % (100.0 * float(subint) / pf.specinfo.num_subint), end=' ')
sys.stdout.flush()
specs = pf.read_subint(subint, apply_weights=opts.weights,
apply_scales=not opts.nomods,
apply_offsets=not opts.nomods,
apply_zero_off=not opts.nomods)
if opts.nomods:
h, b = np.histogram(specs.flatten(),
bins=np.arange((1<<pf.nbits)+1))
htot += h
means[ii] = specs.mean(axis=0)
stdevs[ii] = specs.std(axis=0)
print("%.0f%%" % (100.0))
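        # use the median over the sampled subints as a robust estimate of the
        # per-channel mean and standard deviation of the bandpass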
med_mean = np.median(means, axis=0)
med_stdev = np.median(stdevs, axis=0)
outfilenm = infile+".bandpass" if opts.outfile is None else opts.outfile
plotfilenm = outfilenm+".png" if opts.plot else None
if opts.xwin or opts.plot:
plot_bandpass(pf.freqs, med_mean, med_stdev, outfile=plotfilenm)
if opts.nomods:
htot = htot / htot.sum()
print("# Bits histogram")
print("# val fract")
print("#---------------")
for b, h in zip(b, htot):
print("%3d %6.4f" % (b, h))
write_bandpass(outfilenm, pf.freqs, med_mean, med_stdev)
if __name__=='__main__':
main()
| 4,103 | 43.129032 | 90 | py |
presto | presto-master/bin/pfd_for_timing.py | #!/usr/bin/env python
from __future__ import print_function
import sys
from presto import prepfold
if len(sys.argv) == 1:
sys.stderr.write("""usage: pfd_for_timing.py PFDFILES\n
This script returns 'true' or 'false' if a .pfd file can be
used for timing via get_TOAs.py or not.\n""")
sys.exit(0)
for pfdfile in sys.argv[1:]:
try:
pfd = prepfold.pfd(pfdfile)
if pfd.use_for_timing():
print("%s: true"%pfdfile)
else:
print("%s: false"%pfdfile)
except:
sys.stderr.write("Error: Can't check '%s'\n"%pfdfile)
| 589 | 25.818182 | 63 | py |
presto | presto-master/bin/GUPPI_drift_prep.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import range
import sys, os, random
from astropy.io import fits
from presto import sigproc
from presto import psr_utils as pu
import math as math
def guppi_subint_per_file(guppi_filenm):
"""
    guppi_subint_per_file(guppi_filenm):
        Return the number of subints present in the GUPPI FITS file.
"""
fitsfile = fits.open(guppi_filenm,memmap=True)
nsubint = fitsfile['SUBINT'].header['NAXIS2']
fitsfile.close()
return nsubint
def guppi_time_per_subint(guppi_filenm):
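    """
    guppi_time_per_subint(guppi_filenm):
        Return the duration in seconds of one subint (TBIN * NSBLK)
        of the GUPPI FITS file.
    """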
fitsfile = fits.open(guppi_filenm,memmap=True)
subint_hdr = fitsfile['SUBINT'].header
time_subint = subint_hdr['TBIN']*subint_hdr['NSBLK']
fitsfile.close()
return time_subint
debug = 0
if __name__=="__main__":
if (len(sys.argv) < 3):
print("usage: GUPPI_drift_prep.py NUM guppi_fits_files")
print(" NUM is the 'beam' number in the scan. It starts ")
print(" with 0 and goes to NMAX. If NUM is < 0, NMAX")
print(" is sent to STDOUT by the program.")
sys.exit()
new_obs_length = 141 #approximate number of seconds we want the new
#observations to last
infilenms = sys.argv[2:]
print(len(infilenms))
time_subint = guppi_time_per_subint(infilenms[0])
orig_N = int(math.floor(new_obs_length / time_subint)) # Number of subints to analyze at a time (~141 sec)
print(orig_N)
raw_N = orig_N # Number of subints to step through .fits files
overlap_factor = 0.5 # Overlap each orig_N samples by this fraction
overlap_subints = int(orig_N * overlap_factor)
print(overlap_subints)
nom_subint_per_file = 320
# Now see how much data we have to work with
subints_per_file = []
numinfiles = len(infilenms)
print(numinfiles)
for ii in range(numinfiles):
subints = guppi_subint_per_file(infilenms[ii])
if ((subints < nom_subint_per_file) and (ii < numinfiles-1)):
            print("Warning! '%s' only has %d subints!"%\
(infilenms[ii], subints))
print(" You need to fix that file!")
sys.exit(-1)
subints_per_file.append(subints)
total_subints = sum(subints_per_file)
print(total_subints)
num = int(sys.argv[1])
nmax = total_subints//overlap_subints-1
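    # Each 'beam' number NUM corresponds to a chunk of raw_N subints that
    # overlaps its neighbors by overlap_factor, so nmax is the index of the
    # last chunk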
if num < 0:
print(nmax)
sys.exit(0)
if num > nmax:
print("NUM > NMAX (%d)! Exiting!"%nmax)
sys.exit(-1)
# Now figure out which file is the first
first_subint = num * overlap_subints
accum_subints = 0
for ii in range(len(subints_per_file)):
next_accum_subints = accum_subints + subints_per_file[ii]
if next_accum_subints > first_subint:
first_filenm = infilenms[ii]
# How much data to skip in the first file
skip = first_subint - accum_subints
# How many total files we need
first_file_subints = subints_per_file[ii]-skip
numfiles = (raw_N - first_file_subints) // nom_subint_per_file + 1
if ((raw_N - first_file_subints) % nom_subint_per_file):
numfiles += 1
if debug:
print("first_filenum = ", ii)
print("1st subint = ", first_subint)
print("1st filenam = ", infilenms[ii])
print("skip = ", skip)
print("1st_file_samps = ", first_file_subints)
print("numfiles = ", numfiles)
break
else:
accum_subints += subints_per_file[ii]
# Now make a command line option for guppidrift2fil.py
tmpfilenm = "tmp%d.fil"%random.randint(0,2**30)
cmd = "guppidrift2fil.py --skip=%d --nsubint=%d -o %s " % \
(skip, raw_N, tmpfilenm)
for goodfile in infilenms[ii:ii+numfiles]:
cmd += "%s "%goodfile
print(cmd)
os.system(cmd)
# Now read the header to determine what the correct filename
# should be. Use that to rename the fil file.
filhdr, hdrlen = sigproc.read_header(tmpfilenm)
MJDi = int(filhdr['tstart'])
ra_rad = sigproc.ra2radians(filhdr['src_raj'])
ra_string = pu.coord_to_string(*pu.rad_to_hms(ra_rad))
dec_rad = sigproc.dec2radians(filhdr['src_dej'])
dec_string = pu.coord_to_string(*pu.rad_to_dms(dec_rad))
str_coords = "".join(ra_string.split(":")[:2])
if dec_rad >= 0.0: str_coords += "+"
str_coords += "".join(dec_string.split(":")[:2])
filfilenm = "GBT350drift_%d_%s.fil" % (MJDi, str_coords)
os.rename(tmpfilenm, filfilenm)
print("Renamed '%s' to '%s'." % (tmpfilenm, filfilenm))
| 4,721 | 35.604651 | 111 | py |
presto | presto-master/bin/guppidrift2fil.py | #!/usr/bin/env python
#Begun on 2/13/2014 from a copy of psrfits2fil.py //NEG-D//
from __future__ import print_function
from builtins import range
import numpy as np
from astropy.io import fits as pyfits
from presto import filterbank
import optparse
import sys
import os
import time
from math import *
# 2/13/2014 //NEG-D///
import pyslalib.slalib as sla
from presto import presto
fil_header_keys = [
"telescope_id",
"machine_id",
"data_type",
"rawdatafile",
"source_name",
"barycentric",
"pulsarcentric",
"az_start",
"za_start",
"src_raj",
"src_dej",
"tstart",
"tsamp",
"nbits",
"fch1",
"foff",
"nchans",
"nifs" ]
telescope_ids = {"Fake": 0, "Arecibo": 1, "ARECIBO 305m": 1, "Ooty": 2, "Nancay": 3,
"Parkes": 4, "Jodrell": 5, "GBT": 6, "GMRT": 7,
"Effelsberg": 8, "ATA": 9, "UTR-2": 10, "LOFAR": 11}
machine_ids = {"FAKE": 0, "PSPM": 1, "Wapp": 2, "WAPP": 2, "AOFTM": 3,
"BCPM1": 4, "OOTY": 5, "SCAMP": 6, "GBT Pulsar Spigot": 7,
"SPIGOT": 7, "BG/P": 11, "pdev": 11}
def read_4bit(data):
"""
Unpack 4-bit PSRFITS data that has been read in as bytes
by pyfits.
Input: array of unsigned 8-bit ints
Output: unpacked array
"""
first_piece = np.bitwise_and(15,data)
#second_piece = np.bitwise_and(240,data) / 16
second_piece = data >> 4
#return np.array([first_piece,second_piece]).transpose().flatten()
return np.dstack([first_piece,second_piece]).flatten()
def read_subint(fits,i_subint,nchan,nsamps, apply_weights=True, \
apply_scales=True, apply_offsets=True, input_nbits=4):
"""
Read a 4-bitized PSRFITS subint from a open pyfits file object.
Applys scales, weights, and offsets to the data.
Input: fits - open pyfits file object
i_subint - index of subint (first subint is 0)
nchan - number of frequency channels
nsamps - number of time samples per subint
apply_weights - If True, apply weights.
(Default: apply weights)
apply_scales - If True, apply scales.
(Default: apply scales)
apply_offsets - If True, apply offsets.
(Default: apply offsets)
Output: subint data with scales, weights, and offsets
applied in float32 dtype with shape (nsamps,nchan).
"""
if input_nbits == 4:
data = read_4bit(fits['SUBINT'].data[i_subint]['DATA'])
elif input_nbits > 4:
data = fits['SUBINT'].data[i_subint]['DATA']
    if apply_offsets:
offsets = fits['SUBINT'].data[i_subint]['DAT_OFFS']
else:
offsets = 0
if apply_scales:
scales = fits['SUBINT'].data[i_subint]['DAT_SCL']
else:
scales = 1
if apply_weights:
weights = fits['SUBINT'].data[i_subint]['DAT_WTS']
else:
weights = 1
data = data.reshape((nsamps,nchan))
data_wso = ((data * scales) + offsets) * weights
return data_wso
def translate_header(fits_file,skip,output_subints):
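    """Build a SIGPROC filterbank header dictionary from the PSRFITS primary
    and SUBINT headers of 'fits_file'. The start time (tstart) is offset by
    'skip' subints, and the source position (src_raj/src_dej) is recomputed
    with pyslalib for the midpoint of the 'output_subints' subints that will
    be written out, since the telescope position on the sky drifts during
    the scan.
    """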
fits_hdr = fits_file['PRIMARY'].header
subint_hdr = fits_file['SUBINT'].header
fil_header = dict.fromkeys(fil_header_keys,None)
if fits_hdr['TELESCOP'] in telescope_ids:
fil_header["telescope_id"] = telescope_ids[fits_hdr['TELESCOP']]
else:
fil_header["telescope_id"] = -1
if fits_hdr['BACKEND'] in machine_ids:
fil_header["machine_id"] = machine_ids[fits_hdr['BACKEND']]
else:
fil_header["machine_id"] = -1
fil_header["data_type"] = 1 # filterbank
# Get filename in a way that is safe for old versions of pyfits
# (i.e. using private attribute)
#Let's get the center of our intended output file.
#Right now I'm assuming we're in search mode and that we'll take
#The subint offset from the first file and add skip +
#output_subints / 2 subints to get the position of the telescope.
time_subint = subint_hdr['TBIN']*subint_hdr['NSBLK']
time_offset = subint_hdr['NSUBOFFS'] * time_subint + \
skip * time_subint
#Let's set some variables that we'll use to update our position
#via pyslalib
type = "A" #We need this for SLA
dtmp = 0.0 #Delta UT
atm = 1010.0 #Local atmospheric pressure in mB
temp = 283.0 #Local temperature in DegK
humid = 0.5 #Local relative humidity in range 0.0-1.0
    tlr = 0.0065 #Tropospheric lapse rate (DegK/meter)
eq = 2000.0 #Julian epoch of mean place
    microns = 3e8 / (fits_hdr['OBSFREQ']*1e6) * 1e6  # observing wavelength in microns
az = radians(fits_file['SUBINT'].data[0]['TEL_AZ']) #azimuth
za = radians(fits_file['SUBINT'].data[0]['TEL_ZEN']) #zenith
tstart = fits_hdr['STT_IMJD'] + \
fits_hdr['STT_SMJD']/86400.0 + \
fits_hdr['STT_OFFS']/86400.0 + \
time_offset/86400.0 #tstart of new file
#Now let's get the MJD of the center of the new file
#so that we can update the RA/DEC for the new file
MJD = tstart + (time_subint * output_subints / 2)/86400.0
#Ok, I think we're set to update our position. Let's use slalib
#to get the position of the telescope
telecope,telescope_name,tel_lon,tel_lat,tel_hgt = \
sla.sla_obs(0,fits_hdr['TELESCOP'])
#Now we have tel_lon,tel_lat and tel_ght. We need to flip the lon
tel_lon = -tel_lon
#Let's get the geocentric apparent RA and DEC
rap,dap = sla.sla_oap(type,az,za,MJD,dtmp,tel_lon,tel_lat,tel_hgt,\
dtmp,dtmp,temp,atm,humid,microns,tlr)
#OK, for reals we are almost there Let's convert to FK5 (mean place)
rmn,dmn = sla.sla_amp(rap,dap,MJD,eq)
hours,hmin,hsec = presto.hours2hms(degrees(rmn)/15.0)
deg,dmin,dsec = presto.deg2dms(degrees(dmn))
fil_header["src_raj"] = float(hours*10000.0 + hmin*100.0 + hsec)
fil_header["src_dej"] = float(deg*10000.0 + dmin*100.0 + dsec)
fn = fits_file.filename()
fil_header["rawdatafile"] = os.path.basename(fn)
fil_header["source_name"] = fits_hdr['SRC_NAME']
fil_header["barycentric"] = 0 # always not barycentered?
fil_header["pulsarcentric"] = 0 # whats pulsarcentric?
fil_header["az_start"] = fits_file['SUBINT'].data[0]['TEL_AZ']
fil_header["za_start"] = fits_file['SUBINT'].data[0]['TEL_ZEN']
# fil_header["src_raj"] = float(fits_hdr['RA'].replace(':',''))
# fil_header["src_dej"] = float(fits_hdr['DEC'].replace(':',''))
fil_header["tstart"] = fits_hdr['STT_IMJD'] + \
fits_hdr['STT_SMJD']/86400.0 + \
fits_hdr['STT_OFFS']/86400.0 + \
time_offset/86400.0
fil_header["tsamp"] = subint_hdr['TBIN']
fil_header["nbits"] = None # set by user. Input should always be 4-bit.
# first channel (fch1) in sigproc is the highest freq
# foff is negative to signify this
fil_header["fch1"] = fits_hdr['OBSFREQ'] + \
np.abs(fits_hdr['OBSBW'])/2.0 - \
np.abs(subint_hdr['CHAN_BW'])/2.0
fil_header["foff"] = -1.0*np.abs(subint_hdr['CHAN_BW'])
fil_header["nchans"] = subint_hdr['NCHAN']
fil_header["nifs"] = subint_hdr['NPOL']
return fil_header
def main(fits_fn, outfn, nbits, \
apply_weights, apply_scales, apply_offsets,
output_subints, skip):
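    """Convert a span of GUPPI PSRFITS search-mode data into a single SIGPROC
    filterbank file. Writes 'output_subints' subints starting 'skip' subints
    into the data, optionally applying weights/scales/offsets, rescaling to
    'nbits' bits per sample if needed, and flipping the band if the input
    channels are in ascending frequency order.
    """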
start = time.time()
firstfits = pyfits.open(fits_fn[0],memmap=True)
nchan = firstfits['SUBINT'].header['NCHAN']
nsamps = firstfits['SUBINT'].header['NSBLK']
nsubints = firstfits['SUBINT'].header['NAXIS2']
fil_header = translate_header(firstfits,skip,output_subints)
fil_header['nbits'] = nbits
outfil = filterbank.create_filterbank_file(outfn, fil_header, \
nbits=nbits)
# if frequency channels are in ascending order
# band will need to be flipped
if firstfits['SUBINT'].header['CHAN_BW'] > 0:
flip_band=True
print("\nFits file frequencies in ascending order.")
print("\tFlipping frequency band.\n")
else:
flip_band=False
# check nbits for input
input_nbits = firstfits['SUBINT'].header['NBITS']
if input_nbits < 4:
raise ValueError('Does not support %d-bit data' % input_nbits)
if nbits != 32:
print("\nCalculating statistics on first subintegration...")
subint0 = read_subint(firstfits,0,nchan,nsamps, \
apply_weights, apply_scales, apply_offsets, \
input_nbits=input_nbits)
#new_max = np.mean(subint0) + 3*np.std(subint0)
new_max = 3 * np.median(subint0)
print("\t3*median =",new_max)
if new_max > 2.0**nbits:
scale = True
scale_fac = new_max / ( 2.0**nbits )
print("\tScaling data by",1/scale_fac)
print("\tValues larger than",new_max,"(pre-scaling) "\
"will be set to",2.0**nbits - 1,"\n")
else:
scale = False
scale_fac = 1
print("\tNo scaling necessary")
print("\tValues larger than",2.0**nbits-1,"(2^nbits) will "\
"be set to ",2.0**nbits-1,"\n")
else:
scale_fac = 1
print("\nNo scaling necessary for 32-bit float output file.")
firstfits.close()
fits = [pyfits.open(filename,memmap=True) for filename in fits_fn]
print("Writing data...")
sys.stdout.flush()
oldpcnt = ""
for i in range(skip+1,output_subints+skip+1):
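        # assumes each input FITS file contains 320 subints: 'index' selects
        # the input file and 'subint_in_file' the subint within that file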
index = (i-1)//320
subint_in_file = i-1-(index * 320)
subint = read_subint(fits[index],subint_in_file,nchan,nsamps, \
apply_weights, apply_scales, apply_offsets, \
input_nbits=input_nbits)
if flip_band:
subint = np.fliplr(subint)
subint /= scale_fac
outfil.append_spectra(subint)
        pcnt = "%d" % ((i - skip)*100.0/output_subints)
if pcnt != oldpcnt:
sys.stdout.write("% 4s%% complete\r" % pcnt)
sys.stdout.flush()
print("Done ")
outfil.close()
print("Runtime:",time.time() - start)
if __name__=='__main__':
parser = optparse.OptionParser(prog='guppidrift2fil.py', \
version="v0.1 Nate Garver-Daniels (Mar 2014)", \
usage = "usage: %prog [options] input_fits files to read")
parser.add_option("-n",dest='nbits', action='store',
default=8, type='int',
help="The number of bits in the output .fil file. " +\
"Default=8")
parser.add_option("-o",dest='outfn',action='store',
default=None, type='string',
help="The filename of the output filterbank file. " +\
"Default: same as .fits input but with .fil extn")
parser.add_option("--noweights", dest='apply_weights', \
default=True, action="store_false", \
help="Do not apply weights when converting data.")
parser.add_option("--noscales", dest='apply_scales', \
default=True, action="store_false", \
help="Do not apply scales when converting data.")
parser.add_option("--nooffsets", dest='apply_offsets', \
default=True, action="store_false", \
help="Do not apply offsets when converting data.")
parser.add_option("--nsubint", dest='number_subints',\
default="320", type='int',\
help="Specify the number of subints to write.")
parser.add_option("--skip", dest='skip',action='store', \
default="0", type='int',\
help="Specify number of seconds to skip.")
(options, args) = parser.parse_args()
fits_fn = args
print(fits_fn)
if options.outfn:
outfn = options.outfn
else:
parser.print_help()
parser.error("You must supply a valid output file with -o")
main(fits_fn, outfn, options.nbits, options.apply_weights, \
options.apply_scales, options.apply_offsets, \
options.number_subints, options.skip)
| 12,480 | 37.403077 | 84 | py |
presto | presto-master/bin/sortwappfiles.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import range
import sys, re
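# Group the WAPP data files given on the command line by WAPP number (1-7),
# sort each group, and print the filenames interleaved so that, for each file
# number, the files from all WAPPs appear together in WAPP order.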
maxwappnum = 7
wappfiles = {}
for filename in sys.argv[1:]:
for wappnum in range(1, maxwappnum + 1):
if ((wappnum == 1 and re.search("\.wapp\.", filename)) or \
(wappnum > 1 and re.search("\.wapp%d?\." % wappnum, filename))):
if wappnum in wappfiles:
wappfiles[wappnum].append(filename)
else:
wappfiles[wappnum] = [filename]
break
for key in list(wappfiles.keys()):
numfiles = len(wappfiles[key])
wappfiles[key].sort()
for filenum in range(numfiles):
    for wappnum in range(1, maxwappnum + 1):
        if wappnum in wappfiles:
            print(wappfiles[wappnum][filenum], end=' ')
| 794 | 29.576923 | 80 | py |
presto | presto-master/bin/make_spd.py | #!/usr/bin/env python
"""
make_spd.py
Make single pulse plots which include the waterfall plots and dedispersed time series with Zero-DM On/Off.
Also includes Signal-to-noise vs DM and DM vs Time subplots.
Usage on the command line:
python make_spd.py [OPTIONS] <psrfits file> <singlepulse files>
Chitrang Patel - May. 21, 2015 -- Updated on June 10 2016
"""
from __future__ import print_function
from builtins import map
from builtins import range
from time import strftime
import numpy as np
import optparse
from presto import waterfaller
from presto import psr_utils
# todo: (gijs) plot_spd is a script, not a module.
import plot_spd
from presto.singlepulse import spcand as spcand
from presto.singlepulse import spio as spio
from presto import psrfits
from presto import filterbank
DEBUG = True
def print_debug(msg):
if DEBUG:
print(msg)
def waterfall_array(rawdatafile, start, duration, dm, nbins, nsub, subdm, zerodm, \
downsamp, scaleindep, width_bins, mask, maskfn, bandpass_corr):
"""
Runs the waterfaller. If dedispersing, there will be extra bins added to the 2D plot.
Inputs:
Inputs required for the waterfaller. dm, nbins, etc.
Outputs:
data: 2D array as an "object"
array: 2D array ready to be plotted by sp_pgplot.plot_waterfall(array).
"""
data, bins, nbins, start = waterfaller.waterfall(rawdatafile, start, duration, dm=dm, nbins=nbins, \
nsub=nsub, subdm=subdm, zerodm=zerodm, \
downsamp=downsamp, scaleindep=scaleindep, \
width_bins=width_bins, mask=mask, \
maskfn=maskfn, bandpass_corr=bandpass_corr)
array = np.array(data.data)
if dm is not None: # If dedispersing the data, extra bins will be added. We need to cut off the extra bins to get back the appropriate window size.
ragfac = float(nbins)/bins
dmrange, trange = array.shape
nbinlim = np.int64(trange * ragfac)
else:
nbinlim = nbins
array = array[..., :nbinlim]
array = (array[::-1]).astype(np.float16)
return data, array
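# A minimal sketch of calling waterfall_array directly (all values here are
# hypothetical; it assumes `rawdatafile` is an open psrfits/filterbank instance
# and `maskfn` points to an rfifind .mask file):
#   data, arr = waterfall_array(rawdatafile, start=10.0, duration=0.1, dm=56.7,
#                               nbins=128, nsub=64, subdm=56.7, zerodm=False,
#                               downsamp=1, scaleindep=False, width_bins=1,
#                               mask=True, maskfn="obs_rfifind.mask",
#                               bandpass_corr=True)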
def make_spd_from_file(spdcand, rawdatafile, \
txtfile, maskfile, \
min_rank, group_rank, \
plot, just_waterfall, \
integrate_ts, integrate_spec, disp_pulse, \
loc_pulse, nsub, \
maxnumcands, \
basename, \
mask=False, bandpass_corr=True, barytime=True, \
man_params=None):
"""
Makes spd files from output files of rratrap.
Inputs:
spdcand: spcand parameters instance (read in spcand.params)
rawdatafile: psrfits file instance
txtfile: rratrap output file (groups.txt file)
maskfile: rfifind mask file. need this file if you want to remove the bandpass
or use rfifind mask information.
min_rank: plot all groups with rank more than this. min 1, max 6
group_rank: plot groups ranked whatever you specify
plot: do you want to produce the plots as well?
just_waterfall: Do you just want to make the waterfall plots.
integrate_ts: Do you want to display the dedispersed time series in the plot?
integrate_spec: Do you want to display the pulse spectrum in the plot?
disp_pulse: Do you want to see the inset dispersed pulse in the plot?
loc_pulse: Fraction of the window length where the pulse is located.(eg. 0.25 = 1/4th of the way in.
0.5 = middle of the plot)
maxnumcands: What is the maximum number of candidates you would like to generate?
basename: output basename of the file. Appended with _DM_TIME(s)_RANK.spd
Optional arguments:
mask: Do you want to mask out rfi contaminated channels?
bandpass_corr: Do you want to remove the bandpass?
barytime: Is the given time(s) barycentric?
man_params: Do you want to specify the parameters for waterfalling
manually? If yes, I suggest using the function make_spd_from_man_params().
(I suggest giving it the rratrap output file)
Outputs:
Binary npz file containing the necessary arrays and header information to generate the spd plots.
"""
numcands=0 # counter for max number of candidates
    loop_must_break = False # don't break the loop unless the number of candidates reaches maxnumcands
files = spio.get_textfile(options.txtfile)
if group_rank:
groups=[group_rank-1]
else:
groups = [i for i in range(6) if(i>=min_rank)][::-1]
for group in groups:
rank = group+1
if files[group] != "Number of rank %i groups: 0 "%rank:
values = spio.split_parameters(rank, txtfile)
lis = np.where(files == '\tRank: %i.000000'%rank)[0]
for ii in range(len(values)):
#### Arrays for Plotting DM vs SNR
dm_list, time_list, dm_arr, sigma_arr, width_arr = spio.read_RRATrap_info(txtfile, lis[ii], rank)
# Array for Plotting Dedispersed waterfall plot - zerodm - OFF
spdcand.read_from_file(values[ii], rawdatafile.tsamp, rawdatafile.specinfo.N, \
rawdatafile.frequencies[0], rawdatafile.frequencies[-1], \
rawdatafile, loc_pulse=loc_pulse, dedisp = True, \
scaleindep = None, zerodm = None, mask = mask, \
barytime=barytime, \
nsub = nsub, bandpass_corr = bandpass_corr)
#make an array to store header information for the spd files
temp_filename = basename+"_DM%.1f_%.1fs_rank_%i"%(spdcand.subdm, \
spdcand.topo_start_time, rank)
print_debug("Running waterfaller with Zero-DM OFF...")
# Add additional information to the header information array
data, Data_dedisp_nozerodm = waterfall_array(rawdatafile, spdcand.start, \
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \
spdcand.subdm, spdcand.zerodm, spdcand.downsamp, \
spdcand.scaleindep, spdcand.width_bins, \
spdcand.mask, maskfile, spdcand.bandpass_corr)
text_array = np.array([args[0], rawdatafile.specinfo.telescope, \
rawdatafile.specinfo.ra_str, rawdatafile.specinfo.dec_str, \
rawdatafile.specinfo.start_MJD[0], \
rank, spdcand.nsub, spdcand.nbins, spdcand.subdm, \
spdcand.sigma, spdcand.sample_number, spdcand.duration, \
spdcand.width_bins, spdcand.pulse_width, rawdatafile.tsamp,\
rawdatafile.specinfo.T, spdcand.topo_start_time, data.starttime, \
data.dt,data.numspectra, data.freqs.min(), data.freqs.max()])
#### Array for plotting Dedispersed waterfall plot zerodm - ON
print_debug("Running Waterfaller with Zero-DM ON...")
zerodm=True
data, Data_dedisp_zerodm = waterfall_array(rawdatafile, spdcand.start, \
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \
spdcand.subdm, zerodm, spdcand.downsamp, \
spdcand.scaleindep, spdcand.width_bins, \
spdcand.mask, maskfile, spdcand.bandpass_corr)
####Sweeped without zerodm
spdcand.read_from_file(values[ii], rawdatafile.tsamp, rawdatafile.specinfo.N, \
rawdatafile.frequencies[0], rawdatafile.frequencies[-1], \
rawdatafile, loc_pulse=loc_pulse, dedisp = None, \
scaleindep = None, zerodm = None, mask = mask, \
barytime=barytime, \
nsub = nsub, bandpass_corr = bandpass_corr)
data, Data_nozerodm = waterfall_array(rawdatafile, spdcand.start, \
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \
spdcand.subdm, spdcand.zerodm, spdcand.downsamp, \
spdcand.scaleindep, spdcand.width_bins, \
spdcand.mask, maskfile, spdcand.bandpass_corr)
text_array = np.append(text_array, spdcand.sweep_duration)
text_array = np.append(text_array, data.starttime)
text_array = np.append(text_array, spdcand.bary_start_time)
text_array = np.append(text_array, man_params)
# Array to Construct the sweep
if spdcand.sweep_dm is not None:
ddm = spdcand.sweep_dm-data.dm
delays = psr_utils.delay_from_DM(ddm, data.freqs)
delays -= delays.min()
delays_nozerodm = delays
freqs_nozerodm = data.freqs
# Sweeped with zerodm-on
zerodm = True
#downsamp_temp = 1
data, Data_zerodm = waterfall_array(rawdatafile, spdcand.start, \
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \
spdcand.subdm, zerodm, spdcand.downsamp, \
spdcand.scaleindep, spdcand.width_bins, \
spdcand.mask, maskfile, spdcand.bandpass_corr)
# Saving the arrays into the .spd file.
with open(temp_filename+".spd", 'wb') as f:
np.savez_compressed(f, \
Data_dedisp_nozerodm = Data_dedisp_nozerodm.astype(np.float16),\
Data_dedisp_zerodm = Data_dedisp_zerodm.astype(np.float16),\
Data_nozerodm = Data_nozerodm.astype(np.float16),\
delays_nozerodm = delays_nozerodm, \
freqs_nozerodm = freqs_nozerodm,\
Data_zerodm = Data_zerodm.astype(np.float16), \
dm_arr= list(map(np.float16, dm_arr)),\
sigma_arr = list(map(np.float16, sigma_arr)), \
width_arr =list(map(np.uint8, width_arr)),\
dm_list= list(map(np.float16, dm_list)), \
time_list = list(map(np.float16, time_list)), \
text_array = text_array)
#### Arrays for Plotting DM vs Time is in plot_spd.plot(...)
if plot:
print_debug("Now plotting...")
plot_spd.plot(temp_filename+".spd", args[1:], \
spec_width=1.5, loc_pulse=loc_pulse, \
xwin=False, outfile=basename, \
just_waterfall=just_waterfall, \
integrate_spec=integrate_spec, \
integrate_ts=integrate_ts, \
disp_pulse=disp_pulse, tar = None)
print_debug("Finished plot %i " %ii+strftime("%Y-%m-%d %H:%M:%S"))
numcands+= 1
print_debug('Finished sp_candidate : %i'%numcands)
if numcands >= maxnumcands: # Max number of candidates to plot 100.
loop_must_break = True
break
if loop_must_break:
break
print_debug("Finished group %i... "%rank+strftime("%Y-%m-%d %H:%M:%S"))
print_debug("Finished running waterfaller... "+strftime("%Y-%m-%d %H:%M:%S"))
def make_spd_from_man_params(spdcand, rawdatafile, \
txtfile, maskfile, \
plot, just_waterfall, \
subdm, dm, sweep_dm, \
sigma, \
start_time, duration, \
width_bins, nbins, downsamp, \
nsub, \
scaleindep, \
spec_width, loc_pulse, \
integrate_ts, integrate_spec, disp_pulse, \
basename, \
mask, bandpass_corr, barytime, man_params):
"""
Makes spd files from output files of rratrap.
Inputs:
spdcand: spcand parameters instance (read in spcand.params)
rawdatafile: psrfits file instance
txtfile: rratrap output file (groups.txt file)
maskfile: rfifind mask file. need this file if you want to remove the bandpass
or use rfifind mask information.
plot: do you want to produce the plots as well?
just_waterfall: Do you just want to make the waterfall plots.
subdm: DM to use when subbanding.
dm: DM to use when dedispersing data for plot.
sweep_dm: Show the frequency sweep using this DM.
sigma: signal-to-noise of the pulse
start_time: start time of the data to be read in for waterfalling.
duration: duration of data to be waterfalled.
width_bins: Smooth each channel/subband with a boxcar width_bins wide.
nbins: Number of time bins to plot. This option overrides
the duration argument.
downsamp: Factor to downsample in time by. Default: Don't downsample.
nsub: Number of subbands to use. Must be a factor of number of channels.
scaleindep:Do you want to scale each subband independently?(Type: Boolean)
spec_width: Twice this number times the pulse_width around the pulse to consider for the spectrum
loc_pulse: Fraction of the window length where the pulse is located.(eg. 0.25 = 1/4th of the way in.
0.5 = middle of the plot)
integrate_ts: Do you want to display the dedispersed time series in the plot?
integrate_spec: Do you want to display the pulse spectrum in the plot?
disp_pulse: Do you want to see the inset dispersed pulse in the plot?
basename: output basename of the file. Appended with _DM_TIME(s)_RANK.spd
mask: Do you want to mask out rfi contaminated channels?
bandpass_corr: Do you want to remove the bandpass?
barytime: Is the given time(s) barycentric?
man_params: Do you want to specify the parameters for waterfalling
manually? If yes, I suggest using the function make_spd_from_man_params().
(I suggest giving it the rratrap output file)
Outputs:
Binary npz file containing the necessary arrays and header information to generate the spd plots.
"""
rank = None
if not nsub:
nsub = rawdatafile.nchan
# Array for Plotting Dedispersed waterfall plot - zerodm - OFF
spdcand.manual_params(subdm, dm, sweep_dm, sigma, start_time, \
width_bins, downsamp, duration, nbins, nsub, rawdatafile.tsamp, \
rawdatafile.specinfo.N, \
rawdatafile.frequencies[0], rawdatafile.frequencies[-1], rawdatafile, \
loc_pulse=loc_pulse, dedisp=True, scaleindep=False, zerodm=False, \
mask=mask, barytime=barytime, bandpass_corr=bandpass_corr)
#make an array to store header information for the spd files
temp_filename = basename+"_DM%.1f_%.1fs"%(spdcand.subdm, spdcand.topo_start_time)
print_debug("Running waterfaller with Zero-DM OFF...")
data, Data_dedisp_nozerodm = waterfall_array(rawdatafile, spdcand.start, \
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \
spdcand.subdm, spdcand.zerodm, spdcand.downsamp, \
spdcand.scaleindep, spdcand.width_bins, \
spdcand.mask, maskfile, spdcand.bandpass_corr)
# Add additional information to the header information array
text_array = np.array([args[0], rawdatafile.specinfo.telescope, \
rawdatafile.specinfo.ra_str, rawdatafile.specinfo.dec_str, \
rawdatafile.specinfo.start_MJD[0], rank, \
spdcand.nsub, spdcand.nbins, \
spdcand.subdm, spdcand.sigma, spdcand.sample_number, \
spdcand.duration, spdcand.width_bins, spdcand.pulse_width, \
rawdatafile.tsamp, rawdatafile.specinfo.T, spdcand.topo_start_time, \
data.starttime, data.dt,data.numspectra, data.freqs.min(), \
data.freqs.max()])
#### Array for plotting Dedispersed waterfall plot zerodm - ON
print_debug("Running Waterfaller with Zero-DM ON...")
zerodm=True
data, Data_dedisp_zerodm = waterfall_array(rawdatafile, spdcand.start, \
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \
spdcand.subdm, zerodm, spdcand.downsamp, \
spdcand.scaleindep, spdcand.width_bins, \
spdcand.mask, maskfile, spdcand.bandpass_corr)
####Sweeped without zerodm
spdcand.manual_params(subdm, dm, sweep_dm, sigma, start_time, \
width_bins, downsamp, duration, nbins, nsub, rawdatafile.tsamp, \
rawdatafile.specinfo.N, \
rawdatafile.frequencies[0], rawdatafile.frequencies[-1], rawdatafile, \
loc_pulse=loc_pulse, dedisp=None, scaleindep=None, zerodm=None, mask=mask, \
barytime=barytime, bandpass_corr=bandpass_corr)
data, Data_nozerodm = waterfall_array(rawdatafile, spdcand.start, \
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \
spdcand.subdm, spdcand.zerodm, spdcand.downsamp, \
spdcand.scaleindep, spdcand.width_bins, \
spdcand.mask, maskfile, spdcand.bandpass_corr)
text_array = np.append(text_array, spdcand.sweep_duration)
text_array = np.append(text_array, data.starttime)
text_array = np.append(text_array, spdcand.bary_start_time)
text_array = np.append(text_array, man_params)
# Array to Construct the sweep
if spdcand.sweep_dm is not None:
ddm = spdcand.sweep_dm-data.dm
delays = psr_utils.delay_from_DM(ddm, data.freqs)
delays -= delays.min()
delays_nozerodm = delays
freqs_nozerodm = data.freqs
# Sweeped with zerodm-on
zerodm = True
#downsamp_temp = 1
data, Data_zerodm = waterfall_array(rawdatafile, spdcand.start, \
spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \
spdcand.subdm, zerodm, spdcand.downsamp, \
spdcand.scaleindep, spdcand.width_bins, \
spdcand.mask, maskfile, spdcand.bandpass_corr)
with open(temp_filename+".spd", 'wb') as f:
np.savez_compressed(f, \
Data_dedisp_nozerodm = Data_dedisp_nozerodm.astype(np.float16),\
Data_dedisp_zerodm = Data_dedisp_zerodm.astype(np.float16),\
Data_nozerodm = Data_nozerodm.astype(np.float16),\
delays_nozerodm = delays_nozerodm, \
freqs_nozerodm = freqs_nozerodm,\
Data_zerodm = Data_zerodm.astype(np.float16), \
text_array = text_array)
#### Arrays for Plotting DM vs Time is in plot_spd.plot(...)
if plot:
print_debug("Now plotting...")
plot_spd.plot(temp_filename+".spd", args[1:], \
spec_width=spec_width, loc_pulse=loc_pulse, xwin=False, \
outfile = basename, just_waterfall=just_waterfall, \
integrate_spec=integrate_spec, integrate_ts=integrate_ts, \
disp_pulse=disp_pulse, tar = None)
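# A hypothetical command-line sketch for this manual-parameter mode (values and
# file names are illustrative only):
#   python make_spd.py --use_manual_params -T 120.0 -t 0.2 -d 56.8 --subdm 56.8 \
#          --mask --maskfile obs_rfifind.mask --show-ts --show-spec \
#          obs.fits obs_DM*.singlepulse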
def main():
fn = args[0]
if fn.endswith(".fil"):
# Filterbank file
filetype = "filterbank"
print_debug("Reading filterbank file..")
rawdatafile = filterbank.FilterbankFile(fn)
basename = fn[:-4]
elif fn.endswith(".fits"):
# PSRFITS file
filetype = "psrfits"
print_debug("Reading PSRFITS file..")
rawdatafile = psrfits.PsrfitsFile(fn)
basename = fn[:-5]
else:
raise ValueError("Cannot recognize data file type from "
"extension. (Only '.fits' and '.fil' "
"are supported.)")
if options.outbasenm:
basename=options.outbasenm
spdcand = spcand.params()
if not options.man_params:
print_debug('Maximum number of candidates to plot: %i'%options.maxnumcands)
make_spd_from_file(spdcand, rawdatafile, \
options.txtfile, options.maskfile, \
options.min_rank, options.group_rank, \
options.plot, options.just_waterfall, \
options.integrate_ts, options.integrate_spec, options.disp_pulse, \
options.loc_pulse, options.nsub, \
options.maxnumcands, \
basename, \
mask=options.mask, barytime=options.barytime, \
bandpass_corr=options.bandpass_corr)
else:
print_debug("Making spd files based on mannual parameters. I suggest" \
"reading in parameters from the groups.txt file.")
make_spd_from_man_params(spdcand, rawdatafile, \
options.txtfile, options.maskfile, \
options.plot, options.just_waterfall, \
options.subdm, options.dm, options.sweep_dms, \
options.sigma, \
options.start, options.duration, \
options.width_bins, options.nbins, options.downsamp, \
options.nsub, \
options.scaleindep, \
options.spec_width, options.loc_pulse, \
options.integrate_ts, options.integrate_spec, options.disp_pulse, \
basename, \
options.mask, options.bandpass_corr, options.barytime, \
options.man_params)
if __name__=='__main__':
parser = optparse.OptionParser(prog="sp_pipeline..py", \
version=" Chitrang Patel (May. 12, 2015)", \
usage="%prog INFILE(PsrFits FILE, SINGLEPULSE FILES)", \
description="Create single pulse plots to show the " \
"frequency sweeps of a single pulse, " \
"DM vs time, and SNR vs DM,"\
"in psrFits data.")
parser.add_option('--groupsfile', dest='txtfile', type='string', \
help="Give the groups.txt file to read in the groups information.", \
default=None)
parser.add_option('--maskfile', dest='maskfile', type='string', \
help="Mask file produced by rfifind. Used for " \
"masking and bandpass correction.", \
default=None)
parser.add_option('--mask', dest='mask', action="store_true", \
help="Mask data using rfifind mask (Default: Don't mask).", \
default=False)
parser.add_option('--numcands', dest='maxnumcands', type='int', \
help="Maximum number of candidates to plot. (Default: 100).", \
default=100)
parser.add_option('--subdm', dest='subdm', type='float', \
help="DM to use when subbanding. (Default: " \
"same as --dm)", default=None)
parser.add_option('-s', '--nsub', dest='nsub', type='int', \
help="Number of subbands to use. Must be a factor " \
"of number of channels. (Default: " \
"number of channels)", default=None)
parser.add_option('--sigma', dest='sigma', type='float', \
help="Signal-to-Noise of the pulse." \
"(Default: Do not specify. In this case you must specify the " \
"number of subbands.)", \
default=None)
parser.add_option('-d', '--dm', dest='dm', type='float', \
help="DM to use when dedispersing data for plot. " \
"(Default: 0 pc/cm^3)", default=0.0)
parser.add_option('--show-ts', dest='integrate_ts', action='store_true', \
help="Plot the time series. " \
"(Default: Dont show the time series)", default=False)
parser.add_option('--show-spec', dest='integrate_spec', action='store_true', \
help="Plot the spectrum. " \
"(Default: Do not show the spectrum)", default=False)
parser.add_option("--spec-width", dest="spec_width", type="float", help="Twice " \
"this number times the pulse width is the window around the " \
"pulse considered for the spectrum. (Default: 1.5)", \
default=1.5)
parser.add_option("--loc", dest="loc_pulse", type="float", help="Fraction of " \
"the window length where the pulse is located." \
"(Default: 0.5: half way in.)", \
default=0.5)
parser.add_option('--show-sweep', dest='disp_pulse', action='store_true', \
help="Plot the inset dispersed pulse. " \
"(Default: Do not show the dispersed pulse)", default=False)
parser.add_option('--bandpass', dest='bandpass_corr', action='store_true', \
help="Correct for the bandpass. Requires an rfifind " \
"mask provided by --mask option." \
"(Default: Do not remove bandpass)", default=False)
parser.add_option('-T', '--start-time', dest='start', type='float', \
help="Time into observation (in seconds) at which " \
"to start plot.")
parser.add_option('--notopo', dest='barytime', action='store_false', \
help="Do not topocenter the given time. Use this option " \
"only if the given time is topocentric." \
"(Default: topocenter the given barycentric time)", default=True)
parser.add_option('-t', '--duration', dest='duration', type='float', \
help="Duration (in seconds) of plot.")
parser.add_option('-n', '--nbins', dest='nbins', type='int', \
help="Number of time bins to plot. This option takes " \
"precedence over -t/--duration if both are " \
"provided.")
parser.add_option('--width-bins', dest='width_bins', type='int', \
help="Smooth each channel/subband with a boxcar " \
"this many bins wide. (Default: Don't smooth)", \
default=1)
parser.add_option('--sweep-dm', dest='sweep_dms', type='float', \
action='append', \
help="Show the frequency sweep using this DM. " \
"(Default: Don't show sweep)", default=[])
parser.add_option('--sweep-posn', dest='sweep_posns', type='float', \
action='append', \
help="Show the frequency sweep at this position. " \
"The position refers to the high-frequency " \
"edge of the plot. Also, the position should " \
"be a number between 0 and 1, where 0 is the " \
"left edge of the plot. "
"(Default: 0)", default=None)
parser.add_option('--downsamp', dest='downsamp', type='int', \
help="Factor to downsample data by. (Default: 1).", \
default=1)
parser.add_option('--scaleindep', dest='scaleindep', action='store_true', \
help="If this flag is set scale each channel " \
"independently. (Default: Scale using " \
"global maximum.)", \
default=False)
parser.add_option('--min-rank', dest='min_rank', type='int',\
help="Min rank you want to make spd files for. (Default: 3)"\
" Rank 1: noise,"\
" Rank 2: RFI,"\
" Rank 3: maybe astrophysical, very low S/N,"\
" Rank 4: probably astrophysical but weak, low S/N,"\
" Rank 5: Very high chance of being astrophysical. S/N>8.0,"\
" Rank 6: Almost guranteed to be astrophysical. S/N>9.2,",\
default=3)
parser.add_option('--group-rank', dest='group_rank', type='int',\
help="Min rank you want to make spd files for. (Default: None)"\
" Rank 1: noise,"\
" Rank 2: RFI,"\
" Rank 3: maybe astrophysical, very low S/N,"\
" Rank 4: probably astrophysical but weak, low S/N,"\
" Rank 5: Very high chance of being astrophysical. S/N>8.0,"\
" Rank 6: Almost guranteed to be astrophysical. S/N>9.2,",\
default=None)
parser.add_option('--use_manual_params', dest='man_params', action='store_true', \
help="If this flag is not set it will use the parameters " \
"from the RRATrap groups.txt file. "\
"(Default: Not use this flag. When using "\
"parameters from the output of rratrap. Just input"\
"groups.txt file, mask file, the PSRFITs file"\
"and the .singlepulse files as input. No need to specify any of"\
" the other arguments.)",\
default=False)
parser.add_option('-o', dest='outbasenm', type='string', \
help="basename of the output spd file.", \
default=None)
parser.add_option('--noplot', dest='plot', action='store_false', \
help="Do not generate spd plots.", \
default=True)
parser.add_option('--just-waterfall', dest='just_waterfall', action='store_true', \
help="Only produce the waterfall plots (frequency vs Time).", \
default=False)
options, args = parser.parse_args()
if not (args[0].endswith("fits") or args[0].endswith("fil")):
raise ValueError("The first file must be a psrFits or a filterbank file! ")
    if options.bandpass_corr and (options.maskfile is None):
        raise ValueError("For bandpass correction you need to supply a mask file.")
    if not options.man_params:
        if options.txtfile is None:
            raise ValueError("The groups.txt file must be given on the command line! ")
    else:
        if options.start is None:
            raise ValueError("Start time (-T/--start-time) " \
                             "must be given on command line!")
        if (options.duration is None) and (options.nbins is None):
            raise ValueError("One of duration (-t/--duration) " \
                             "and num bins (-n/--nbins) " \
                             "must be given on command line!")
if options.subdm is None:
options.subdm = options.dm
main()
| 34,019 | 59 | 165 | py |
presto | presto-master/tests/dedisp.py | import os
# To use this script to help you dedisperse a bunch of time series, first
# run DDplan.py with appropriate values for your data to generate a
# dedispersion plan:
#
# sransom@foops:~$ DDplan.py -d 200 -t 0.000072 -s 32 -n 96 -b 48.0 -f 820.0
#
# Minimum total smearing : 0.102 ms
# --------------------------------------------
# Minimum channel smearing : 3.76e-05 ms
# Minimum smearing across BW : 0.00361 ms
# Minimum sample time : 0.072 ms
#
# Setting the new 'best' resolution to : 0.072 ms
# Best guess for optimal initial dDM is 0.199
#
# Low DM High DM dDM DownSamp dsubDM #DMs DMs/call calls WorkFract
# 0.000 38.400 0.20 1 4.80 192 24 8 0.7273
# 38.400 60.000 0.30 2 7.20 72 24 3 0.1364
# 60.000 108.000 0.50 4 12.00 96 24 4 0.09091
# 108.000 204.000 1.00 8 24.00 96 24 4 0.04545
#
#
# Now with that plan, fill in the lists below and appropriate variables
# for your data and you can then generate the subbands and time series
# using "python dedisp.py"
#
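# As a concrete (hypothetical) illustration, the first row of the plan shown
# above would translate to the first entry of each list below:
#   dDMs[0]=0.20, dsubDMs[0]=4.80, downsamps[0]=1, subcalls[0]=8, startDMs[0]=0.0
# The values actually filled in below are for a different data set.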
def myexecute(cmd):
print("'%s'"%cmd)
os.system(cmd)
# dDM steps from DDplan.py
dDMs = [2.0, 3.0]
# dsubDM steps
dsubDMs = [48., 72.]
# downsample factors
downsamps = [4, 8]
# number of calls per set of subbands
subcalls = [7, 3]
# The low DM for each set of DMs
startDMs = [0.0, 336.0]
# DMs/call
dmspercall = 24
# Number of subbands
nsub = 32
# The basename of the output files you want to use
basename = "Lband"
# The name of the raw data file (or files if you use wildcards) to use
rawfiles = "GBT*.fil"
# The name of the maskfile to apply (if no mask, use None)
maskfile = basename+"_rfifind.mask"
# Loop over the DDplan plans
for dDM, dsubDM, downsamp, subcall, startDM in \
zip(dDMs, dsubDMs, downsamps, subcalls, startDMs):
# Get our downsampling right
subdownsamp = downsamp/2
datdownsamp = 2
if downsamp < 2: subdownsamp = datdownsamp = 1
# Loop over the number of calls
for ii in range(subcall):
subDM = startDM + (ii+0.5)*dsubDM
# First create the subbands
if maskfile:
myexecute("prepsubband -mask %s -sub -subdm %.2f -nsub %d -downsamp %d -o %s %s" %
(maskfile, subDM, nsub, subdownsamp, basename, rawfiles))
else:
myexecute("prepsubband -sub -subdm %.2f -nsub %d -downsamp %d -o %s %s" %
(subDM, nsub, subdownsamp, basename, rawfiles))
# And now create the time series
loDM = startDM + ii*dsubDM
subnames = basename+"_DM%.2f.sub[0-9]*"%subDM
myexecute("prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d -o %s %s" %
(loDM, dDM, dmspercall, datdownsamp, basename, subnames))
| 2,855 | 35.151899 | 94 | py |
presto | presto-master/tests/test_presto_python.py | from __future__ import print_function
import numpy as np
from presto import presto
import os
from os import path
here = path.dirname(__file__)
print("Testing FFT stuff...", end=' ')
N = 20
x = np.random.standard_normal(N)
nx = presto.rfft(presto.rfft(x, -1), 1)
assert(np.allclose(x, nx, atol=1e-6))
print("success")
print("Testing FFTW call...", end=' ')
cx = np.random.standard_normal(N).astype(np.complex64)
ncx = np.array(cx, copy=1)
presto.fftwcall(cx, -1)
presto.fftwcall(cx, 1)
assert(np.allclose(cx/N, ncx, atol=1e-6))
print("success")
print("Testing tablesixstepfft call...", end=' ')
cx = np.random.standard_normal(N).astype(np.complex64)
ncx = np.array(cx, copy=1)
presto.tablesixstepfft(cx, -1)
presto.tablesixstepfft(cx, 1)
assert(np.allclose(cx/N, ncx, atol=1e-6))
print("success")
print("Testing reading infiles...", end=' ')
x = presto.read_inffile(path.join(here, "1937_DM71.02_zerodm.inf"), verbose=False)
assert(x.telescope=="GBT")
assert(x.mjd_i==55267)
assert(x.dt==8.192e-05)
assert(x.numonoff==1)
assert(x.analyzer=="sransom")
print("success")
print("Testing writing infiles...", end=' ')
x.analyzer="test"
x.name="xxx"
x.dt=0.125
presto.write_inffile(x, verbose=False)
y = presto.read_inffile("xxx", verbose=False)
assert(y.analyzer=="test")
assert(y.bary==0)
assert(y.numonoff==1)
assert(y.dt==0.125)
os.remove("xxx.inf")
print("success")
print("""Note: If the following fails, edit $PRESTO/Makefile and follow
the instructions about the CFLAGS USE_FFTW_MALLOC option""")
print("Testing allocation and freeing of memory...", end=' ')
for ii in range(1024):
a = presto.gen_fvect(1024 * 32768)
del a
for ii in range(1024):
a = presto.gen_cvect(1024 * 16384)
del a
print("success")
print("Testing psrparams and orbitparams stuff...", end=' ')
psr = presto.psrepoch("J0737-3039A", 56000.0, verbose=False)
assert(round(psr.dm-48.92, 7)==0)
# This needs to change when we start using the actual psrcat.db file
assert(round(psr.orb.p-8834.534998272, 7)==0)
print("success")
print("Testing spectralpower and spectralphase...", end=' ')
a = np.arange(5.0) + complex(0.0, 1.0)
assert(np.allclose(presto.spectralpower(a),
np.arange(5.0)**2.0 + 1))
assert(np.allclose(presto.spectralphase(a),
np.array([90., 45., 26.56505203, 18.43494797, 14.03624344])))
print("success")
print("Testing vector shifting / rotation...", end=' ')
a = np.arange(4, dtype=np.float32)
presto.frotate(a, 1)
assert(np.allclose(a, np.array([1, 2, 3, 0])))
a = np.arange(4, dtype=np.float64)
presto.drotate(a, 1)
assert(np.allclose(a, np.array([1, 2, 3, 0])))
print("success")
print("Testing orbit integration stuff...", end=' ')
orb = presto.orbitparams()
orb.p = 10000.0
orb.e = 0.1
orb.x = 1.0
orb.t = 1234.0
orb.w = 75.0
orb.pd = orb.wd = 0.0
E0 = presto.keplers_eqn(orb.t+0.0, orb.p, orb.e, 1e-15)
E1 = presto.keplers_eqn(orb.t+100.0, orb.p, orb.e, 1e-15)
E2 = presto.keplers_eqn(orb.t+200.0, orb.p, orb.e, 1e-15)
E3 = presto.keplers_eqn(orb.t+300.0, orb.p, orb.e, 1e-15)
Es = np.asarray([E0, E1, E2, E3])
Es_check = np.asarray([ 0.85050653, 0.9175909,
0.9842971, 1.05061346])
assert(np.allclose(Es, Es_check))
Es_new = presto.dorbint(E0, 4, 100.0, orb)
assert(np.allclose(Es_new, Es_check))
presto.E_to_v(Es, orb)
Vs_check = np.asarray([-112.15558594, -122.45299212,
-131.9991447, -140.76659065])
assert(np.allclose(Es, Vs_check))
minv, maxv = presto.binary_velocity(300.0, orb)
minv *= presto.SOL/1000.0
maxv *= presto.SOL/1000.0
assert(round(minv-Vs_check.min(), 7)==0)
assert(round(maxv-Vs_check.max(), 7)==0)
print("success")
print("Testing Fourier response generation...", end=' ')
numbetween = 16
z = 5.0
w = 40.0
bins_per_side = max([presto.r_resp_halfwidth(presto.LOWACC), \
presto.z_resp_halfwidth(z, presto.LOWACC), \
presto.w_resp_halfwidth(z, w, presto.LOWACC)])
nn = numbetween * bins_per_side * 2;
rresp = presto.gen_r_response(0.0, numbetween, nn)
zresp = presto.gen_z_response(0.0, numbetween, nn, z)
wresp = presto.gen_w_response(0.0, numbetween, nn, z, w)
pr = presto.spectralpower(rresp)
pz = presto.spectralpower(zresp)
pw = presto.spectralpower(wresp)
rs = np.arange(float(nn))/numbetween - bins_per_side
if False:
import matplotlib.pyplot as plt
plt.plot(rs, pr, 'b-')
plt.plot(rs, pz, 'g-')
plt.plot(rs, pw, 'r-')
plt.show()
assert(rs[nn//2]==0.0)
assert(pr[nn//2]==1.0)
assert(round(pz[nn//2]-0.227675, 6)==0)
assert(round(pw[nn//2]-0.019462, 6)==0)
print("success")
print("Testing angle functions...", end=' ')
dd1 = 15.25
dd2 = presto.dms2rad(*presto.deg2dms(dd1))*presto.RADTODEG
assert(round(dd1-dd2, 12)==0)
dd1 = -0.5
dd2 = presto.dms2rad(*presto.deg2dms(dd1))*presto.RADTODEG
assert(round(dd1-dd2, 12)==0)
hh1 = 12.125
hh2 = presto.hms2rad(*presto.hours2hms(hh1))*presto.RADTODEG/15.0
assert(round(hh1-hh2, 12)==0)
hh1 = -0.5
hh2 = presto.hms2rad(*presto.hours2hms(hh1))*presto.RADTODEG/15.0
assert(round(hh1-hh2, 12)==0)
ang = presto.sphere_ang_diff(10.0*presto.DEGTORAD, 10.0*presto.DEGTORAD,
50.0*presto.DEGTORAD, -10.0*presto.DEGTORAD)
assert(round(160334.960*presto.ARCSEC2RAD-ang, 7)==0)
print("success")
# Only run this test if TEMPO is available
envval = os.getenv("TEMPO")
if envval is not None:
print("Testing get_baryv (barycenter)...", end=' ')
vavg1 = presto.get_baryv("18:24:32.9520", "-24:52:12.0000",
56421.44222222222222, 214.5386496, obs="GB")
vavg2 = -7.2069293455783169e-05
assert(round(vavg1-vavg2, 10)==0)
print("success")
else:
print("Skipping test of presto.get_baryv() since TEMPO not set.")
print("Testing simple folding code...", end=' ')
prof, phs = presto.fold(np.ones(10000), 0.001, 10, 1)
assert(np.allclose(prof, prof.mean()))
assert(np.all(prof>0))
prof, phs = presto.fold(np.ones(10000), 0.001, 100, 1)
assert(np.allclose(prof, prof.mean()))
assert(np.all(prof>0))
prof, phs = presto.fold(np.ones(10000), 0.001, 200, 1)
assert(np.allclose(prof, prof.mean()))
assert(np.all(prof>0))
prof, phs = presto.fold(np.ones(10000), 0.001, 500, 1)
assert(np.allclose(prof, prof.mean()))
assert(np.all(prof>0))
prof, phs = presto.fold(np.ones(10000), 0.001, 100, 1, tlo=-1.12313)
assert(np.allclose(prof, prof.mean()))
assert(np.all(prof>0))
prof, phs = presto.fold(np.ones(10000), 0.001, 100, 1, startphs=0.65765)
assert(np.allclose(prof, prof.mean()))
assert(np.all(prof>0))
prof, phs = presto.fold(np.ones(10000), 0.001, 100, 1, startphs=0.99)
assert(np.allclose(prof, prof.mean()))
assert(np.all(prof>0))
prof, phs = presto.fold(np.ones(10000), 0.001, 100, 1, startphs=0.99, standard=False)
assert(np.allclose(prof, prof.mean()))
assert(np.all(prof>0))
print("success")
| 6,786 | 32.766169 | 85 | py |
presto | presto-master/tests/test_max_rzw_harmonics.py | import sys
import time
import numpy as np
from numpy.random import standard_normal as norm
from numpy.random import uniform
from presto import presto
if sys.version_info[0]<3:
clk = time.clock
else:
clk = time.perf_counter
N = 2**17
noiseamp = 1.0
numharm = 1
numtrials = 100
us = np.arange(N, dtype=np.float64) / N # normalized time coordinate
rztime = 0.0
rzwtime = 0.0
rzerrs = np.zeros((numtrials, 3))
rzwerrs = np.zeros((numtrials, 4))
theo_max_pow = N**2.0/4.0
for n in range(numtrials):
r = N/(4*numharm) + uniform(0.0, 1.0, 1)[0] # average freq over "observation"
z = uniform(-100, 100, 1)[0] # average fourier f-dot
w = uniform(-600, 600, 1)[0] # fourier freq double deriv
    w = 0.0 # override the random value: keep the f-dotdot term at zero for this test
data = np.zeros_like(us)
for ii in range(numharm):
rh = r * (ii + 1)
zh = z * (ii + 1)
wh = w * (ii + 1)
r0 = rh - 0.5 * zh + wh / 12.0 # Make symmetric for all z and w
z0 = zh - 0.5 * wh
phss = 2.0 * np.pi * (us * (us * (us * wh/6.0 + z0/2.0) + r0))
data += np.cos(phss)
data += noiseamp * norm(N)
ft = presto.rfft(data)
offset = uniform(-1.0, 1.0, 3) * np.array([0.5, 2.0, 20.0]) / (0.5 * numharm)
a = clk()
if (numharm > 1):
[maxpow, rmax, zmax, rds] = presto.maximize_rz_harmonics(ft, r+offset[0],
z+offset[1], numharm,
norm=1.0)
else:
[maxpow, rmax, zmax, rd] = presto.maximize_rz(ft, r+offset[0],
z+offset[1],
norm=1.0)
rztime += clk() - a
rzerrs[n] = (maxpow/numharm - theo_max_pow) / theo_max_pow, rmax - r, zmax - z
a = clk()
if (numharm > 1):
[maxpow, rmax, zmax, wmax, rds] = presto.maximize_rzw_harmonics(ft, r+offset[0],
z+offset[1],
w+offset[2], numharm,
norm=1.0)
else:
[maxpow, rmax, zmax, wmax, rd] = presto.maximize_rzw(ft, r+offset[0],
z+offset[1],
w+offset[2],
norm=1.0)
rzwtime += clk() - a
rzwerrs[n] = (maxpow/numharm - theo_max_pow) / theo_max_pow, rmax - r, zmax - z, wmax - w
print("Time for rz: %f" % (rztime / numtrials))
print("Time for rzw: %f" % (rzwtime / numtrials))
print("rzerrs:")
print(" avg: %6.3f %6.3f %6.3f" % tuple(rzerrs.mean(axis=0)))
print(" std: %6.3f %6.3f %6.3f" % tuple(rzerrs.std(axis=0)))
print("rzwerrs:")
print(" avg: %6.3f %6.3f %6.3f %6.3f" % tuple(rzwerrs.mean(axis=0)))
print(" std: %6.3f %6.3f %6.3f %6.3f" % tuple(rzwerrs.std(axis=0)))
| 3,039 | 36.530864 | 93 | py |
presto | presto-master/tests/test_binary_calcs.py | import numpy as np
from presto import presto
from presto import binary_psr
import matplotlib.pyplot as plt
N = 1000 # number of points in each orbit calc
ma = np.arange(float(N))*2.0/N
# This is going for Figure 1 in Hulse & Taylor 1975
psr1 = presto.psrepoch("B1913+16", 42320.0)
# unfortunatey, OMDOT is not in the binary part of the
# database correctly. So we need to set that:
psr1.orb.w = 179.0
psr1.orb.t = 0.0
Eo = presto.keplers_eqn(psr1.orb.t, psr1.orb.p, psr1.orb.e, 1e-15)
Es = presto.dorbint(Eo, N, 2.0*psr1.orb.p/N, psr1.orb)
presto.E_to_v(Es, psr1.orb)
plt.plot(ma, Es, 'b-')
plt.xlabel("Orbital Phase")
plt.ylabel("Pulsar Velocity (km/s)")
plt.show()
# This is going for Figure 1 in Champion et al 2008
bpsr = binary_psr.binary_psr("1903+0327.par")
MJDs = bpsr.T0 + ma * bpsr.par.PB
xs, ys = bpsr.position(MJDs)
#cMJDs = bpsr.demodulate_TOAs(MJDs)
#cxs, cys = bpsr.position(cMJDs)
psr2 = presto.psrepoch("1903+0327.par", bpsr.T0)
psr2.orb.t = 0.0
Eo = presto.keplers_eqn(psr2.orb.t, psr2.orb.p, psr2.orb.e, 1e-15)
Es = presto.dorbint(Eo, N, 2.0*psr2.orb.p/N, psr2.orb)
# bt = Es.copy()
presto.E_to_phib(Es, psr2.orb)
# presto.E_to_phib_BT(bt, psr2.orb)
plt.plot(ma, Es, 'b-')
plt.plot(ma, -xs, 'r-')
#plt.plot(ma, Es - -xs, '-')
# plt.plot(ma, bt, 'g-')
# plt.plot(ma, -cxs, 'k-')
plt.xlabel("Orbital Phase")
plt.ylabel("Pulse Delay (s)")
plt.show()
| 1,372 | 28.847826 | 66 | py |
presto | presto-master/tests/python/cvec.py | from __future__ import print_function
from Numeric import *
from cvects import *
def carr(n):
return complex_arr(n)
def multcarr(a, val):
mult_arr(a, val, len(a))
a = open("cvects.h", "r")
print(get_filelen(a))
a.close()
d = arange(10, typecode='d')
print(d)
dgenrotate_1d(d, len(d), 3.5)
print(d)
print(dstats(d, len(d)))
f = complex_arr(10)
print(f)
mult_arr(f, 10.0, len(f))
print(f)
| 400 | 15.04 | 37 | py |
presto | presto-master/examplescripts/ffdot_example.py | from __future__ import print_function
import numpy as num
from presto import presto
import presto.ppgplot as ppgplot
from presto.Pgplot import pgpalette
from numpy.random import standard_normal as norm
import time
N = 2**14
r = N/4.0 # average freq over "observation"
#r = N/4.0 + 0.5 # average freq over "observation"
rint = num.floor(r)
dr = 1.0/32.0
dz = 0.18
np = 512 # number of pixels across for f-fdot image
z = 10.0 # average fourier f-dot
w = -40.0 # fourier freq double deriv
noise = 0.0
noise = 1.0*norm(N)
us = num.arange(N, dtype=num.float64) / N # normalized time coordinate
r0 = r - 0.5 * z + w / 12.0 # Make symmetric for all z and w
z0 = z - 0.5 * w
phss = 2.0 * num.pi * (us * (us * (us * w/6.0 + z0/2.0) + r0))
ft = presto.rfft(num.cos(phss)+noise)
ffdot = presto.ffdot_plane(ft, rint-np/2*dr, dr, np, 0.0-np/2*dz, dz, np)
pffdot = presto.spectralpower(ffdot.flat)
theo_max_pow = N**2.0/4.0
frp = max(pffdot) / theo_max_pow # Fraction of recovered power
print("Fraction of recovered signal power = %f" % frp)
a = time.perf_counter()
[maxpow, rmax, zmax, rd] = presto.maximize_rz(ft, r+norm(1)[0]/5.0,
z+norm(1)[0], norm=1.0)
print("Time for rz:", time.clock()-a)
print(r, rmax, z, zmax, theo_max_pow, maxpow)
a = time.perf_counter()
[maxpow, rmax, zmax, wmax, rd] = presto.maximize_rzw(ft, r+norm(1)[0]/5.0,
z+norm(1)[0],
w+norm(1)[0]*5.0,
norm=1.0)
print("Time for rzw:", time.clock()-a)
print(r, rmax, z, zmax, w, wmax, theo_max_pow, maxpow)
#print "Raw power should be ~%.2e" % theo_max_pow
pffdot = pffdot / theo_max_pow
pffdot.shape = (np, np)
rs = num.arange(np) * dr - np//2*dr
zs = num.arange(np) * dz - np//2*dz
rgx = num.asarray([rs[0], rs[np-1]])
rgy = num.asarray([zs[0], zs[np-1]])
freqcut = pffdot[np//2, :]
fdotcut = pffdot[:, np//2]
image='antirainbow'
device='ffdot_combined.eps/VCPS'
device='/XWIN'
labx='Fourier Frequency Offset (bins)'
laby='Fourier Frequency Derivative (bins)'
contours = num.asarray([0.1, 0.3, 0.5, 0.7, 0.9])
imfract = 0.65
margin = 0.08
ppgplot.pgopen(device)
ppgplot.pgpap(0.0, 1.0)
ppgplot.pgpage()
# Give z and w values and power change
ppgplot.pgsvp(margin+imfract, 1.0-margin/2, margin+imfract, 1.0-margin/2)
ppgplot.pgswin(0.0, 1.0, 0.0, 1.0)
ppgplot.pgtext(0.1, 0.8, "Frac Recovered" % frp)
ppgplot.pgtext(0.2, 0.65, "Power = %.3f" % frp)
ppgplot.pgtext(0.1, 0.4, "signal z = %.1f" % z)
ppgplot.pgtext(0.1, 0.25, "signal w = %.1f" % w)
# freq cut
ppgplot.pgsvp(margin, margin+imfract, margin+imfract, 1.0-margin/2)
ppgplot.pgswin(min(rs), max(rs), -0.1, 1.1)
ppgplot.pgbox("BCST", 0.0, 0, "BCNST", 0.0, 0)
ppgplot.pgline(rs, freqcut)
ppgplot.pgmtxt("L", 2.0, 0.5, 0.5, "Relative Power");
#fdot cut
ppgplot.pgsvp(margin+imfract, 1.0-margin/2, margin, margin+imfract)
ppgplot.pgswin(-0.1, 1.1, min(zs), max(zs))
ppgplot.pgbox("BCNST", 0.0, 0, "BCST", 0.0, 0)
ppgplot.pgline(fdotcut, zs)
ppgplot.pgmtxt("B", 2.4, 0.5, 0.5, "Relative Power");
# f-fdot image
ppgplot.pgsvp(margin, margin+imfract, margin, margin+imfract)
ppgplot.pgswin(min(rs), max(rs), min(zs), max(zs))
ppgplot.pgmtxt("B", 2.4, 0.5, 0.5, labx);
ppgplot.pgmtxt("L", 2.0, 0.5, 0.5, laby);
lo_col_ind, hi_col_ind = ppgplot.pgqcol()
lo_col_ind = lo_col_ind + 2
ppgplot.pgscir(lo_col_ind, hi_col_ind)
pgpalette.setpalette(image)
ppgplot.pgctab(pgpalette.l, pgpalette.r, pgpalette.g, pgpalette.b)
ppgplot.pgimag_s(pffdot, 0.0, 0.0, rgx[0], rgy[0], rgx[1], rgy[1])
ppgplot.pgsci(1)
ppgplot.pgcont_s(pffdot, len(contours), contours, rgx[0], rgy[0], rgx[1], rgy[1])
ppgplot.pgbox("BCST", 0.0, 0, "BCST", 0.0, 0)
ppgplot.pgsci(1)
ppgplot.pgbox("N", 0.0, 0, "N", 0.0, 0)
# gray axes
ppgplot.pgscr(1, 0.5, 0.5, 0.5)
ppgplot.pgsci(1)
ppgplot.pgslw(2)
ppgplot.pgline(rgx, num.asarray([0.0, 0.0]))
ppgplot.pgline(num.asarray([0.0, 0.0]), rgy)
ppgplot.pgclos()
| 3,978 | 33.903509 | 81 | py |
presto | presto-master/examplescripts/pdm2raw.py | from __future__ import print_function
from numpyio import fread, fwrite
from sys import argv
print("\nReading info from %s.hdr and" % argv[1])
print("%s.dat\n" % argv[1])
print("Writing %s.raw\n" % argv[1])
HEADERLEN = 640
BLOCKLEN = 49152
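# Note (assumption based on the unpacking below): each record appears to carry a
# 4-byte framing word before and after its payload, which is why HEADERLEN+8 and
# BLOCKLEN+8 bytes are read and only the [4:-4] slices are kept.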
# Read the header file
file = open(argv[1]+'.hdr', 'rb')
data = fread(file, HEADERLEN+8, 'b')
file.close()
header = data[4:-4]
infile = open(argv[1]+'.dat', 'rb')
outfile = open(argv[1]+'.raw', 'wb')
# Read and write the raw data
while (1):
data = fread(infile, BLOCKLEN+8, 'b')
if (len(data)==BLOCKLEN+8):
fwrite(outfile, HEADERLEN, header, 'b')
fwrite(outfile, BLOCKLEN, data[4:-4], 'b')
else:
break
print('')
infile.close()
outfile.close()
| 722 | 20.909091 | 50 | py |
presto | presto-master/examplescripts/ACCEL_sift.py | from __future__ import absolute_import
from builtins import map
import re
import glob
import presto.sifting as sifting
from operator import itemgetter, attrgetter
# Note: You will almost certainly want to adjust
# the following variables for your particular search
# glob for ACCEL files
globaccel = "*ACCEL_*0"
# glob for .inf files
globinf = "*DM*.inf"
# In how many DMs must a candidate be detected to be considered "good"
min_num_DMs = 2
# Lowest DM to consider as a "real" pulsar
low_DM_cutoff = 2.0
# Ignore candidates with a sigma (from incoherent power summation) less than this
sifting.sigma_threshold = 4.0
# Ignore candidates with a coherent power less than this
sifting.c_pow_threshold = 100.0
# If the birds file works well, the following shouldn't
# be needed at all... If they are, add tuples with the bad
# values and their errors.
# (ms, err)
sifting.known_birds_p = []
# (Hz, err)
sifting.known_birds_f = []
# The following are all defined in the sifting module.
# But if we want to override them, uncomment and do it here.
# You shouldn't need to adjust them for most searches, though.
# How close a candidate has to be to another candidate to
# consider it the same candidate (in Fourier bins)
sifting.r_err = 1.1
# Shortest period candidates to consider (s)
sifting.short_period = 0.0005
# Longest period candidates to consider (s)
sifting.long_period = 15.0
# Ignore any candidates where at least one harmonic does exceed this power
sifting.harm_pow_cutoff = 8.0
#--------------------------------------------------------------
# Try to read the .inf files first, as _if_ they are present, all of
# them should be there. (if no candidates are found by accelsearch
# we get no ACCEL files...
inffiles = glob.glob(globinf)
candfiles = glob.glob(globaccel)
# Check to see if this is from a short search
if len(re.findall("_[0-9][0-9][0-9]M_" , inffiles[0])):
dmstrs = [x.split("DM")[-1].split("_")[0] for x in candfiles]
else:
dmstrs = [x.split("DM")[-1].split(".inf")[0] for x in inffiles]
dms = list(map(float, dmstrs))
dms.sort()
dmstrs = ["%.2f"%x for x in dms]
# Read in all the candidates
cands = sifting.read_candidates(candfiles)
# Remove candidates that are duplicated in other ACCEL files
if len(cands):
cands = sifting.remove_duplicate_candidates(cands)
# Remove candidates with DM problems
if len(cands):
cands = sifting.remove_DM_problems(cands, min_num_DMs, dmstrs, low_DM_cutoff)
# Remove candidates that are harmonically related to each other
# Note: this includes only a small set of harmonics
if len(cands):
cands = sifting.remove_harmonics(cands)
# Write candidates to STDOUT
if len(cands):
cands.sort(key=attrgetter('sigma'), reverse=True)
sifting.write_candlist(cands)
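# A typical (hypothetical) invocation is from the directory holding the
# *ACCEL_* and *.inf files, capturing the candidate list written to STDOUT:
#   python ACCEL_sift.py > sifted_candidates.txt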
| 2,805 | 33.219512 | 81 | py |
presto | presto-master/examplescripts/dedisp.py | from __future__ import print_function
from builtins import zip
from builtins import range
import os
# To use this script to help you dedisperse a bunch of time series, first
# run DDplan.py with appropriate values for your data to generate a
# dedispersion plan:
#
# sransom@foops:~$ DDplan.py -d 200 -t 0.000072 -s 32 -n 96 -b 48.0 -f 820.0
#
# Minimum total smearing : 0.102 ms
# --------------------------------------------
# Minimum channel smearing : 3.76e-05 ms
# Minimum smearing across BW : 0.00361 ms
# Minimum sample time : 0.072 ms
#
# Setting the new 'best' resolution to : 0.072 ms
# Best guess for optimal initial dDM is 0.199
#
# Low DM High DM dDM DownSamp dsubDM #DMs DMs/call calls WorkFract
# 0.000 38.400 0.20 1 4.80 192 24 8 0.7273
# 38.400 60.000 0.30 2 7.20 72 24 3 0.1364
# 60.000 108.000 0.50 4 12.00 96 24 4 0.09091
# 108.000 204.000 1.00 8 24.00 96 24 4 0.04545
#
#
# Now with that plan, fill in the lists below and appropriate variables
# for your data and you can then generate the subbands and time series
# using "python dedisp.py"
#
def myexecute(cmd):
print("'%s'"%cmd)
os.system(cmd)
# dDM steps from DDplan.py
dDMs = [0.2, 0.3, 0.5, 1.0]
# dsubDM steps
dsubDMs = [4.8, 7.2, 12.0, 24.0]
# downsample factors
downsamps = [1, 2, 4, 8]
# number of calls per set of subbands
subcalls = [8, 3, 4, 4]
# The low DM for each set of DMs
startDMs = [0.0, 38.4, 60.0, 108.0]
# DMs/call
dmspercall = 24
# Number of subbands
nsub = 32
# The number of points in the least-downsampled time series
numout = 500000
# The basename of the output files you want to use
basename = "S0062004000"
# The name of the raw data file (or files if you use wildcards) to use
rawfiles = basename+"*.bcpm2"
# The name of the maskfile to apply (if no mask, use None)
maskfile = basename+"_rfifind.mask"
# Loop over the DDplan plans
for dDM, dsubDM, downsamp, subcall, startDM in \
zip(dDMs, dsubDMs, downsamps, subcalls, startDMs):
# Get our downsampling right
subdownsamp = downsamp/2
datdownsamp = 2
if downsamp < 2: subdownsamp = datdownsamp = 1
# Loop over the number of calls
for ii in range(subcall):
subDM = startDM + (ii+0.5)*dsubDM
# First create the subbands
if maskfile:
myexecute("prepsubband -mask %s -sub -subdm %.2f -nsub %d -downsamp %d -o %s %s" %
(maskfile, subDM, nsub, subdownsamp, basename, rawfiles))
else:
myexecute("prepsubband -sub -subdm %.2f -nsub %d -downsamp %d -o %s %s" %
(subDM, nsub, subdownsamp, basename, rawfiles))
# And now create the time series
loDM = startDM + ii*dsubDM
subnames = basename+"_DM%.2f.sub[0-9]*"%subDM
myexecute("prepsubband -lodm %.2f -dmstep %.2f -numdms %d -downsamp %d -o %s %s" %
(loDM, dDM, dmspercall, datdownsamp, basename, subnames))
| 3,081 | 35.690476 | 94 | py |
presto | presto-master/examplescripts/jerk_example.py | from __future__ import print_function
import numpy as num
from presto import presto
import presto.ppgplot as ppgplot
import time
from presto.Pgplot import pgpalette
N = 2**14
r = N/4.0 # average freq over "observation"
#r = N/4.0 + 0.5 # average freq over "observation"
rint = num.floor(r)
numbetween = 8
dr = 1.0/numbetween
dz = 4.0/numbetween
dw = 20.0/numbetween
np = 256 # number of pixels across for f-fdot image
z = 0.0 # average fourier f-dot
w = 0.0 # fourier freq double deriv
#noise = 0.0
noise = 0.0*num.random.standard_normal(N)
us = num.arange(N, dtype=num.float64) / N # normalized time coordinate
r0 = r - 0.5 * z + w / 12.0 # Make symmetric for all z and w
z0 = z - 0.5 * w
phss = 2.0 * num.pi * (us * (us * (us * w/6.0 + z0/2.0) + r0))
ft = presto.rfft(num.cos(phss)+noise)
a = time.perf_counter()
vol = presto.fdotdot_vol(ft, rint-np/2*dr, dr, np,
0.0-np/2*dz, dz, np,
0.0-np/2*dw, dw, np)
print("First jerk vol took %.3f s" % (time.clock()-a))
a = time.perf_counter()
vol = presto.fdotdot_vol(ft, rint-np/2*dr, dr, np,
0.0-np/2*dz, dz, np,
0.0-np/2*dw, dw, np)
print("Second jerk vol took %.3f s" % (time.clock()-a))
pvol = presto.spectralpower(vol.flat)
theo_max_pow = N**2.0/4.0
frp = max(pvol) / theo_max_pow # Fraction of recovered power
print("Fraction of recovered signal power = %f" % frp)
[maxpow, rmax, zmax, rd] = presto.maximize_rz(ft, r+num.random.standard_normal(1)[0]/5.0,
z+num.random.standard_normal(1)[0], norm=1.0)
print(r, rmax, z, zmax, theo_max_pow, maxpow)
# print("Raw power should be ~%.2e" % theo_max_pow)
pvol = pvol / theo_max_pow
pvol.shape = (np, np, np)
rs = num.arange(np) * dr - np/2*dr
zs = num.arange(np) * dz - np/2*dz
ws = num.arange(np) * dw - np/2*dw
rgx = num.asarray([rs[0], rs[np-1]])
rgy = num.asarray([zs[0], zs[np-1]])
# Use the following if you want frames for a movie. See the bottom
# of this file for the other commands to generate that movie.
#device='jerk_%03d.eps/VCPS'
device='/XWIN'
image='antirainbow'
labx='Fourier Frequency Offset (bins)'
laby='Fourier Frequency Derivative (bins)'
contours = num.asarray([0.1, 0.3, 0.5, 0.7, 0.9])
imfract = 0.65
margin = 0.08
if device=="/XWIN":
ppgplot.pgopen(device)
ppgplot.pgpap(0.0, 1.0)
ppgplot.pgpage()
for ii in range(np):
if not device=="/XWIN":
ppgplot.pgopen(device%ii)
ppgplot.pgpap(0.0, 1.0)
ppgplot.pgpage()
freqcut = pvol[ii, np//2, :]
fdotcut = pvol[ii, :, np//2]
frp = pvol[ii].max() # Fraction of recovered power
print("w = %.3f frac pow recovered = %.3f" % (ws[ii], frp))
# Give z and w values and power change
ppgplot.pgsvp(margin+imfract, 1.0-margin/2, margin+imfract, 1.0-margin/2)
ppgplot.pgswin(0.0, 1.0, 0.0, 1.0)
ppgplot.pgtext(0.1, 0.8, "Frac Recovered")
ppgplot.pgtext(0.2, 0.65, "Power = %.3f" % frp)
ppgplot.pgtext(0.1, 0.4, "signal z = %.1f" % z)
ppgplot.pgtext(0.1, 0.25, "w = %.1f" % ws[ii])
# freq cut
ppgplot.pgsvp(margin, margin+imfract, margin+imfract, 1.0-margin/2)
ppgplot.pgswin(min(rs), max(rs), -0.1, 1.1)
ppgplot.pgbox("BCST", 0.0, 0, "BCNST", 0.0, 0)
ppgplot.pgline(rs, freqcut)
ppgplot.pgmtxt("L", 2.0, 0.5, 0.5, "Relative Power");
#fdot cut
ppgplot.pgsvp(margin+imfract, 1.0-margin/2, margin, margin+imfract)
ppgplot.pgswin(-0.1, 1.1, min(zs), max(zs))
ppgplot.pgbox("BCNST", 0.0, 0, "BCST", 0.0, 0)
ppgplot.pgline(fdotcut, zs)
ppgplot.pgmtxt("B", 2.4, 0.5, 0.5, "Relative Power");
# f-fdot image
ppgplot.pgsvp(margin, margin+imfract, margin, margin+imfract)
ppgplot.pgswin(min(rs), max(rs), min(zs), max(zs))
ppgplot.pgmtxt("B", 2.4, 0.5, 0.5, labx);
ppgplot.pgmtxt("L", 2.0, 0.5, 0.5, laby);
lo_col_ind, hi_col_ind = ppgplot.pgqcol()
lo_col_ind = lo_col_ind + 2
ppgplot.pgscir(lo_col_ind, hi_col_ind)
pgpalette.setpalette(image)
ppgplot.pgctab(pgpalette.l, pgpalette.r, pgpalette.g, pgpalette.b)
ppgplot.pgimag_s(pvol[ii], 0.0, 0.0, rgx[0], rgy[0], rgx[1], rgy[1])
ppgplot.pgsci(1)
ppgplot.pgcont_s(pvol[ii], len(contours), contours, rgx[0], rgy[0], rgx[1], rgy[1])
ppgplot.pgbox("BCST", 0.0, 0, "BCST", 0.0, 0)
ppgplot.pgsci(1)
ppgplot.pgbox("N", 0.0, 0, "N", 0.0, 0)
# gray axes
ppgplot.pgscr(1, 0.5, 0.5, 0.5)
ppgplot.pgsci(1)
ppgplot.pgslw(2)
ppgplot.pgline(rgx, num.asarray([0.0, 0.0]))
ppgplot.pgline(num.asarray([0.0, 0.0]), rgy)
time.sleep(0.1)
if device=="/XWIN":
ppgplot.pgeras()
else:
ppgplot.pgclos()
if device=="/XWIN":
ppgplot.pgclos()
else:
print("""If you want to make a movie with the resulting .eps files, here are
the appropriate commands:
> python jerk_example.py
> pstoimg -density 200 -antialias -crop a jerk_*eps
> ffmpeg -r 16 -f image2 -s 1000x1000 -i jerk_%03d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p jerk_search.mp4
""")
| 5,022 | 34.125874 | 111 | py |
presto | presto-master/examplescripts/full_analysis.py | #!/usr/bin/python
from os import system, chdir, remove, environ
from sys import stdout, argv, exit
from glob import glob
from optparse import OptionParser
from fcntl import *
def myexecute(cmd):
stdout.write("\n'"+cmd+"'\n")
stdout.flush()
system(cmd)
def main():
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("-c", "--numcpus", type="int", dest="numcpus", default=1,
help="Number of cpus/node working on the data set.")
parser.add_option("-f", "--fft", action="store_true", dest="fft", default=0,
help="Calculate (and later remove) the .fft files")
parser.add_option("-v", "--baryvel", type="float", dest="baryv", default=0.0,
help="Barycentric velocity in units of c")
parser.add_option("-o", "--outdir", type="string", dest="outdir", default=".",
help="Output directory to store results")
parser.add_option("-k", "--workdir", type="string", dest="workdir", default=".",
help="Working directory for search")
parser.add_option("-l", "--flo", type="float", dest="flo", default=1.0,
help="Low frequency (Hz) to search")
parser.add_option("-x", "--fhi", type="float", dest="fhi", default=10000.0,
help="High frequency (Hz) to search")
parser.add_option("-z", "--zmax", type="int", dest="zmax", default=170,
help="Maximum fourier drift (bins) to search")
parser.add_option("-w", "--wmax", type="int", dest="wmax", default=None,
help="Maximum fourier 2nd deriv (bins) to search")
parser.add_option("-a", "--numharm", type="int", dest="numharm", default=8,
help="Number of harmonics to sum when searching")
parser.add_option("-s", "--sigma", type="float", dest="sigma", default=2.0,
help="Cutoff sigma to consider a candidate")
parser.add_option("-p", "--pmax", type="int", dest="pmax", default=6,
help="Maximum # of harmonics to sum in sideband search")
(options, args) = parser.parse_args()
if (options.outdir[-1]!="/"):
options.outdir = options.outdir+"/"
if (options.workdir!='.'):
chdir(options.workdir)
# Get the datafiles and determine their DMs from their names
datanames = glob('*.dat')
if (len(datanames)==0):
exit(0)
dms = []
for dataname in datanames:
loptr = dataname.find("_DM")+3
hiptr = dataname.find(".dat")
dms.append(float(dataname[loptr:hiptr]))
dms.sort()
    # Determine the CPU we are currently using (e.g. with a Torque-based queue):
# cpunum = int(environ['PBS_VNODENUM'])%options.numcpus
# Otherwise this needs to be set somehow....
cpunum = 0
# The basename of the data files
basename = datanames[0][:loptr-3]
# Get the bird file (the first birdie file in the directory!)
birdname = glob("*.birds")
if birdname:
birdname = birdname[0]
for ii in range(len(dms)):
dm = dms[ii]
# Assign each processor a DM to work on
if ii%options.numcpus == cpunum:
filenamebase = basename+'_DM%.2f'%dm
outnamebase = options.outdir+filenamebase
if options.fft:
myexecute('realfft '+filenamebase+'.dat')
myexecute('cp '+birdname+' '+filenamebase+'.birds')
myexecute('makezaplist.py '+filenamebase+'.birds')
myexecute('rm '+filenamebase+'.birds')
myexecute('zapbirds -zap -zapfile '+filenamebase+
'.zaplist -baryv %g '%options.baryv+filenamebase+'.fft')
myexecute('rm '+filenamebase+'.zaplist')
#myexecute('search_bin -flo 80 -ncand 200 -harmsum 1 '+filenamebase+'.fft')
#myexecute('search_bin -flo 80 -ncand 200 -harmsum %d '%options.pmax+filenamebase+'.fft')
#myexecute('cp '+filenamebase+'_bin* '+options.outdir)
#myexecute('accelsearch -sigma %f -zmax 4 -numharm %d -flo %f -fhi %f ' % \
# (options.sigma, options.numharm, options.flo, options.fhi)+filenamebase+'.fft')
#myexecute('cp '+filenamebase+'_ACCEL_4 '+options.outdir)
#myexecute('cp '+filenamebase+'_ACCEL_4.cand '+options.outdir)
#myexecute('cp '+filenamebase+'.inf '+options.outdir)
if options.wmax is not None:
myexecute('accelsearch -sigma %f -zmax %d -wmax %d -numharm %d -flo %f -fhi %f ' % \
(options.sigma, options.zmax, options.wmax, options.numharm, options.flo, options.fhi)+filenamebase+'.fft')
myexecute('cp '+filenamebase+'_JERK_%d '%options.wmax+options.outdir)
myexecute('cp '+filenamebase+'_JERK_%d.cand '%options.wmax+options.outdir)
else:
myexecute('accelsearch -sigma %f -zmax %d -numharm %d -flo %f -fhi %f ' % \
(options.sigma, options.zmax, options.numharm, options.flo, options.fhi)+filenamebase+'.fft')
myexecute('cp '+filenamebase+'_ACCEL_%d '%options.zmax+options.outdir)
myexecute('cp '+filenamebase+'_ACCEL_%d.cand '%options.zmax+options.outdir)
myexecute('single_pulse_search.py -f -p '+filenamebase+'.dat')
myexecute('cp '+filenamebase+'.singlepulse '+options.outdir)
myexecute('cp '+filenamebase+'.inf '+options.outdir)
if options.fft:
myexecute('rm '+filenamebase+'.fft')
else: pass
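# A hypothetical invocation (directories, velocities, and search limits are
# illustrative only):
#   python full_analysis.py -c 4 -f -v 1.0e-4 -o /results/psr_search \
#          -k /scratch/psr_search -z 200 -a 16 -s 2.0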
if __name__ == "__main__":
main()
| 5,749 | 49.438596 | 141 | py |
presto | presto-master/examplescripts/ppdot_plane_plot.py | #!/usr/bin/env python
from __future__ import print_function
from builtins import zip
import numpy as np
import presto.psr_utils as pu
import presto.pypsrcat as cat
import matplotlib.pyplot as plt
# Use color?
usecolor = True
# Find a list of the "good" pulsars: those not in GCs and with a measured pdot
# Also identify which pulsars are "special"
numgood = 0
numGC = 0
numpd0 = 0
ps = []
pds = []
rrats = []
radios = []
nonradios = []
magnetars = []
hepsrs = []
snrs = []
binaries = []
for psr in cat.psrs:
# Ignore pulsars without measured Pdot
if psr.pd==0.0:
numpd0 += 1
continue
# Ignore globular cluster pulsars
elif (psr.assoc is not None and 'GC' in psr.assoc):
numGC += 1
continue
else:
ps.append(psr.p)
pds.append(psr.pd)
if psr.type is not None:
if 'RRAT' in psr.type: rrats.append(numgood)
if 'NRAD' in psr.type:
nonradios.append(numgood)
if 'AXP' in psr.type: magnetars.append(numgood)
if 'HE' in psr.type: hepsrs.append(numgood)
if numgood not in nonradios:
radios.append(numgood)
if psr.assoc is not None:
if 'SNR' in psr.assoc: snrs.append(numgood)
if psr.binary:
binaries.append(numgood)
numgood += 1
ps = np.asarray(ps)
pds = np.asarray(pds)
rrats = np.asarray(rrats)
radios = np.asarray(radios)
nonradios = np.asarray(nonradios)
magnetars = np.asarray(magnetars)
hepsrs = np.asarray(hepsrs)
snrs = np.asarray(snrs)
binaries = np.asarray(binaries)
print("Plotting %d pulsars total:" % numgood)
print(" %d radio, %d non-radio" % (len(radios), len(nonradios)))
print(" RRATs:", len(rrats))
print(" magnetars:", len(magnetars))
print(" high-energy:", len(hepsrs))
print(" in SNRs:", len(snrs))
print(" in binaries:", len(binaries))
print("Rejected %d for having no p-dot and %d for being in a cluster" % \
(numpd0, numGC))
# Now set up the plot
plims = np.asarray([0.001, 20.0])
pdlims = np.asarray([1e-22, 1e-9])
dpdpd = (np.log10(plims[1]) - np.log10(plims[0])) / \
(np.log10(pdlims[1]) - np.log10(pdlims[0]))
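# Added note: dpdpd is the number of decades spanned in P divided by the decades
# spanned in Pdot; on this square figure it converts a log-log line slope into the
# on-screen angle used below to rotate the B-field/Edot/age text labels.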
grey = '0.8'
greytext = '0.3'
plt.figure(num=None, figsize=(9, 9), dpi=200)
ax = plt.gca()
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(plims)
ax.set_ylim(pdlims)
# Make period labels be non-scientific notation
ax.get_xaxis().set_major_formatter(plt.FormatStrFormatter("%g"))
# Plot magnetic field lines
Bs_to_plot = [8, 10, 12, 14]
for logB in Bs_to_plot:
plt.plot(plims, pu.pdot_from_B(plims, 10.0**logB), '-', color=grey)
if logB==14:
y = 4e-10
x = pu.pdot_from_B(y, 10.0**logB)
elif logB==8:
x = 0.05
y = pu.pdot_from_B(x, 10.0**logB)
else:
x = 1.1 * plims[0]
y = pu.pdot_from_B(x, 10.0**logB)
plt.text(x, 0.6*y, "$10^{%d}$ G"%logB, color=greytext,
horizontalalignment='left', verticalalignment='baseline',
rotation=np.degrees(np.arctan(-1.0 * dpdpd)))
# Plot Edot lines
Edots_to_plot = [31, 34, 37, 40]
for logEdot in Edots_to_plot[::-1]:
plt.plot(plims, pu.pdot_from_edot(plims, 10.0**logEdot), '-', color=grey)
if logEdot > 31:
y = 5e-10
x = 0.6 * (y * 4e45 * np.pi * np.pi / 10.0**logEdot)**(1.0/3.0)
else:
y = 1e-21
x = 0.6 * (y * 4e45 * np.pi * np.pi / 10.0**logEdot)**(1.0/3.0)
plt.text(x, 0.14*y, "$10^{%d}$ erg/s"%logEdot, color=greytext,
horizontalalignment='left', verticalalignment='baseline',
rotation=np.degrees(np.arctan(3.0 * dpdpd)))
# Plot Age lines
Ages_to_plot = [3, 5, 7, 9, 11]
Ages_labels = ['1 Kyr', '100 Kyr', '10 Myr', '1 Gyr', '100 Gyr']
for logAge, label in zip(Ages_to_plot, Ages_labels):
plt.plot(plims, pu.pdot_from_age(plims, 10.0**logAge), '-', color=grey)
x = 1.1 * plims[0]
plt.text(x, 1.1 * pu.pdot_from_age(x, 10.0**logAge), label, color=greytext,
horizontalalignment='left', verticalalignment='bottom',
rotation=np.degrees(np.arctan(1.0 * dpdpd)))
# Now plot the radio pulsars as black dots
plt.plot(ps[radios], pds[radios], '.', color='0.3', ms=3, label="Radio PSRs")
# Plot the HE and non-radio pulsars as triangles
# Assume that all non-radio pulsars are high-energy emitters
all_he = np.unique(np.concatenate((hepsrs, nonradios)))
color = 'magenta' if usecolor else 'black'
plt.plot(ps[all_he], pds[all_he], '^', ms=6, mew=1.1, mec=color, mfc='none',
label="X-ray/$\gamma$-ray")
# Plot the binaries as circles
plt.plot(ps[binaries], pds[binaries], 'ko', ms=8, mfc='none',
label="Binaries")
# Plot the SNRs as stars
color = 'darkorange' if usecolor else 'black'
mew = 1.0 if usecolor else 0.7
plt.plot(ps[snrs], pds[snrs], '*', ms=14, mfc='none', mew=mew, mec=color,
label="SNR Assoc")
# Plot the magnetars as filled triangles
color = 'cyan' if usecolor else 'black'
plt.plot(ps[magnetars], pds[magnetars], '^', mec='black', mfc=color, ms=8,
label="Magnetars", alpha=0.5)
# Plot the RRATs as x's
color = 'green' if usecolor else 'black'
plt.plot(ps[rrats], pds[rrats], 'x', ms=6, mew=1.2, mec=color,
label="RRATs")
plt.xlabel("Spin Period (s)")
plt.ylabel("Period Derivative (i.e Spin-Down Rate)")
ax.legend(loc='lower right', numpoints=1)
plt.savefig("ppdot_color_%s.png"%cat.version if usecolor \
else "ppdot_%s.png"%cat.version)
#plt.show()
| 5,371 | 31.361446 | 79 | py |
presto | presto-master/examplescripts/testcorr.py | from __future__ import print_function
from builtins import range
from numpy import *
from Pgplot import *
from presto import *
file = "testz"
displaybins = 20
numbetween = 16
numpts = displaybins * numbetween
# Read the '.mak' file
md = read_makfile(file)
# Open and read the data
fftfile = open(file+".fft","rb")
filelen = chkfilelen(fftfile, 8)
data = fftfile.read()
data = frombuffer(data, "F")
fftfile.close()
nph = data[0].real
# Set up some useful things
centerr = md.r + md.z / 2.0
startbin = floor(centerr - displaybins / 2.0)
ca = zeros(numpts, dtype=complex64)
cf = arange(startbin, startbin + displaybins, 1.0 / numbetween)
# Show the power spectrum without f-dot correction
kern_halfwidth = z_resp_halfwidth(0.0, HIGHACC)
numkern = 2 * numbetween * kern_halfwidth
for i in range(numpts):
ca[i] = rz_interp(data, filelen, cf[i], 0.0, kern_halfwidth)
cpow = spectralpower(asarray(ca)) / nph
cphs = spectralphase(ca)
maxval = argmax(cpow)
plotxy(cpow, cf-1e6, labx="Fourier Frequency - 1e6", laby="Power")
print("Maximum value is at r =", startbin + maxval / float(numbetween))
print(" Power =", cpow[maxval], " Phase =", cphs[maxval])
closeplot()
# Show the power spectrum with f-dot correction
kern_halfwidth = z_resp_halfwidth(md.z, HIGHACC)
numkern = 2 * numbetween * kern_halfwidth
for i in range(numpts):
ca[i] = rz_interp(data, filelen, cf[i], md.z, kern_halfwidth)
cpow = spectralpower(ca) / nph
cphs = spectralphase(ca)
maxval = argmax(cpow)
plotxy(cpow, cf-1e6, labx="Fourier Frequency - 1e6", laby="Power")
print("Maximum value is at r =", startbin + maxval / float(numbetween))
print(" Power =", cpow[maxval], " Phase =", cphs[maxval])
closeplot()
| 1,689 | 29.727273 | 71 | py |
presto | presto-master/examplescripts/short_analysis_simple.py | #!/usr/bin/python
from os import system, chdir
from sys import stdout, argv, exit
from glob import glob
from optparse import OptionParser
from presto.presto import read_inffile, writeinf, get_baryv
from presto import infodata
def myexecute(cmd):
stdout.write("\n'"+cmd+"'\n")
stdout.flush()
system(cmd)
def main():
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("-n", "--number", type="int", dest="nM", default=40,
help="Number of points in each chunk (millions)")
parser.add_option("-o", "--outdir", type="string", dest="outdir", default=".",
help="Output directory to store results")
parser.add_option("-d", "--workdir", type="string", dest="workdir", default=".",
help="Working directory for search")
parser.add_option("-l", "--flo", type="float", dest="flo", default=10.0,
help="Low frequency (Hz) to search")
parser.add_option("-f", "--frac", type="float", dest="frac", default=0.5,
help="Fraction to overlap")
parser.add_option("-x", "--fhi", type="float", dest="fhi", default=10000.0,
help="High frequency (Hz) to search")
parser.add_option("-z", "--zmax", type="int", dest="zmax", default=160,
help="Maximum fourier drift (bins) to search")
parser.add_option("-w", "--wmax", type="int", dest="wmax", default=0,
help="Maximum fourier drift deriv (bins) to search")
parser.add_option("-a", "--numharm", type="int", dest="numharm", default=4,
help="Number of harmonics to sum when searching")
parser.add_option("-s", "--sigma", type="float", dest="sigma", default=2.0,
help="Cutoff sigma to consider a candidate")
(options, args) = parser.parse_args()
    if options.outdir[-1] != "/":
        options.outdir = options.outdir+"/"
    if options.workdir != '.':
chdir(options.workdir)
if options.nM >= 1000000:
if options.nM % 1000000:
print("If you specify --num nM to be > 1000000, it must be divisible by 1000000.")
exit(1)
else:
options.nM *= 1000000
short_nM = options.nM // 1000000
# The basename of the data files
if argv[1].endswith(".dat"):
basename = "../"+argv[1][:-4]
else:
basename = "../"+argv[1]
# Get the bird file (the first birdie file in the directory!)
birdname = glob("../*.birds")
if birdname:
birdname = birdname[0]
outnamebase = options.outdir+basename[3:]
inf = read_inffile(basename)
idata = infodata.infodata(basename+".inf")
N = inf.N
t0i = inf.mjd_i
t0f = inf.mjd_f
num = 0
point = 0
T = options.nM * inf.dt / 86400.0
baryv = get_baryv(idata.RA, idata.DEC, idata.epoch, T, obs='GB')
print("Baryv = ", baryv)
inf.N = options.nM
inf.numonoff = 0
nM = options.nM // 1000000
while point + options.nM < N:
pM = point // 1000000
outname = basename[3:]+'_%03dM'%nM+'_%02d'%num
stdout.write('\n'+outname+'\n\n')
inf.name = outname
tstartf = t0f + num * T * options.frac
if tstartf > 1.0:
tstartf = tstartf - 1.0
inf.mjd_i = t0i + 1
inf.mjd_f = tstartf
writeinf(inf)
myexecute('dd if=' + basename +'.dat of=' + outname +'.dat bs=4000000 skip=' +
repr(pM) + ' count=' + repr(nM))
myexecute('realfft '+outname+'.dat')
myexecute('rm -f '+outname+'.dat')
myexecute('cp '+birdname+' '+outname+'.birds')
myexecute('makezaplist.py '+outname+'.birds')
myexecute('rm -f '+outname+'.birds')
myexecute('zapbirds -zap -zapfile '+outname+'.zaplist -baryv %g '%
baryv+outname+'.fft')
myexecute('rm -f '+outname+'.zaplist')
if options.wmax > 0:
myexecute('accelsearch -sigma %.2f -zmax %d -wmax %d -numharm %d -flo %f -fhi %f '%
(options.sigma, options.zmax, options.wmax,
options.numharm, options.flo, options.fhi)+outname+'.fft')
myexecute('rm '+outname+'.fft '+outname+'_JERK_%d.txtcand'%options.wmax)
myexecute('cp '+outname+'_JERK_%d '%options.wmax + options.outdir)
myexecute('cp '+outname+'_JERK_%d.cand '%options.wmax + options.outdir)
else:
myexecute('accelsearch -sigma %.2f -zmax %d -numharm %d -flo %f -fhi %f '%
(options.sigma, options.zmax,
options.numharm, options.flo, options.fhi)+outname+'.fft')
myexecute('rm '+outname+'.fft '+outname+'_ACCEL_%d.txtcand'%options.zmax)
myexecute('cp '+outname+'_ACCEL_%d '%options.zmax + options.outdir)
myexecute('cp '+outname+'_ACCEL_%d.cand '%options.zmax + options.outdir)
myexecute('cp '+outname+'.inf '+options.outdir)
num = num + 1
point = point + int(options.nM * options.frac)
if __name__ == "__main__":
main()
| 5,094 | 41.458333 | 95 | py |
presto | presto-master/examplescripts/concatdata.py | from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
# Binary floating point data file concatenation routine
#
# Written by Scott M. Ransom <[email protected]>
# last revision: 1 Mar 99
#
usage = """
Usage: concatdata outfile numpts padval infile1 infile2 ...
This routine helps properly connect binary time-series.
Mandatory arguments:
outfile: The intended name of the freshly concat-ed time-series.
numpts: The number of points for the new time-series to contain.
padval: The value to pad the time-series with.
infiles: A list (in order) of the short data sets to concatenate.
Each file must have a '.dat' suffix and a corresponding
'.inf' file describing each must exist. There
must be at least 2 files to concat.
"""
from sys import argv, exit
from string import atol, rfind
from presto import *
from infodata import *
from math import modf
from Numeric import *
def addtoMJD(daystoadd, MJDi, MJDf):
(fdays, idays) = modf(daystoadd)
MJDf = MJDf + fdays
if (idays >= 0.0):
MJDi = MJDi + int(idays + 1.0e-10)
else:
MJDi = MJDi + int(idays - 1.0e-10)
if (MJDf >= 1.0):
MJDf = MJDf - 1.0
MJDi = MJDi + 1
if (MJDf < 0.0):
MJDf = MJDf + 1.0
MJDi = MJDi - 1
return (MJDi, MJDf)
def subtractMJDs(MJDi1, MJDf1, MJDi2, MJDf2):
# return MJD1 - MJD2
return MJDi1 - MJDi2 + MJDf1 - MJDf2
debug = 1
SBF = 1.0e-4 # Smallest bin fraction to worry about
# Show a usage statement if necessary
if (len(argv)<6):
print(usage)
exit(0)
# Get and check the arguments
print('')
print(' Binary Data Concatenation Routine')
print(' Written by Scott M. Ransom')
print(' 1 Mar 99\n')
outfilenm = argv[1]
numpts = atol(argv[2])
padval = float(argv[3])
if (numpts < 0):
print('numpts must be greater than 0. Exiting.')
print(usage)
exit(-1)
print('Creating a %ld point file named \'%s\'.' % (numpts, outfilenm))
print('Using %f for each padding point.\n' % padval)
# Read the important data from the infofiles into lists
infile = []
file_data = []
file_startMJDi = []
file_startMJDf = []
file_endMJDi = []
file_endMJDf = []
file_N = []
padbins = []
print('The input files are:')
for index in range(len(argv)-4):
infile.append(argv[index+4])
infile[index] = infile[index][0:rfind(infile[index],'.')]
# Get the info about the data file
file_data.append(infodata(infile[index]+".inf"))
file_data[index].mjd_i = int(file_data[index].epoch)
file_data[index].mjd_f = file_data[index].epoch - file_data[index].mjd_i
file_N.append(int(file_data[index].N + 1.0e-10))
file_startMJDi.append(file_data[index].mjd_i)
file_startMJDf.append(file_data[index].mjd_f)
# Calculate the ending MJDs of each segment
(MJDi, MJDf) = addtoMJD((file_data[index].dt * file_N[index]) \
/ SECPERDAY, file_startMJDi[index], \
file_startMJDf[index])
file_endMJDi.append(MJDi)
file_endMJDf.append(MJDf)
print(' %s.dat: %9.0f pts at MJD %5d.%015.0f' % \
(infile[index], file_N[index], \
file_startMJDi[index], file_startMJDf[index] * 1.0e15))
if (index > 0):
if not (dt == file_data[index].dt):
print('\nCannot concatenate the data. The input file dt\'s')
print(' are different. Exiting.')
exit(-1)
else:
dt = file_data[index].dt
# Calculate the number of bins of padding to use between
# each data segment.
padbins.append(subtractMJDs(file_startMJDi[index], \
file_startMJDf[index], \
file_endMJDi[index-1], \
file_endMJDf[index-1]) \
* SECPERDAY / dt)
else:
dt = file_data[index].dt
print('')
# Convert the infodata into Numpy Arrays and determine the number of
# bins to add as padding as well as the shifts needed in the data sets
nf = len(file_data)
padbins = asarray(padbins)
# Calculate the number of whole bins of padding
wholebins = (padbins+SBF).astype('l')
# Calculate the shifts required to keep the latter data segment
# in proper phase with the first segment
shift = padbins - wholebins - 1.0
shift = where(less(shift, -1.0 + SBF), 0.0, shift)
shift = where(greater(shift, 1.0 - SBF), 0.0, shift)
for index in range(len(shift)):
if (fabs(shift[index]) > SBF):
file_N[index + 1] = file_N[index + 1] + 1;
shift = where(greater(fabs(shift), SBF), shift, 0.0)
wholebins = wholebins.tolist()
# Calculate the number of bins of padding to tack on the end
endpad = numpts - add.reduce(wholebins) - add.reduce(file_N)
if endpad:
wholebins.append(endpad)
# Adjust the start MJDs for the shifting of bins in the latter
# data sets.
for index in range(len(shift)):
if (shift[index] < -SBF):
(MJDi, MJDf) = addtoMJD((1.0 + shift[index]) * dt / SECPERDAY, \
file_startMJDi[index+1], \
file_startMJDf[index+1])
file_startMJDi[index+1] = MJDi
file_startMJDf[index+1] = MJDf
# Show the user what shifts were required
print('The bin shifts requires to align the data files in phase are:')
print(' %s.dat: %+f bins' % (infile[0], 0.0))
for index in range(len(shift)):
print(' %s.dat: %+f bins' % (infile[index+1], shift[index]))
print('')
# Show the user what the output files will consist of
print('The output file will consist of:')
outfile_N = []
commands = []
totalbins = 0
for index in range(len(wholebins)):
outfile_N.append(file_N[index])
(MJDi, MJDf) = addtoMJD(totalbins * dt / SECPERDAY, \
file_startMJDi[0], \
file_startMJDf[0])
totalbins = totalbins + outfile_N[2 * index]
print(' data: %9.0f pts starting at MJD %5d.%015.0f' % \
(outfile_N[2 * index], MJDi, MJDf * 1.0e15))
if (index == 0):
commands.append(" cp %s.dat %s" % (infile[0], outfilenm))
else:
commands.append(" shiftdata %f %s.dat >> %s" % \
(shift[index-1], infile[index], outfilenm))
outfile_N.append(wholebins[index])
(MJDi, MJDf) = addtoMJD(totalbins * dt / SECPERDAY, \
file_startMJDi[0], \
file_startMJDf[0])
print(' padding: %9.0f pts starting at MJD %5d.%015.0f' % \
(outfile_N[2 * index + 1], MJDi, MJDf * 1.0e15))
totalbins = totalbins + outfile_N[2 * index + 1]
commands.append(" patchdata %ld %f >> %s" % \
(wholebins[index], padval, outfilenm))
if (len(wholebins) < len(file_N)):
    outfile_N.append(file_N[-1])
    (MJDi, MJDf) = addtoMJD(totalbins * dt / SECPERDAY, \
                            file_startMJDi[0], \
                            file_startMJDf[0])
    print('    data: %9.0f pts starting at MJD %5d.%015.0f' % \
          (outfile_N[-1], MJDi, MJDf * 1.0e15))
    commands.append("  shiftdata %f %s.dat >> %s" % \
                    (shift[-1], infile[-1], outfilenm))
print('')
# Show the user the commands we will use to concat everything
print('The commands to perform the concatenation will be:')
for index in range(len(commands)):
print(commands[index])
print('')
| 7,616 | 32.262009 | 76 | py |
presto | presto-master/examplescripts/show_zresp.py | from __future__ import print_function
from builtins import range
from numpy import *
from presto.Pgplot import *
from presto.presto import *
kern_half_width = 10
numbetween = 10
numkern = 2 * numbetween * kern_half_width
f = arange(numkern, dtype=float64) / numbetween - kern_half_width
kern = gen_z_response(0.0, numbetween, numkern, 0.0)
pkern = spectralpower(kern)
print("Freq ", f[len(f)//2], " = ", pkern[len(f)//2])
plotxy(pkern, f, labx="Fourier Frequency Offset", \
laby="Normalized Power", device="z_responses.eps/CPS")
#plotxy(pkern, f, labx="Fourier Frequency Offset", \
# laby="Normalized Power")
for i in range(4):
z = 5.0 * i
kern = gen_z_response(0.0, numbetween, numkern, z)
pkern = spectralpower(kern)
plotxy(pkern, f, color=i+1)
ppgplot.pgtext(5.0, 0.8 - i*0.1, 'z = %1.0f' % z)
closeplot()
| 843 | 32.76 | 65 | py |
CTAB-GAN-Plus | CTAB-GAN-Plus-main/model/ctabgan.py | """
Generative model training algorithm based on the CTABGANSynthesiser
"""
import pandas as pd
import time
from model.pipeline.data_preparation import DataPrep
from model.synthesizer.ctabgan_synthesizer import CTABGANSynthesizer
import warnings
warnings.filterwarnings("ignore")
class CTABGAN():
def __init__(self,
raw_csv_path = "Real_Datasets/Adult.csv",
test_ratio = 0.20,
categorical_columns = [ 'workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'native-country', 'income'],
log_columns = [],
mixed_columns= {'capital-loss':[0.0],'capital-gain':[0.0]},
general_columns = ["age"],
non_categorical_columns = [],
integer_columns = ['age', 'fnlwgt','capital-gain', 'capital-loss','hours-per-week'],
problem_type= {"Classification": "income"}):
self.__name__ = 'CTABGAN'
self.synthesizer = CTABGANSynthesizer()
self.raw_df = pd.read_csv(raw_csv_path)
self.test_ratio = test_ratio
self.categorical_columns = categorical_columns
self.log_columns = log_columns
self.mixed_columns = mixed_columns
self.general_columns = general_columns
self.non_categorical_columns = non_categorical_columns
self.integer_columns = integer_columns
self.problem_type = problem_type
def fit(self):
start_time = time.time()
self.data_prep = DataPrep(self.raw_df,self.categorical_columns,self.log_columns,self.mixed_columns,self.general_columns,self.non_categorical_columns,self.integer_columns,self.problem_type,self.test_ratio)
self.synthesizer.fit(train_data=self.data_prep.df, categorical = self.data_prep.column_types["categorical"], mixed = self.data_prep.column_types["mixed"],
general = self.data_prep.column_types["general"], non_categorical = self.data_prep.column_types["non_categorical"], type=self.problem_type)
end_time = time.time()
print('Finished training in',end_time-start_time," seconds.")
def generate_samples(self):
sample = self.synthesizer.sample(len(self.raw_df))
sample_df = self.data_prep.inverse_prep(sample)
return sample_df
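
# Added usage sketch (not part of the original class): a minimal end-to-end run.
# It assumes "Real_Datasets/Adult.csv" exists and matches the default column lists
# in the constructor; the output filename below is arbitrary.
if __name__ == "__main__":
    synth = CTABGAN(raw_csv_path="Real_Datasets/Adult.csv")
    synth.fit()
    fake_df = synth.generate_samples()
    fake_df.to_csv("Adult_fake.csv", index=False)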
| 2,364 | 41.232143 | 212 | py |
CTAB-GAN-Plus | CTAB-GAN-Plus-main/model/pipeline/data_preparation.py | import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn import model_selection
class DataPrep(object):
def __init__(self, raw_df: pd.DataFrame, categorical: list, log:list, mixed:dict, general:list, non_categorical:list, integer:list, type:dict, test_ratio:float):
self.categorical_columns = categorical
self.log_columns = log
self.mixed_columns = mixed
self.general_columns = general
self.non_categorical_columns = non_categorical
self.integer_columns = integer
self.column_types = dict()
self.column_types["categorical"] = []
self.column_types["mixed"] = {}
self.column_types["general"] = []
self.column_types["non_categorical"] = []
self.lower_bounds = {}
self.label_encoder_list = []
        target_col = list(type.values())[0] if type else None
if target_col is not None:
y_real = raw_df[target_col]
X_real = raw_df.drop(columns=[target_col])
X_train_real, _, y_train_real, _ = model_selection.train_test_split(X_real ,y_real, test_size=test_ratio, stratify=y_real,random_state=42)
X_train_real[target_col]= y_train_real
self.df = X_train_real
else:
self.df = raw_df
self.df = self.df.replace(r' ', np.nan)
self.df = self.df.fillna('empty')
all_columns= set(self.df.columns)
irrelevant_missing_columns = set(self.categorical_columns)
relevant_missing_columns = list(all_columns - irrelevant_missing_columns)
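        # Missing values in numeric/mixed columns cannot keep the string placeholder
        # "empty", so they are re-encoded with the sentinel -9999999 and registered as
        # a "modal" value so the mixed-type encoder treats them as a separate category.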
for i in relevant_missing_columns:
if i in self.log_columns:
if "empty" in list(self.df[i].values):
self.df[i] = self.df[i].apply(lambda x: -9999999 if x=="empty" else x)
self.mixed_columns[i] = [-9999999]
elif i in list(self.mixed_columns.keys()):
if "empty" in list(self.df[i].values):
self.df[i] = self.df[i].apply(lambda x: -9999999 if x=="empty" else x )
self.mixed_columns[i].append(-9999999)
else:
if "empty" in list(self.df[i].values):
self.df[i] = self.df[i].apply(lambda x: -9999999 if x=="empty" else x)
self.mixed_columns[i] = [-9999999]
if self.log_columns:
for log_column in self.log_columns:
valid_indices = []
for idx,val in enumerate(self.df[log_column].values):
if val!=-9999999:
valid_indices.append(idx)
eps = 1
lower = np.min(self.df[log_column].iloc[valid_indices].values)
self.lower_bounds[log_column] = lower
if lower>0:
self.df[log_column] = self.df[log_column].apply(lambda x: np.log(x) if x!=-9999999 else -9999999)
elif lower == 0:
self.df[log_column] = self.df[log_column].apply(lambda x: np.log(x+eps) if x!=-9999999 else -9999999)
else:
self.df[log_column] = self.df[log_column].apply(lambda x: np.log(x-lower+eps) if x!=-9999999 else -9999999)
for column_index, column in enumerate(self.df.columns):
if column in self.categorical_columns:
label_encoder = preprocessing.LabelEncoder()
self.df[column] = self.df[column].astype(str)
label_encoder.fit(self.df[column])
current_label_encoder = dict()
current_label_encoder['column'] = column
current_label_encoder['label_encoder'] = label_encoder
transformed_column = label_encoder.transform(self.df[column])
self.df[column] = transformed_column
self.label_encoder_list.append(current_label_encoder)
self.column_types["categorical"].append(column_index)
if column in self.general_columns:
self.column_types["general"].append(column_index)
if column in self.non_categorical_columns:
self.column_types["non_categorical"].append(column_index)
elif column in self.mixed_columns:
self.column_types["mixed"][column_index] = self.mixed_columns[column]
elif column in self.general_columns:
self.column_types["general"].append(column_index)
super().__init__()
def inverse_prep(self, data, eps=1):
df_sample = pd.DataFrame(data,columns=self.df.columns)
for i in range(len(self.label_encoder_list)):
le = self.label_encoder_list[i]["label_encoder"]
df_sample[self.label_encoder_list[i]["column"]] = df_sample[self.label_encoder_list[i]["column"]].astype(int)
df_sample[self.label_encoder_list[i]["column"]] = le.inverse_transform(df_sample[self.label_encoder_list[i]["column"]])
if self.log_columns:
for i in df_sample:
if i in self.log_columns:
lower_bound = self.lower_bounds[i]
if lower_bound>0:
                        df_sample[i] = df_sample[i].apply(lambda x: np.exp(x))
elif lower_bound==0:
df_sample[i] = df_sample[i].apply(lambda x: np.ceil(np.exp(x)-eps) if (np.exp(x)-eps) < 0 else (np.exp(x)-eps))
else:
df_sample[i] = df_sample[i].apply(lambda x: np.exp(x)-eps+lower_bound)
if self.integer_columns:
for column in self.integer_columns:
df_sample[column]= (np.round(df_sample[column].values))
df_sample[column] = df_sample[column].astype(int)
df_sample.replace(-9999999, np.nan,inplace=True)
df_sample.replace('empty', np.nan,inplace=True)
return df_sample
| 6,035 | 45.076336 | 165 | py |
CTAB-GAN-Plus | CTAB-GAN-Plus-main/model/eval/evaluation.py | import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn import model_selection
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import svm,tree
from sklearn.ensemble import RandomForestClassifier
from dython.nominal import compute_associations
from scipy.stats import wasserstein_distance
from scipy.spatial import distance
import warnings
warnings.filterwarnings("ignore")
def supervised_model_training(x_train, y_train, x_test,
y_test, model_name):
if model_name == 'lr':
model = LogisticRegression(random_state=42,max_iter=500)
elif model_name == 'svm':
model = svm.SVC(random_state=42,probability=True)
elif model_name == 'dt':
model = tree.DecisionTreeClassifier(random_state=42)
elif model_name == 'rf':
model = RandomForestClassifier(random_state=42)
elif model_name == "mlp":
model = MLPClassifier(random_state=42,max_iter=100)
model.fit(x_train, y_train)
pred = model.predict(x_test)
if len(np.unique(y_train))>2:
predict = model.predict_proba(x_test)
acc = metrics.accuracy_score(y_test,pred)*100
auc = metrics.roc_auc_score(y_test, predict,average="weighted",multi_class="ovr")
f1_score = metrics.precision_recall_fscore_support(y_test, pred,average="weighted")[2]
return [acc, auc,f1_score]
else:
predict = model.predict_proba(x_test)[:,1]
acc = metrics.accuracy_score(y_test,pred)*100
auc = metrics.roc_auc_score(y_test, predict)
f1_score = metrics.precision_recall_fscore_support(y_test,pred)[2].mean()
return [acc, auc,f1_score]
def get_utility_metrics(real_path,fake_paths,scaler="MinMax",classifiers=["lr","dt","rf","mlp"],test_ratio=.20):
data_real = pd.read_csv(real_path).to_numpy()
data_dim = data_real.shape[1]
data_real_y = data_real[:,-1]
data_real_X = data_real[:,:data_dim-1]
X_train_real, X_test_real, y_train_real, y_test_real = model_selection.train_test_split(data_real_X ,data_real_y, test_size=test_ratio, stratify=data_real_y,random_state=42)
if scaler=="MinMax":
scaler_real = MinMaxScaler()
else:
scaler_real = StandardScaler()
scaler_real.fit(data_real_X)
X_train_real_scaled = scaler_real.transform(X_train_real)
X_test_real_scaled = scaler_real.transform(X_test_real)
all_real_results = []
for classifier in classifiers:
real_results = supervised_model_training(X_train_real_scaled,y_train_real,X_test_real_scaled,y_test_real,classifier)
all_real_results.append(real_results)
all_fake_results_avg = []
for fake_path in fake_paths:
data_fake = pd.read_csv(fake_path).to_numpy()
data_fake_y = data_fake[:,-1]
data_fake_X = data_fake[:,:data_dim-1]
X_train_fake, _ , y_train_fake, _ = model_selection.train_test_split(data_fake_X ,data_fake_y, test_size=test_ratio, stratify=data_fake_y,random_state=42)
if scaler=="MinMax":
scaler_fake = MinMaxScaler()
else:
scaler_fake = StandardScaler()
scaler_fake.fit(data_fake_X)
X_train_fake_scaled = scaler_fake.transform(X_train_fake)
all_fake_results = []
for classifier in classifiers:
fake_results = supervised_model_training(X_train_fake_scaled,y_train_fake,X_test_real_scaled,y_test_real,classifier)
all_fake_results.append(fake_results)
all_fake_results_avg.append(all_fake_results)
diff_results = np.array(all_real_results)- np.array(all_fake_results_avg).mean(axis=0)
return diff_results
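# Illustrative call of get_utility_metrics (added; the file paths are placeholders):
# rows of the returned array follow the classifiers list and columns are the
# [accuracy, AUC, F1] gaps (real-trained minus synthetic-trained, averaged over
# the fake datasets), e.g.
#   diff = get_utility_metrics("Real_Datasets/Adult.csv",
#                              ["Fake_Datasets/Adult_fake_0.csv"],
#                              scaler="MinMax", classifiers=["lr", "dt", "rf", "mlp"])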
def stat_sim(real_path,fake_path,cat_cols=None):
Stat_dict={}
real = pd.read_csv(real_path)
fake = pd.read_csv(fake_path)
really = real.copy()
fakey = fake.copy()
real_corr = compute_associations(real, nominal_columns=cat_cols)
fake_corr = compute_associations(fake, nominal_columns=cat_cols)
corr_dist = np.linalg.norm(real_corr - fake_corr)
cat_stat = []
num_stat = []
for column in real.columns:
if column in cat_cols:
real_pdf=(really[column].value_counts()/really[column].value_counts().sum())
fake_pdf=(fakey[column].value_counts()/fakey[column].value_counts().sum())
categories = (fakey[column].value_counts()/fakey[column].value_counts().sum()).keys().tolist()
sorted_categories = sorted(categories)
real_pdf_values = []
fake_pdf_values = []
for i in sorted_categories:
real_pdf_values.append(real_pdf[i])
fake_pdf_values.append(fake_pdf[i])
if len(real_pdf)!=len(fake_pdf):
zero_cats = set(really[column].value_counts().keys())-set(fakey[column].value_counts().keys())
for z in zero_cats:
real_pdf_values.append(real_pdf[z])
fake_pdf_values.append(0)
Stat_dict[column]=(distance.jensenshannon(real_pdf_values,fake_pdf_values, 2.0))
cat_stat.append(Stat_dict[column])
else:
scaler = MinMaxScaler()
scaler.fit(real[column].values.reshape(-1,1))
l1 = scaler.transform(real[column].values.reshape(-1,1)).flatten()
l2 = scaler.transform(fake[column].values.reshape(-1,1)).flatten()
Stat_dict[column]= (wasserstein_distance(l1,l2))
num_stat.append(Stat_dict[column])
return [np.mean(num_stat),np.mean(cat_stat),corr_dist]
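# Illustrative call of stat_sim (added; paths and the column-name list are
# placeholders): it returns the mean Wasserstein distance over numeric columns, the
# mean Jensen-Shannon distance over categorical columns, and the norm of the
# difference between the real and fake association matrices, e.g.
#   wd_num, jsd_cat, corr_diff = stat_sim("Real_Datasets/Adult.csv",
#                                         "Fake_Datasets/Adult_fake_0.csv",
#                                         cat_cols=categorical_column_names)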
def privacy_metrics(real_path,fake_path,data_percent=15):
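    # Privacy summary based on nearest neighbours of standardized samples: the first
    # three returned values are 5th-percentile distances to the closest record
    # (real-fake, real-real, fake-fake); the last three are 5th-percentile ratios of
    # the closest to the second-closest neighbour for the same pairings.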
real = pd.read_csv(real_path).drop_duplicates(keep=False)
fake = pd.read_csv(fake_path).drop_duplicates(keep=False)
real_refined = real.sample(n=int(len(real)*(.01*data_percent)), random_state=42).to_numpy()
fake_refined = fake.sample(n=int(len(fake)*(.01*data_percent)), random_state=42).to_numpy()
scalerR = StandardScaler()
scalerR.fit(real_refined)
scalerF = StandardScaler()
scalerF.fit(fake_refined)
df_real_scaled = scalerR.transform(real_refined)
df_fake_scaled = scalerF.transform(fake_refined)
dist_rf = metrics.pairwise_distances(df_real_scaled, Y=df_fake_scaled, metric='minkowski', n_jobs=-1)
dist_rr = metrics.pairwise_distances(df_real_scaled, Y=None, metric='minkowski', n_jobs=-1)
rd_dist_rr = dist_rr[~np.eye(dist_rr.shape[0],dtype=bool)].reshape(dist_rr.shape[0],-1)
dist_ff = metrics.pairwise_distances(df_fake_scaled, Y=None, metric='minkowski', n_jobs=-1)
rd_dist_ff = dist_ff[~np.eye(dist_ff.shape[0],dtype=bool)].reshape(dist_ff.shape[0],-1)
smallest_two_indexes_rf = [dist_rf[i].argsort()[:2] for i in range(len(dist_rf))]
smallest_two_rf = [dist_rf[i][smallest_two_indexes_rf[i]] for i in range(len(dist_rf))]
smallest_two_indexes_rr = [rd_dist_rr[i].argsort()[:2] for i in range(len(rd_dist_rr))]
smallest_two_rr = [rd_dist_rr[i][smallest_two_indexes_rr[i]] for i in range(len(rd_dist_rr))]
smallest_two_indexes_ff = [rd_dist_ff[i].argsort()[:2] for i in range(len(rd_dist_ff))]
smallest_two_ff = [rd_dist_ff[i][smallest_two_indexes_ff[i]] for i in range(len(rd_dist_ff))]
nn_ratio_rr = np.array([i[0]/i[1] for i in smallest_two_rr])
nn_ratio_ff = np.array([i[0]/i[1] for i in smallest_two_ff])
nn_ratio_rf = np.array([i[0]/i[1] for i in smallest_two_rf])
nn_fifth_perc_rr = np.percentile(nn_ratio_rr,5)
nn_fifth_perc_ff = np.percentile(nn_ratio_ff,5)
nn_fifth_perc_rf = np.percentile(nn_ratio_rf,5)
min_dist_rf = np.array([i[0] for i in smallest_two_rf])
fifth_perc_rf = np.percentile(min_dist_rf,5)
min_dist_rr = np.array([i[0] for i in smallest_two_rr])
fifth_perc_rr = np.percentile(min_dist_rr,5)
min_dist_ff = np.array([i[0] for i in smallest_two_ff])
fifth_perc_ff = np.percentile(min_dist_ff,5)
return np.array([fifth_perc_rf,fifth_perc_rr,fifth_perc_ff,nn_fifth_perc_rf,nn_fifth_perc_rr,nn_fifth_perc_ff]).reshape(1,6) | 8,160 | 41.284974 | 178 | py |
CTAB-GAN-Plus | CTAB-GAN-Plus-main/model/privacy_utils/rdp_accountant.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import numpy as np
from scipy import special
import six
########################
# LOG-SPACE ARITHMETIC #
########################
def _log_add(logx, logy):
"""Add two numbers in the log space."""
a, b = min(logx, logy), max(logx, logy)
if a == -np.inf: # adding 0
return b
# Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)
def _log_sub(logx, logy):
"""Subtract two numbers in the log space. Answer must be non-negative."""
if logx < logy:
raise ValueError("The result of subtraction must be non-negative.")
if logy == -np.inf: # subtracting 0
return logx
if logx == logy:
return -np.inf # 0 is represented as -np.inf in the log space.
try:
# Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).
return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1
except OverflowError:
return logx
def _log_print(logx):
"""Pretty print."""
if logx < math.log(sys.float_info.max):
return "{}".format(math.exp(logx))
else:
return "exp({})".format(logx)
def _compute_log_a_int(q, sigma, alpha):
"""Compute log(A_alpha) for integer alpha. 0 < q < 1."""
assert isinstance(alpha, six.integer_types)
# Initialize with 0 in the log space.
log_a = -np.inf
for i in range(alpha + 1):
log_coef_i = (
math.log(special.binom(alpha, i)) + i * math.log(q) +
(alpha - i) * math.log(1 - q))
s = log_coef_i + (i * i - i) / (2 * (sigma**2))
log_a = _log_add(log_a, s)
return float(log_a)
def _compute_log_a_frac(q, sigma, alpha):
"""Compute log(A_alpha) for fractional alpha. 0 < q < 1."""
# The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
# initialized to 0 in the log space:
log_a0, log_a1 = -np.inf, -np.inf
i = 0
z0 = sigma**2 * math.log(1 / q - 1) + .5
while True: # do ... until loop
coef = special.binom(alpha, i)
log_coef = math.log(abs(coef))
j = alpha - i
log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)
log_e0 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
log_e1 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))
log_s0 = log_t0 + (i * i - i) / (2 * (sigma**2)) + log_e0
log_s1 = log_t1 + (j * j - j) / (2 * (sigma**2)) + log_e1
if coef > 0:
log_a0 = _log_add(log_a0, log_s0)
log_a1 = _log_add(log_a1, log_s1)
else:
log_a0 = _log_sub(log_a0, log_s0)
log_a1 = _log_sub(log_a1, log_s1)
i += 1
if max(log_s0, log_s1) < -30:
break
return _log_add(log_a0, log_a1)
def _compute_log_a(q, sigma, alpha):
"""Compute log(A_alpha) for any positive finite alpha."""
if float(alpha).is_integer():
return _compute_log_a_int(q, sigma, int(alpha))
else:
return _compute_log_a_frac(q, sigma, alpha)
def _log_erfc(x):
"""Compute log(erfc(x)) with high accuracy for large x."""
try:
return math.log(2) + special.log_ndtr(-x * 2**.5)
except NameError:
# If log_ndtr is not available, approximate as follows:
r = special.erfc(x)
if r == 0.0:
# Using the Laurent series at infinity for the tail of the erfc function:
# erfc(x) ~ exp(-x^2-.5/x^2+.625/x^4)/(x*pi^.5)
# To verify in Mathematica:
# Series[Log[Erfc[x]] + Log[x] + Log[Pi]/2 + x^2, {x, Infinity, 6}]
return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
.625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
else:
return math.log(r)
def _compute_delta(orders, rdp, eps):
"""Compute delta given a list of RDP values and target epsilon.
Args:
orders: An array (or a scalar) of orders.
rdp: A list (or a scalar) of RDP guarantees.
eps: The target epsilon.
Returns:
Pair of (delta, optimal_order).
Raises:
ValueError: If input is malformed.
"""
orders_vec = np.atleast_1d(orders)
rdp_vec = np.atleast_1d(rdp)
if len(orders_vec) != len(rdp_vec):
raise ValueError("Input lists must have the same length.")
deltas = np.exp((rdp_vec - eps) * (orders_vec - 1))
idx_opt = np.argmin(deltas)
return min(deltas[idx_opt], 1.), orders_vec[idx_opt]
def _compute_eps(orders, rdp, delta):
"""Compute epsilon given a list of RDP values and target delta.
Args:
orders: An array (or a scalar) of orders.
rdp: A list (or a scalar) of RDP guarantees.
delta: The target delta.
Returns:
Pair of (eps, optimal_order).
Raises:
ValueError: If input is malformed.
"""
orders_vec = np.atleast_1d(orders)
rdp_vec = np.atleast_1d(rdp)
if len(orders_vec) != len(rdp_vec):
raise ValueError("Input lists must have the same length.")
eps = rdp_vec - math.log(delta) / (orders_vec - 1)
idx_opt = np.nanargmin(eps) # Ignore NaNs
return eps[idx_opt], orders_vec[idx_opt]
def _compute_rdp(q, sigma, alpha):
"""Compute RDP of the Sampled Gaussian mechanism at order alpha.
Args:
q: The sampling rate.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
"""
if q == 0:
return 0
if q == 1.:
return alpha / (2 * sigma**2)
if np.isinf(alpha):
return np.inf
return _compute_log_a(q, sigma, alpha) / (alpha - 1)
def compute_rdp(q, noise_multiplier, steps, orders):
"""Compute RDP of the Sampled Gaussian Mechanism.
Args:
q: The sampling rate.
noise_multiplier: The ratio of the standard deviation of the Gaussian noise
to the l2-sensitivity of the function to which it is added.
steps: The number of steps.
orders: An array (or a scalar) of RDP orders.
Returns:
The RDPs at all orders, can be np.inf.
"""
if np.isscalar(orders):
rdp = _compute_rdp(q, noise_multiplier, orders)
else:
rdp = np.array([_compute_rdp(q, noise_multiplier, order)
for order in orders])
return rdp * steps
def get_privacy_spent(orders, rdp, target_eps=None, target_delta=None):
"""Compute delta (or eps) for given eps (or delta) from RDP values.
Args:
orders: An array (or a scalar) of RDP orders.
rdp: An array of RDP values. Must be of the same length as the orders list.
target_eps: If not None, the epsilon for which we compute the corresponding
delta.
target_delta: If not None, the delta for which we compute the corresponding
epsilon. Exactly one of target_eps and target_delta must be None.
Returns:
eps, delta, opt_order.
Raises:
ValueError: If target_eps and target_delta are messed up.
"""
if target_eps is None and target_delta is None:
raise ValueError(
"Exactly one out of eps and delta must be None. (Both are).")
if target_eps is not None and target_delta is not None:
raise ValueError(
"Exactly one out of eps and delta must be None. (None is).")
if target_eps is not None:
delta, opt_order = _compute_delta(orders, rdp, target_eps)
return target_eps, delta, opt_order
else:
eps, opt_order = _compute_eps(orders, rdp, target_delta)
return eps, target_delta, opt_order
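# Worked example (added for illustration): for sampling rate q = batch_size/N, a
# given noise multiplier and number of optimizer steps, a typical accounting call is
#   orders = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))
#   rdp = compute_rdp(q=0.01, noise_multiplier=1.1, steps=1000, orders=orders)
#   eps, delta, opt_order = get_privacy_spent(orders, rdp, target_delta=1e-5)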
def compute_rdp_from_ledger(ledger, orders):
"""Compute RDP of Sampled Gaussian Mechanism from ledger.
Args:
ledger: A formatted privacy ledger.
orders: An array (or a scalar) of RDP orders.
Returns:
RDP at all orders, can be np.inf.
"""
total_rdp = np.zeros_like(orders, dtype=float)
for sample in ledger:
# Compute equivalent z from l2_clip_bounds and noise stddevs in sample.
# See https://arxiv.org/pdf/1812.06210.pdf for derivation of this formula.
effective_z = sum([
(q.noise_stddev / q.l2_norm_bound)**-2 for q in sample.queries])**-0.5
total_rdp += compute_rdp(
sample.selection_probability, effective_z, 1, orders)
return total_rdp | 8,040 | 27.717857 | 79 | py |
CTAB-GAN-Plus | CTAB-GAN-Plus-main/model/synthesizer/ctabgan_synthesizer.py | import numpy as np
import pandas as pd
import torch
import torch.utils.data
import torch.optim as optim
from torch.optim import Adam
from torch.nn import functional as F
from torch.nn import (Dropout, LeakyReLU, Linear, Module, ReLU, Sequential,
Conv2d, ConvTranspose2d, Sigmoid, init, BCELoss, CrossEntropyLoss,SmoothL1Loss,LayerNorm)
from model.synthesizer.transformer import ImageTransformer,DataTransformer
from model.privacy_utils.rdp_accountant import compute_rdp, get_privacy_spent
from tqdm import tqdm
class Classifier(Module):
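    # Auxiliary classifier/regressor trained on the target column; its loss on
    # generated rows is fed back to the generator. The head is a single linear unit
    # for regression (target span 1), a sigmoid unit for binary targets (span 2),
    # and a multi-unit linear layer for multi-class targets.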
def __init__(self,input_dim, dis_dims,st_ed):
super(Classifier,self).__init__()
dim = input_dim-(st_ed[1]-st_ed[0])
seq = []
self.str_end = st_ed
for item in list(dis_dims):
seq += [
Linear(dim, item),
LeakyReLU(0.2),
Dropout(0.5)
]
dim = item
if (st_ed[1]-st_ed[0])==1:
seq += [Linear(dim, 1)]
elif (st_ed[1]-st_ed[0])==2:
seq += [Linear(dim, 1),Sigmoid()]
else:
seq += [Linear(dim,(st_ed[1]-st_ed[0]))]
self.seq = Sequential(*seq)
def forward(self, input):
label=None
if (self.str_end[1]-self.str_end[0])==1:
label = input[:, self.str_end[0]:self.str_end[1]]
else:
label = torch.argmax(input[:, self.str_end[0]:self.str_end[1]], axis=-1)
new_imp = torch.cat((input[:,:self.str_end[0]],input[:,self.str_end[1]:]),1)
if ((self.str_end[1]-self.str_end[0])==2) | ((self.str_end[1]-self.str_end[0])==1):
return self.seq(new_imp).view(-1), label
else:
return self.seq(new_imp), label
def apply_activate(data, output_info):
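    # Map raw generator outputs to data space: tanh for the scalar (continuous) parts
    # and Gumbel-softmax (tau=0.2) for the one-hot mode/category parts.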
data_t = []
st = 0
for item in output_info:
if item[1] == 'tanh':
ed = st + item[0]
data_t.append(torch.tanh(data[:, st:ed]))
st = ed
elif item[1] == 'softmax':
ed = st + item[0]
data_t.append(F.gumbel_softmax(data[:, st:ed], tau=0.2))
st = ed
return torch.cat(data_t, dim=1)
def get_st_ed(target_col_index,output_info):
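    # Locate the target variable inside the transformed representation: returns the
    # (start, end) column span of its encoding given its original column index.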
st = 0
c= 0
tc= 0
for item in output_info:
if c==target_col_index:
break
if item[1]=='tanh':
st += item[0]
if item[2] == 'yes_g':
c+=1
elif item[1] == 'softmax':
st += item[0]
c+=1
tc+=1
ed= st+output_info[tc][0]
return (st,ed)
def random_choice_prob_index_sampling(probs,col_idx):
option_list = []
for i in col_idx:
pp = probs[i]
option_list.append(np.random.choice(np.arange(len(probs[i])), p=pp))
return np.array(option_list).reshape(col_idx.shape)
def random_choice_prob_index(a, axis=1):
r = np.expand_dims(np.random.rand(a.shape[1 - axis]), axis=axis)
return (a.cumsum(axis=axis) > r).argmax(axis=axis)
def maximum_interval(output_info):
max_interval = 0
for item in output_info:
max_interval = max(max_interval, item[0])
return max_interval
class Cond(object):
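    # Conditional-vector module ("training-by-sampling"): category probabilities are
    # log-frequency smoothed for training batches (sample_train) and left as raw
    # empirical frequencies for generation (sample).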
def __init__(self, data, output_info):
self.model = []
st = 0
counter = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
counter += 1
self.model.append(np.argmax(data[:, st:ed], axis=-1))
st = ed
self.interval = []
self.n_col = 0
self.n_opt = 0
st = 0
self.p = np.zeros((counter, maximum_interval(output_info)))
self.p_sampling = []
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
tmp = np.sum(data[:, st:ed], axis=0)
tmp_sampling = np.sum(data[:, st:ed], axis=0)
tmp = np.log(tmp + 1)
tmp = tmp / np.sum(tmp)
tmp_sampling = tmp_sampling / np.sum(tmp_sampling)
self.p_sampling.append(tmp_sampling)
self.p[self.n_col, :item[0]] = tmp
self.interval.append((self.n_opt, item[0]))
self.n_opt += item[0]
self.n_col += 1
st = ed
self.interval = np.asarray(self.interval)
def sample_train(self, batch):
if self.n_col == 0:
return None
batch = batch
idx = np.random.choice(np.arange(self.n_col), batch)
vec = np.zeros((batch, self.n_opt), dtype='float32')
mask = np.zeros((batch, self.n_col), dtype='float32')
mask[np.arange(batch), idx] = 1
opt1prime = random_choice_prob_index(self.p[idx])
for i in np.arange(batch):
vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1
return vec, mask, idx, opt1prime
def sample(self, batch):
if self.n_col == 0:
return None
batch = batch
idx = np.random.choice(np.arange(self.n_col), batch)
vec = np.zeros((batch, self.n_opt), dtype='float32')
opt1prime = random_choice_prob_index_sampling(self.p_sampling,idx)
for i in np.arange(batch):
vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1
return vec
def cond_loss(data, output_info, c, m):
loss = []
st = 0
st_c = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
ed_c = st_c + item[0]
tmp = F.cross_entropy(
data[:, st:ed],
torch.argmax(c[:, st_c:ed_c], dim=1),
reduction='none')
loss.append(tmp)
st = ed
st_c = ed_c
loss = torch.stack(loss, dim=1)
return (loss * m).sum() / data.size()[0]
class Sampler(object):
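    # Samples real rows that actually contain the requested category, so each
    # conditional vector is paired with a consistent real example.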
def __init__(self, data, output_info):
super(Sampler, self).__init__()
self.data = data
self.model = []
self.n = len(data)
st = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
tmp = []
for j in range(item[0]):
tmp.append(np.nonzero(data[:, st + j])[0])
self.model.append(tmp)
st = ed
def sample(self, n, col, opt):
if col is None:
idx = np.random.choice(np.arange(self.n), n)
return self.data[idx]
idx = []
for c, o in zip(col, opt):
idx.append(np.random.choice(self.model[c][o]))
return self.data[idx]
class Discriminator(Module):
def __init__(self, side, layers):
super(Discriminator, self).__init__()
self.side = side
info = len(layers)-2
self.seq = Sequential(*layers)
self.seq_info = Sequential(*layers[:info])
def forward(self, input):
return (self.seq(input)), self.seq_info(input)
class Generator(Module):
def __init__(self, side, layers):
super(Generator, self).__init__()
self.side = side
self.seq = Sequential(*layers)
def forward(self, input_):
return self.seq(input_)
def determine_layers_disc(side, num_channels):
assert side >= 4 and side <= 64
layer_dims = [(1, side), (num_channels, side // 2)]
while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2))
layerNorms = []
num_c = num_channels
num_s = side / 2
for l in range(len(layer_dims) - 1):
layerNorms.append([int(num_c), int(num_s), int(num_s)])
num_c = num_c * 2
num_s = num_s / 2
layers_D = []
for prev, curr, ln in zip(layer_dims, layer_dims[1:], layerNorms):
layers_D += [
Conv2d(prev[0], curr[0], 4, 2, 1, bias=False),
LayerNorm(ln),
LeakyReLU(0.2, inplace=True),
]
layers_D += [Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0), ReLU(True)]
return layers_D
def determine_layers_gen(side, random_dim, num_channels):
assert side >= 4 and side <= 64
layer_dims = [(1, side), (num_channels, side // 2)]
while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2))
layerNorms = []
num_c = num_channels * (2 ** (len(layer_dims) - 2))
num_s = int(side / (2 ** (len(layer_dims) - 1)))
for l in range(len(layer_dims) - 1):
layerNorms.append([int(num_c), int(num_s), int(num_s)])
num_c = num_c / 2
num_s = num_s * 2
layers_G = [ConvTranspose2d(random_dim, layer_dims[-1][0], layer_dims[-1][1], 1, 0, output_padding=0, bias=False)]
for prev, curr, ln in zip(reversed(layer_dims), reversed(layer_dims[:-1]), layerNorms):
layers_G += [LayerNorm(ln), ReLU(True), ConvTranspose2d(prev[0], curr[0], 4, 2, 1, output_padding=0, bias=True)]
return layers_G
def slerp(val, low, high):
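    # Spherical linear interpolation between paired rows of `low` and `high`,
    # used to build the interpolates for the gradient penalty below.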
low_norm = low/torch.norm(low, dim=1, keepdim=True)
high_norm = high/torch.norm(high, dim=1, keepdim=True)
omega = torch.acos((low_norm*high_norm).sum(1)).view(val.size(0), 1)
so = torch.sin(omega)
res = (torch.sin((1.0-val)*omega)/so)*low + (torch.sin(val*omega)/so) * high
return res
def calc_gradient_penalty_slerp(netD, real_data, fake_data, transformer, device='cpu', lambda_=10):
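    # WGAN-GP style penalty, but interpolating real/fake rows with slerp in the flat
    # tabular space and reshaping to the discriminator's image layout before scoring.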
batchsize = real_data.shape[0]
alpha = torch.rand(batchsize, 1, device=device)
interpolates = slerp(alpha, real_data, fake_data)
interpolates = interpolates.to(device)
interpolates = transformer.transform(interpolates)
interpolates = torch.autograd.Variable(interpolates, requires_grad=True)
disc_interpolates,_ = netD(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients_norm = gradients.norm(2, dim=1)
gradient_penalty = ((gradients_norm - 1) ** 2).mean() * lambda_
return gradient_penalty
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0)
class CTABGANSynthesizer:
def __init__(self,
class_dim=(256, 256, 256, 256),
random_dim=100,
num_channels=64,
l2scale=1e-5,
batch_size=500,
epochs=150):
self.random_dim = random_dim
self.class_dim = class_dim
self.num_channels = num_channels
self.dside = None
self.gside = None
self.l2scale = l2scale
self.batch_size = batch_size
self.epochs = epochs
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def fit(self, train_data=pd.DataFrame, categorical=[], mixed={}, general=[], non_categorical=[], type={}):
problem_type = None
target_index=None
if type:
problem_type = list(type.keys())[0]
if problem_type:
target_index = train_data.columns.get_loc(type[problem_type])
self.transformer = DataTransformer(train_data=train_data, categorical_list=categorical, mixed_dict=mixed, general_list=general, non_categorical_list=non_categorical)
self.transformer.fit()
train_data = self.transformer.transform(train_data.values)
data_sampler = Sampler(train_data, self.transformer.output_info)
data_dim = self.transformer.output_dim
self.cond_generator = Cond(train_data, self.transformer.output_info)
sides = [4, 8, 16, 24, 32, 64]
col_size_d = data_dim + self.cond_generator.n_opt
for i in sides:
if i * i >= col_size_d:
self.dside = i
break
sides = [4, 8, 16, 24, 32, 64]
col_size_g = data_dim
for i in sides:
if i * i >= col_size_g:
self.gside = i
break
layers_G = determine_layers_gen(self.gside, self.random_dim+self.cond_generator.n_opt, self.num_channels)
layers_D = determine_layers_disc(self.dside, self.num_channels)
self.generator = Generator(self.gside, layers_G).to(self.device)
discriminator = Discriminator(self.dside, layers_D).to(self.device)
optimizer_params = dict(lr=2e-4, betas=(0.5, 0.9), eps=1e-3, weight_decay=self.l2scale)
optimizerG = Adam(self.generator.parameters(), **optimizer_params)
optimizerD = Adam(discriminator.parameters(), **optimizer_params)
st_ed = None
classifier=None
optimizerC= None
if target_index != None:
st_ed= get_st_ed(target_index,self.transformer.output_info)
classifier = Classifier(data_dim,self.class_dim,st_ed).to(self.device)
optimizerC = optim.Adam(classifier.parameters(),**optimizer_params)
self.generator.apply(weights_init)
discriminator.apply(weights_init)
self.Gtransformer = ImageTransformer(self.gside)
self.Dtransformer = ImageTransformer(self.dside)
epsilon = 0
epoch = 0
steps = 0
ci = 1
steps_per_epoch = max(1, len(train_data) // self.batch_size)
for i in tqdm(range(self.epochs)):
for id_ in range(steps_per_epoch):
for _ in range(ci):
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample_train(self.batch_size)
c, m, col, opt = condvec
c = torch.from_numpy(c).to(self.device)
m = torch.from_numpy(m).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
perm = np.arange(self.batch_size)
np.random.shuffle(perm)
real = data_sampler.sample(self.batch_size, col[perm], opt[perm])
c_perm = c[perm]
real = torch.from_numpy(real.astype('float32')).to(self.device)
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
fake_cat = torch.cat([fakeact, c], dim=1)
real_cat = torch.cat([real, c_perm], dim=1)
real_cat_d = self.Dtransformer.transform(real_cat)
fake_cat_d = self.Dtransformer.transform(fake_cat)
optimizerD.zero_grad()
d_real,_ = discriminator(real_cat_d)
d_real = -torch.mean(d_real)
d_real.backward()
d_fake,_ = discriminator(fake_cat_d)
d_fake = torch.mean(d_fake)
d_fake.backward()
pen = calc_gradient_penalty_slerp(discriminator, real_cat, fake_cat, self.Dtransformer , self.device)
pen.backward()
optimizerD.step()
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample_train(self.batch_size)
c, m, col, opt = condvec
c = torch.from_numpy(c).to(self.device)
m = torch.from_numpy(m).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
optimizerG.zero_grad()
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
fake_cat = torch.cat([fakeact, c], dim=1)
fake_cat = self.Dtransformer.transform(fake_cat)
y_fake,info_fake = discriminator(fake_cat)
cross_entropy = cond_loss(faket, self.transformer.output_info, c, m)
_,info_real = discriminator(real_cat_d)
g = -torch.mean(y_fake) + cross_entropy
g.backward(retain_graph=True)
loss_mean = torch.norm(torch.mean(info_fake.view(self.batch_size,-1), dim=0) - torch.mean(info_real.view(self.batch_size,-1), dim=0), 1)
loss_std = torch.norm(torch.std(info_fake.view(self.batch_size,-1), dim=0) - torch.std(info_real.view(self.batch_size,-1), dim=0), 1)
loss_info = loss_mean + loss_std
loss_info.backward()
optimizerG.step()
if problem_type:
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
real_pre, real_label = classifier(real)
fake_pre, fake_label = classifier(fakeact)
c_loss = CrossEntropyLoss()
if (st_ed[1] - st_ed[0])==1:
c_loss= SmoothL1Loss()
real_label = real_label.type_as(real_pre)
fake_label = fake_label.type_as(fake_pre)
real_label = torch.reshape(real_label,real_pre.size())
fake_label = torch.reshape(fake_label,fake_pre.size())
elif (st_ed[1] - st_ed[0])==2:
c_loss = BCELoss()
real_label = real_label.type_as(real_pre)
fake_label = fake_label.type_as(fake_pre)
loss_cc = c_loss(real_pre, real_label)
loss_cg = c_loss(fake_pre, fake_label)
optimizerG.zero_grad()
loss_cg.backward()
optimizerG.step()
optimizerC.zero_grad()
loss_cc.backward()
optimizerC.step()
epoch += 1
def sample(self, n):
self.generator.eval()
output_info = self.transformer.output_info
steps = n // self.batch_size + 1
data = []
for i in range(steps):
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample(self.batch_size)
c = condvec
c = torch.from_numpy(c).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket,output_info)
data.append(fakeact.detach().cpu().numpy())
data = np.concatenate(data, axis=0)
result,resample = self.transformer.inverse_transform(data)
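        # inverse_transform also reports how many rows were rejected as invalid during
        # decoding; keep generating until at least n valid rows have been collected.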
while len(result) < n:
data_resample = []
steps_left = resample// self.batch_size + 1
for i in range(steps_left):
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample(self.batch_size)
c = condvec
c = torch.from_numpy(c).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, output_info)
data_resample.append(fakeact.detach().cpu().numpy())
data_resample = np.concatenate(data_resample, axis=0)
res,resample = self.transformer.inverse_transform(data_resample)
result = np.concatenate([result,res],axis=0)
return result[0:n]
| 21,711 | 35.186667 | 173 | py |
CTAB-GAN-Plus | CTAB-GAN-Plus-main/model/synthesizer/transformer.py | import numpy as np
import pandas as pd
import torch
from sklearn.mixture import BayesianGaussianMixture
class DataTransformer():
def __init__(self, train_data=pd.DataFrame, categorical_list=[], mixed_dict={}, general_list=[], non_categorical_list=[], n_clusters=10, eps=0.005):
self.meta = None
self.n_clusters = n_clusters
self.eps = eps
self.train_data = train_data
self.categorical_columns= categorical_list
self.mixed_columns= mixed_dict
self.general_columns = general_list
self.non_categorical_columns= non_categorical_list
def get_metadata(self):
meta = []
for index in range(self.train_data.shape[1]):
column = self.train_data.iloc[:,index]
if index in self.categorical_columns:
if index in self.non_categorical_columns:
meta.append({
"name": index,
"type": "continuous",
"min": column.min(),
"max": column.max(),
})
else:
mapper = column.value_counts().index.tolist()
meta.append({
"name": index,
"type": "categorical",
"size": len(mapper),
"i2s": mapper
})
elif index in self.mixed_columns.keys():
meta.append({
"name": index,
"type": "mixed",
"min": column.min(),
"max": column.max(),
"modal": self.mixed_columns[index]
})
else:
meta.append({
"name": index,
"type": "continuous",
"min": column.min(),
"max": column.max(),
})
return meta
def fit(self):
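        # Fit per-column encoders: continuous columns (unless listed as "general") get a
        # variational Gaussian mixture (mode-specific normalization) keeping only modes
        # with weight > eps that claim samples; "general" columns use plain min-max
        # scaling; mixed columns fit two mixtures (with and without the categorical
        # "modal" values); categorical columns are one-hot encoded.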
data = self.train_data.values
self.meta = self.get_metadata()
model = []
self.ordering = []
self.output_info = []
self.output_dim = 0
self.components = []
self.filter_arr = []
for id_, info in enumerate(self.meta):
if info['type'] == "continuous":
if id_ not in self.general_columns:
gm = BayesianGaussianMixture(
n_components = self.n_clusters,
weight_concentration_prior_type='dirichlet_process',
weight_concentration_prior=0.001,
max_iter=100,n_init=1, random_state=42)
gm.fit(data[:, id_].reshape([-1, 1]))
mode_freq = (pd.Series(gm.predict(data[:, id_].reshape([-1, 1]))).value_counts().keys())
model.append(gm)
old_comp = gm.weights_ > self.eps
comp = []
for i in range(self.n_clusters):
if (i in (mode_freq)) & old_comp[i]:
comp.append(True)
else:
comp.append(False)
self.components.append(comp)
self.output_info += [(1, 'tanh','no_g'), (np.sum(comp), 'softmax')]
self.output_dim += 1 + np.sum(comp)
else:
model.append(None)
self.components.append(None)
self.output_info += [(1, 'tanh','yes_g')]
self.output_dim += 1
elif info['type'] == "mixed":
gm1 = BayesianGaussianMixture(
n_components = self.n_clusters,
weight_concentration_prior_type='dirichlet_process',
weight_concentration_prior=0.001, max_iter=100,
n_init=1,random_state=42)
gm2 = BayesianGaussianMixture(
n_components = self.n_clusters,
weight_concentration_prior_type='dirichlet_process',
weight_concentration_prior=0.001, max_iter=100,
n_init=1,random_state=42)
gm1.fit(data[:, id_].reshape([-1, 1]))
filter_arr = []
for element in data[:, id_]:
if element not in info['modal']:
filter_arr.append(True)
else:
filter_arr.append(False)
gm2.fit(data[:, id_][filter_arr].reshape([-1, 1]))
mode_freq = (pd.Series(gm2.predict(data[:, id_][filter_arr].reshape([-1, 1]))).value_counts().keys())
self.filter_arr.append(filter_arr)
model.append((gm1,gm2))
old_comp = gm2.weights_ > self.eps
comp = []
for i in range(self.n_clusters):
if (i in (mode_freq)) & old_comp[i]:
comp.append(True)
else:
comp.append(False)
self.components.append(comp)
self.output_info += [(1, 'tanh',"no_g"), (np.sum(comp) + len(info['modal']), 'softmax')]
self.output_dim += 1 + np.sum(comp) + len(info['modal'])
else:
model.append(None)
self.components.append(None)
self.output_info += [(info['size'], 'softmax')]
self.output_dim += info['size']
self.model = model
def transform(self, data, ispositive = False, positive_list = None):
values = []
mixed_counter = 0
for id_, info in enumerate(self.meta):
current = data[:, id_]
if info['type'] == "continuous":
if id_ not in self.general_columns:
current = current.reshape([-1, 1])
means = self.model[id_].means_.reshape((1, self.n_clusters))
stds = np.sqrt(self.model[id_].covariances_).reshape((1, self.n_clusters))
features = np.empty(shape=(len(current),self.n_clusters))
if ispositive == True:
if id_ in positive_list:
features = np.abs(current - means) / (4 * stds)
else:
features = (current - means) / (4 * stds)
probs = self.model[id_].predict_proba(current.reshape([-1, 1]))
n_opts = sum(self.components[id_])
features = features[:, self.components[id_]]
probs = probs[:, self.components[id_]]
opt_sel = np.zeros(len(data), dtype='int')
for i in range(len(data)):
pp = probs[i] + 1e-6
pp = pp / sum(pp)
opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp)
idx = np.arange((len(features)))
features = features[idx, opt_sel].reshape([-1, 1])
features = np.clip(features, -.99, .99)
probs_onehot = np.zeros_like(probs)
probs_onehot[np.arange(len(probs)), opt_sel] = 1
re_ordered_phot = np.zeros_like(probs_onehot)
col_sums = probs_onehot.sum(axis=0)
n = probs_onehot.shape[1]
largest_indices = np.argsort(-1*col_sums)[:n]
self.ordering.append(largest_indices)
for id,val in enumerate(largest_indices):
re_ordered_phot[:,id] = probs_onehot[:,val]
values += [features, re_ordered_phot]
else:
self.ordering.append(None)
if id_ in self.non_categorical_columns:
info['min'] = -1e-3
info['max'] = info['max'] + 1e-3
current = (current - (info['min'])) / (info['max'] - info['min'])
current = current * 2 - 1
current = current.reshape([-1, 1])
values.append(current)
elif info['type'] == "mixed":
means_0 = self.model[id_][0].means_.reshape([-1])
stds_0 = np.sqrt(self.model[id_][0].covariances_).reshape([-1])
zero_std_list = []
means_needed = []
stds_needed = []
for mode in info['modal']:
if mode!=-9999999:
dist = []
for idx,val in enumerate(list(means_0.flatten())):
dist.append(abs(mode-val))
index_min = np.argmin(np.array(dist))
zero_std_list.append(index_min)
else: continue
for idx in zero_std_list:
means_needed.append(means_0[idx])
stds_needed.append(stds_0[idx])
mode_vals = []
for i,j,k in zip(info['modal'],means_needed,stds_needed):
this_val = np.abs(i - j) / (4*k)
mode_vals.append(this_val)
if -9999999 in info["modal"]:
mode_vals.append(0)
current = current.reshape([-1, 1])
filter_arr = self.filter_arr[mixed_counter]
current = current[filter_arr]
means = self.model[id_][1].means_.reshape((1, self.n_clusters))
stds = np.sqrt(self.model[id_][1].covariances_).reshape((1, self.n_clusters))
features = np.empty(shape=(len(current),self.n_clusters))
if ispositive == True:
if id_ in positive_list:
features = np.abs(current - means) / (4 * stds)
else:
features = (current - means) / (4 * stds)
probs = self.model[id_][1].predict_proba(current.reshape([-1, 1]))
n_opts = sum(self.components[id_]) # 8
features = features[:, self.components[id_]]
probs = probs[:, self.components[id_]]
opt_sel = np.zeros(len(current), dtype='int')
for i in range(len(current)):
pp = probs[i] + 1e-6
pp = pp / sum(pp)
opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp)
idx = np.arange((len(features)))
features = features[idx, opt_sel].reshape([-1, 1])
features = np.clip(features, -.99, .99)
probs_onehot = np.zeros_like(probs)
probs_onehot[np.arange(len(probs)), opt_sel] = 1
extra_bits = np.zeros([len(current), len(info['modal'])])
temp_probs_onehot = np.concatenate([extra_bits,probs_onehot], axis = 1)
final = np.zeros([len(data), 1 + probs_onehot.shape[1] + len(info['modal'])])
features_curser = 0
for idx, val in enumerate(data[:, id_]):
if val in info['modal']:
category_ = list(map(info['modal'].index, [val]))[0]
final[idx, 0] = mode_vals[category_]
final[idx, (category_+1)] = 1
else:
final[idx, 0] = features[features_curser]
final[idx, (1+len(info['modal'])):] = temp_probs_onehot[features_curser][len(info['modal']):]
features_curser = features_curser + 1
just_onehot = final[:,1:]
re_ordered_jhot= np.zeros_like(just_onehot)
n = just_onehot.shape[1]
col_sums = just_onehot.sum(axis=0)
largest_indices = np.argsort(-1*col_sums)[:n]
self.ordering.append(largest_indices)
for id,val in enumerate(largest_indices):
re_ordered_jhot[:,id] = just_onehot[:,val]
final_features = final[:,0].reshape([-1, 1])
values += [final_features, re_ordered_jhot]
mixed_counter = mixed_counter + 1
else:
self.ordering.append(None)
col_t = np.zeros([len(data), info['size']])
idx = list(map(info['i2s'].index, current))
col_t[np.arange(len(data)), idx] = 1
values.append(col_t)
return np.concatenate(values, axis=1)
def inverse_transform(self, data):
data_t = np.zeros([len(data), len(self.meta)])
invalid_ids = []
st = 0
for id_, info in enumerate(self.meta):
if info['type'] == "continuous":
if id_ not in self.general_columns:
u = data[:, st]
v = data[:, st + 1:st + 1 + np.sum(self.components[id_])]
order = self.ordering[id_]
v_re_ordered = np.zeros_like(v)
for id,val in enumerate(order):
v_re_ordered[:,val] = v[:,id]
v = v_re_ordered
u = np.clip(u, -1, 1)
v_t = np.ones((data.shape[0], self.n_clusters)) * -100
v_t[:, self.components[id_]] = v
v = v_t
st += 1 + np.sum(self.components[id_])
means = self.model[id_].means_.reshape([-1])
stds = np.sqrt(self.model[id_].covariances_).reshape([-1])
p_argmax = np.argmax(v, axis=1)
std_t = stds[p_argmax]
mean_t = means[p_argmax]
tmp = u * 4 * std_t + mean_t
for idx,val in enumerate(tmp):
if (val < info["min"]) | (val > info['max']):
invalid_ids.append(idx)
if id_ in self.non_categorical_columns:
tmp = np.round(tmp)
data_t[:, id_] = tmp
else:
u = data[:, st]
u = (u + 1) / 2
u = np.clip(u, 0, 1)
u = u * (info['max'] - info['min']) + info['min']
if id_ in self.non_categorical_columns:
data_t[:, id_] = np.round(u)
else: data_t[:, id_] = u
st += 1
elif info['type'] == "mixed":
u = data[:, st]
full_v = data[:,(st+1):(st+1)+len(info['modal'])+np.sum(self.components[id_])]
order = self.ordering[id_]
full_v_re_ordered = np.zeros_like(full_v)
for id,val in enumerate(order):
full_v_re_ordered[:,val] = full_v[:,id]
full_v = full_v_re_ordered
mixed_v = full_v[:,:len(info['modal'])]
v = full_v[:,-np.sum(self.components[id_]):]
u = np.clip(u, -1, 1)
v_t = np.ones((data.shape[0], self.n_clusters)) * -100
v_t[:, self.components[id_]] = v
v = np.concatenate([mixed_v,v_t], axis=1)
st += 1 + np.sum(self.components[id_]) + len(info['modal'])
means = self.model[id_][1].means_.reshape([-1])
stds = np.sqrt(self.model[id_][1].covariances_).reshape([-1])
p_argmax = np.argmax(v, axis=1)
result = np.zeros_like(u)
for idx in range(len(data)):
if p_argmax[idx] < len(info['modal']):
argmax_value = p_argmax[idx]
result[idx] = float(list(map(info['modal'].__getitem__, [argmax_value]))[0])
else:
std_t = stds[(p_argmax[idx]-len(info['modal']))]
mean_t = means[(p_argmax[idx]-len(info['modal']))]
result[idx] = u[idx] * 4 * std_t + mean_t
for idx,val in enumerate(result):
if (val < info["min"]) | (val > info['max']):
invalid_ids.append(idx)
data_t[:, id_] = result
else:
current = data[:, st:st + info['size']]
st += info['size']
idx = np.argmax(current, axis=1)
data_t[:, id_] = list(map(info['i2s'].__getitem__, idx))
invalid_ids = np.unique(np.array(invalid_ids))
all_ids = np.arange(0,len(data))
valid_ids = list(set(all_ids) - set(invalid_ids))
return data_t[valid_ids],len(invalid_ids)
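# Hedged usage sketch (added for illustration, not part of the original file):
# each continuous value is encoded as a scalar offset within its Gaussian mode
# plus a one-hot mode indicator, so the encoded width is 1 + #active modes.
#
#     df = pd.DataFrame({"amount": np.random.exponential(100, 1000)})
#     dt = DataTransformer(train_data=df)
#     dt.fit()
#     enc = dt.transform(df.values)              # (1000, 1 + n_active_modes)
#     dec, n_invalid = dt.inverse_transform(enc)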
class ImageTransformer():
def __init__(self, side):
self.height = side
def transform(self, data):
if self.height * self.height > len(data[0]):
padding = torch.zeros((len(data), self.height * self.height - len(data[0]))).to(data.device)
data = torch.cat([data, padding], axis=1)
return data.view(-1, 1, self.height, self.height)
def inverse_transform(self, data):
data = data.view(-1, self.height * self.height)
return data
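# Hedged usage sketch (illustrative only): ImageTransformer zero-pads a flat
# row to side*side values and reshapes it so convolutional networks can treat
# tabular rows as 1-channel square images.
#
#     it = ImageTransformer(side=8)
#     img = it.transform(torch.zeros(16, 50))   # -> (16, 1, 8, 8), zero-padded
#     row = it.inverse_transform(img)           # -> (16, 64)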
| 17,809 | 40.418605 | 152 | py |
AutoCO | AutoCO-main/exp_simulate/dataset/data_nas/data_loader.py | from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
import numpy as np
import random
import re
import json
import pandas as pd
tfs_feature_file = 'raw_data/tfs_features2.txt'
log_file = "raw_data/log_online.txt"
cnt = 0
X_template = []
X_cate1 = []
X_cate2 = []
X_encode = []
X_encode_0 = []
X_encode_1 = []
X_encode_2 = []
X_encode_3 = []
X_pic = []
Y = []
tfs_feat = {}
with open(tfs_feature_file, 'r') as f:
for line in f:
seq = line.split()
tfs = seq[0]
if tfs not in tfs_feat.keys():
# tfs_feat[tfs] = seq[1]
tt = [float(s) for s in seq[1].split(',')]
mu = np.mean(tt)
std = np.std(tt, ddof=1)
tfs_feat[tfs] = [float(s-mu)/std for s in tt]
with open(log_file, 'r') as ff:
for line in ff:
line = line.split('\n')[0]
line = line.split('//')
tfs = line[3]
tid = line[5]
        clk = 1 if line[1][0] != '(' else 0  # compare by value; identity check on a str literal is unreliable
# if clk==0 and np.random.rand>0.8:
# continue
encode = [int(e) for e in line[2].split()]
encode_len = len(encode)
# print(encode_len)
# print(encode)
cate1 = line[4]
if tfs in tfs_feat.keys():
tfs_v = tfs_feat[tfs]
X_template.append(int(tid))
X_cate1.append(int(cate1))
X_encode.append(encode)
X_encode_0.append(encode[0])
X_encode_1.append(encode[1])
X_encode_2.append(encode[2])
X_encode_3.append(encode[3])
X_pic.append(tfs_v)
Y.append(clk)
cnt += 1
# if cnt >1010000:
# break
elif tfs not in tfs_feat.keys():
print("Error ! No tfs!!!", tfs)
print("finish loading")
le = LabelEncoder()
X_template = le.fit_transform(X_template)
X_cate1 = le.fit_transform(X_cate1)
X_encode_0 = le.fit_transform(X_encode_0)
X_encode_1 = le.fit_transform(X_encode_1)
X_encode_2 = le.fit_transform(X_encode_2)
X_encode_3 = le.fit_transform(X_encode_3)
# scale = StandardScaler()
# X_pic = scale.fit_transform(X_pic)
# length = len(Y)
length = len(Y)
data = []
ind = 0
item_dict = {}
distinct_encode = []
dict_encode = {}
item_encode = {}
feature_list = []
pro_list = []
for x in range(length):
if x%20000==0:
print(x,' has generated')
data.append([])
data[x].extend(X_pic[x])
data[x].append(X_cate1[x])
data[x].append(X_template[x])
if X_encode[x] not in distinct_encode:
distinct_encode.append(X_encode[x])
dict_encode[','.join([str(e) for e in X_encode[x]])] = len(distinct_encode)-1
key = ','.join([str(e) for e in data[x]])
if key not in item_dict.keys():
item_dict[key] = ind
feature_list.append(data[x].copy())
pro_list.append(0)
item_encode[ind] = []
ind += 1
pro_list[item_dict[key]]+=1
t = dict_encode[','.join([str(e) for e in X_encode[x]])]
if t not in item_encode[item_dict[key]]:
item_encode[item_dict[key]].append(t)
data[x].append(X_encode_0[x])
data[x].append(X_encode_1[x])
data[x].append(X_encode_2[x])
data[x].append(X_encode_3[x])
# print(data[x])
# data[x].insert(0, item_dict[key])
data[x].append(Y[x])
for i in range(len(pro_list)):
pro_list[i]/=float(length)
out_file = 'dataset/dataset_1400.pkl'
data_p = pd.DataFrame(data)
# # print(data_p.shape)
data_p.to_pickle(out_file)
# out_file = "data_nas/log.pkl"
out_file_1 = "./creative_list.pkl"
out_file_2 = './item_candidates.pkl'
out_file_3 = './feature_list.pkl'
out_file_4 = './pro_list.pkl'
import pickle
# with open(out_file, 'wb') as d:
# pickle.dump(data, d)
with open(out_file_1, 'wb') as d:
pickle.dump(distinct_encode, d)
with open(out_file_2, 'wb') as d:
pickle.dump(item_encode, d)
with open(out_file_3, 'wb') as d:
pickle.dump(feature_list, d)
with open(out_file_4, 'wb') as d:
pickle.dump(pro_list, d) | 4,021 | 26.360544 | 85 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/simulate.py | import io
import os
import time
import pickle
import random
import logging
import datetime
import argparse
import coloredlogs
import numpy as np
import pandas as pd
from torch.utils.data import WeightedRandomSampler
from policy import FmEGreedy, Random, Greedy, FmGreedy, LinUCB, FmThompson, TS
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG')
def click(x, k, item_ctr, r_t):
"return the reward for impression"
if r_t == "click":
t = random.uniform(0,1)
if item_ctr[x][k] > t:
return 1
return 0
else:
return item_ctr[x][k]
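# Note (added for clarity): with simulate_type == "click" the reward is a
# Bernoulli draw with success probability item_ctr[x][k]; any other value
# returns the expected CTR itself, e.g. click(3, 0, {3: [0.12]}, "ctr") -> 0.12.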
def process_batch(features_list, memory, policy, item_ctr):
"recommend items under current policy"
# data: the impression items in current batch
data = []
for x in memory:
t = [x]
t.extend(features_list[x])
data.append(t)
# policy recommend creative
if policy.name == 'Greedy':
res = policy.recommend_batch(data, item_ctr)
else:
res = policy.recommend_batch(data)
return res
def evaluate(policy, params):
"process of simulation"
    # initialize reward recorders
record_arr = [0.5,1,2,3,4,5,6,7,8,9,10,12,14,16,18,20,22,24,26,28,30,32,36,40,45,50,60,70,80,90,
100,110,120,130,140,150,160,170,180,190,200,210,220,230,240,250,
260,280,300,330,360,400,500]
score, impressions = 0.0, 1.0
ctr_reward, auc_reward = [], []
    # initialize data recorders
memory, record, r_list, eg_ind = [], [], [], []
cnt = 0
initial = 0
# initial background information
f = open(params["feature_list"], 'rb')
f_list = pickle.load(f)
f = open(params["pro_list"], 'rb')
pro_list = pickle.load(f)
f = open(params['creative_list'], 'rb')
c_list = pickle.load(f)
f = open(params['item_candidates'], 'rb')
item_candi = pickle.load(f)
f = open(params['item_ctr'], 'rb')
item_ctr = pickle.load(f)
f_len = len(f_list[0])
leng = f_len+ len(c_list[0])
item_cnt = params['batch']
df = pd.read_pickle(params["random_file"])
warm_start = list(df.to_numpy())
record = warm_start[200000:202000]
# the main process of simulation
while impressions <= params["iter"]:
cnt += 1
item_cnt += 1
# decide which item to display
if item_cnt >= params['batch']:
item_list = list(WeightedRandomSampler(pro_list, params['batch']))
item_cnt = 0
x = item_list[item_cnt]
# if cnt < params["batch"]:
# line = f_list[x].copy()
# k = np.random.randint(0, len(item_candi[x]))
# line.extend(c_list[item_candi[x][k]])
# line.append(click(x, k, item_ctr, params["simulate_type"]))
# record.append(line)
# eg_ind.append(x)
# continue
# update policy with batch data
if len(record) >= params['batch']-3 or initial == 0:
initial = 1
auc_reward.append(policy.update(record, eg_ind))
record = []
eg_ind = []
# collect reward in current batch
memory.append(x)
        if len(memory) % params['s_batch'] == 0 and len(memory) > 0:
r_list = process_batch(f_list, memory, policy, item_ctr)
for i in range(len(r_list)):
line = f_list[memory[i]].copy()
t= item_candi[memory[i]][r_list[i]]
line.extend(c_list[t])
reward = click(memory[i], r_list[i], item_ctr, params["simulate_type"])
line.append(reward)
record.append(line)
eg_ind.append(memory[i])
score += reward
impressions += 1
if impressions%10000 == 0:
logger.debug('{} behaviour has been generated, Ctr is {}!!!'.format(impressions, score/(impressions)))
print(ctr_reward)
if impressions/10000 in record_arr:
ctr_reward.append(score/impressions)
# if impressions%1000000 == 0:
# policy.update_in_log()
memory.clear()
# policy.print()
score /= impressions
print("CTR achieved by the policy: %.5f" % score)
return ctr_reward, auc_reward
def run(params):
model = params['model_ee']
if model == 'fmeg':
policy = FmEGreedy(params)
elif model == 'fmgreedy':
policy = FmGreedy(params)
elif model == 'random':
policy = Random(params)
elif model == 'greedy':
policy = Greedy(params)
elif model == 'ts':
policy = TS(params)
elif model == 'linucb':
policy = LinUCB(params)
elif model == "fmts":
policy = FmThompson(params)
else:
print("No model named ", model, " !!!")
return
res, auc_res = evaluate(policy, params)
return res, auc_res
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--info_path', type=str, default='dataset/data_nas')
parser.add_argument('--data_name', type=str, default="rand_0.15")
parser.add_argument('--simulate_type', type=str, default="click")
parser.add_argument('--iter', type=int, default=200000)
parser.add_argument('--dim', type=int, default=8)
parser.add_argument('--model_ee', type=str, default='random')
parser.add_argument('--model_nas', type=str, default="fm")
parser.add_argument('--oper', type=str, default='multiply')
parser.add_argument('--model_struct', type=int, default=0)
parser.add_argument('--epoch', type=int, default=1)
parser.add_argument('--decay', type=float, default=0.0001)
parser.add_argument('--learning', type=float, default=0.001)
parser.add_argument('--batch', type=int, default=5000)
parser.add_argument('--s_batch', type=int, default=5000)
parser.add_argument('--alpha', type=float, default=0.01)
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--times', type=int, default=1)
parser.add_argument('--update', type=int, default=0)
parser.add_argument('--data_size', type=int, default=-1)
parser.add_argument('--sample', type=int, default=0)
parser.add_argument('--record', type=int, default=0)
parser.add_argument('--optimizer', type=str, default='adam')
parser.add_argument('--trick', type=int, default=0)
parser.add_argument('--first_order', type=int, default=0)
parser.add_argument('--calcu_dense', type=int, default=0)
parser.add_argument('--ts_trick', type=int, default=0)
parser.add_argument('--auc_record', type=int, default=0)
args = parser.parse_args()
params = {"warm_file": os.path.join(args.info_path, args.data_name,"warm_start.pkl"),
"creative_list": os.path.join(args.info_path, "creative_list.pkl"),
"item_candidates": os.path.join(args.info_path, "item_candidates.pkl"),
"pro_list": os.path.join(args.info_path, "pro_list.pkl"),
"feature_list": os.path.join(args.info_path, "feature_list.pkl"),
"feature_size": os.path.join(args.info_path, "feature_size.pkl"),
"item_ctr": os.path.join(args.info_path, args.data_name,"item_ctr.pkl"),
"random_file": os.path.join(args.info_path, args.data_name,"random_log.pkl"),
"model_ee": args.model_ee,
'learning': args.learning,
"batch": args.batch,
"dim": args.dim,
"simulate_type": args.simulate_type,
"ts_trick": args.ts_trick,
"arch": [0,1,0,3,0,3,0,1,4,2,0,4,3,2,0],
"trick": args.trick,
"model_struct": args.model_struct,
"model_nas": args.model_nas,
"operator": args.oper,
"s_batch": args.s_batch,
"epoch": args.epoch,
"decay": args.decay,
"device": args.device,
"times": args.times,
"iter": args.iter,
"first_order": args.first_order,
"update": args.update,
"auc_record": args.auc_record,
"data_size": args.data_size,
"sample": args.sample,
"alpha": args.alpha,
"record": args.record,
'optimizer': args.optimizer,
'calcu_dense': args.calcu_dense,
'dense': [0,6,0],
"early_fix_arch": False,
}
ISOTIMEFORMAT = '%m%d-%H%M%S'
timestamp = str(datetime.datetime.now().strftime(ISOTIMEFORMAT))
params["model_path"] = os.path.join(args.info_path, args.data_name,"model",timestamp+'.pt')
import json
params_str = json.dumps(params)
score, auc_score = [], []
for i in range(params['times']):
res, auc_res = run(params)
score.append(res)
filename = os.path.join(args.info_path, args.data_name,"res",params['model_ee']+"-"+timestamp+'.txt')
# filename = 'data/res/'+params['model']+"-"+timestamp+'.txt'
if params["record"] == 0:
with open(filename, 'w') as f:
f.write(params_str)
f.write('\n')
for i in range(len(score)):
s = [str(reward) for reward in score[i]]
f.write("time"+str(i)+": "+" ".join(s)+"\n")
if params["auc_record"] == 1:
auc_score.append(auc_res)
filename = os.path.join(args.info_path, args.data_name,"auc_res",params['model_ee']+"-"+timestamp+'.txt')
# filename = 'data/res/'+params['model']+"-"+timestamp+'.txt'
with open(filename, 'w') as f:
f.write(params_str)
f.write('\n')
for i in range(len(auc_score)):
s = [str(reward) for reward in auc_score[i]]
f.write("time"+str(i)+": "+" ".join(s)+"\n") | 9,990 | 37.724806 | 122 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/utils.py | from collections import defaultdict
import numpy as np  # needed by cal_group_auc (np.asarray)
from sklearn.metrics import roc_auc_score  # needed by cal_group_auc
import torch
from torch.optim.optimizer import Optimizer
class FTRL(Optimizer):
""" Implements FTRL online learning algorithm.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
alpha (float, optional): alpha parameter (default: 1.0)
beta (float, optional): beta parameter (default: 1.0)
l1 (float, optional): L1 regularization parameter (default: 1.0)
l2 (float, optional): L2 regularization parameter (default: 1.0)
.. _Ad Click Prediction: a View from the Trenches:
https://www.eecs.tufts.edu/%7Edsculley/papers/ad-click-prediction.pdf
"""
def __init__(self, params, alpha=1.0, beta=1.0, l1=1.0, l2=1.0):
if not 0.0 < alpha:
raise ValueError("Invalid alpha parameter: {}".format(alpha))
if not 0.0 < beta:
raise ValueError("Invalid beta parameter: {}".format(beta))
if not 0.0 <= l1:
raise ValueError("Invalid l1 parameter: {}".format(l1))
if not 0.0 <= l2:
raise ValueError("Invalid l2 parameter: {}".format(l2))
defaults = dict(alpha=alpha, beta=beta, l1=l1, l2=l2)
super(FTRL, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
if len(state) == 0:
state["z"] = torch.zeros_like(p.data)
state["n"] = torch.zeros_like(p.data)
z, n = state["z"], state["n"]
theta = (n + grad ** 2).sqrt() / group["alpha"] - n.sqrt()
z.add_(grad - theta * p.data)
n.add_(grad ** 2)
p.data = (
-1
/ (group["l2"] + (group["beta"] + n.sqrt()) / group["alpha"])
* (z - group["l1"] * z.sign())
)
p.data[z.abs() < group["l1"]] = 0
return loss
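# Hedged usage sketch (added, not from the original repo): FTRL plugs in like
# any torch optimizer; alpha/beta/l1/l2 mirror the constructor above, the rest
# (model, loader) is illustrative.
#
#     model = torch.nn.Linear(10, 1)
#     opt = FTRL(model.parameters(), alpha=1.0, beta=1.0, l1=1.0, l2=1.0)
#     for x, y in loader:
#         opt.zero_grad()
#         loss = torch.nn.functional.mse_loss(model(x), y)
#         loss.backward()
#         opt.step()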
class DataPrefetcher():
def __init__(self, loader, device):
self.loader = iter(loader)
self.device = device
self.stream = torch.cuda.Stream()
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.batch = next(self.loader)
except StopIteration:
self.batch = None
return
with torch.cuda.stream(self.stream):
for k in range(len(self.batch)):
if k != 'meta':
self.batch[k] = self.batch[k].to(device=self.device, non_blocking=True)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
# self.next_input = self.next_input.float()
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
self.preload()
return batch
def cal_group_auc(labels, preds, user_id_list):
if len(user_id_list) != len(labels):
raise ValueError(
"impression id num should equal to the sample num," \
"impression id num is {0}".format(len(user_id_list)))
group_score = defaultdict(lambda: [])
group_truth = defaultdict(lambda: [])
for idx, truth in enumerate(labels):
user_id = user_id_list[idx]
score = preds[idx]
truth = labels[idx]
group_score[user_id].append(score)
group_truth[user_id].append(truth)
group_flag = defaultdict(lambda: False)
for user_id in set(user_id_list):
truths = group_truth[user_id]
flag = False
for i in range(len(truths) - 1):
if truths[i] != truths[i + 1]:
flag = True
break
group_flag[user_id] = flag
impression_total = 0
total_auc = 0
#
for user_id in group_flag:
if group_flag[user_id]:
auc = roc_auc_score(np.asarray(group_truth[user_id]), np.asarray(group_score[user_id]))
total_auc += auc * len(group_truth[user_id])
impression_total += len(group_truth[user_id])
group_auc = float(total_auc) / impression_total
group_auc = round(group_auc, 4)
return group_auc
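# Hedged example (illustrative only): group AUC is the per-user AUC weighted by
# impression count; users whose labels are all identical are skipped.
#
#     labels   = [1, 0, 0, 1, 0]
#     preds    = [0.9, 0.2, 0.4, 0.6, 0.3]
#     user_ids = ['u1', 'u1', 'u1', 'u2', 'u2']
#     cal_group_auc(labels, preds, user_ids)   # -> weighted average of per-user AUCs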
| 4,886 | 33.907143 | 99 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/model.py | import torch
import torch.nn as nn
import torch.distributions.normal as normal
import torch.nn.functional as F
import math
import time
import numpy as np
import random
from torch.autograd import Variable
PRIMITIVES = ['concat', 'multiply', 'max', 'min', 'plus']
# PRIMITIVES = ['zero', 'max', 'min', 'multiply', 'plus', 'minus']
OPS = {
'zero': lambda p,q: torch.zeros_like(p).sum(2),
'plus': lambda p,q: (p + q).sum(2),
'minus': lambda p,q: (p - q).sum(2),
'multiply': lambda p, q: (p * q).sum(2),
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0].sum(2),
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0].sum(2),
'concat': lambda p, q: torch.cat([p, q], dim=-1).sum(2)
}
OPS_V = {
'zero': lambda p,q: torch.zeros_like(p),
'plus': lambda p,q: (p + q),
'minus': lambda p,q: (p - q),
'multiply': lambda p, q: (p * q),
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1)
}
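# Added demo (hedged, never called by the training code): shows how each
# interaction operator reduces a pair of (batch, field, dim) embeddings to one
# score per row; 'multiply' is the FM-style elementwise product summed over dim.
def _demo_interaction_ops():
    p = torch.randn(4, 1, 8)
    q = torch.randn(4, 1, 8)
    for name in PRIMITIVES:
        print(name, OPS[name](p, q).shape)  # each op yields shape (4, 1)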
def MixedBinary(embedding_p, embedding_q, weights, flag, dim=1, FC=None, o_type=0, others=0):
# print(weights)
# embedding_p = MLP[0](embedding_p.view(-1,1)).view(embedding_p.size())
# embedding_q = MLP[1](embedding_q.view(-1,1)).view(embedding_q.size())
if flag == 0:
max_ind = weights.argmax().item()
if o_type==0:
t = OPS[PRIMITIVES[max_ind]](embedding_p, embedding_q)
elif o_type==1:
begin, end = max_ind*dim, max_ind*dim+dim
t = OPS[PRIMITIVES[max_ind]](embedding_p[:,:,begin:end], embedding_q[:,:,begin:end])
elif o_type==2:
t = (FC[max_ind](OPS_V[PRIMITIVES[max_ind]](embedding_p, embedding_q))).squeeze(1)
elif o_type==3:
begin, end = max_ind*dim, max_ind*dim+dim
t = (FC[max_ind](OPS_V[PRIMITIVES[max_ind]](embedding_p[:,:,begin:end], embedding_q[:,:,begin:end]))).squeeze(1)
if others == 1:
t = t*weights[max_ind]
else:
if o_type==0:
t = torch.sum(torch.stack([w * (OPS[primitive](embedding_p, embedding_q)) for w,primitive in zip(weights, PRIMITIVES)]), 0)
elif o_type==1:
t = 0
for i in range(len(PRIMITIVES)):
begin, end = i*dim, i*dim+dim
t += weights[i] * (OPS[PRIMITIVES[i]](embedding_p[:,:,begin:end], embedding_q[:,:,begin:end]))
elif o_type==2:
t = 0
for i in range(len(PRIMITIVES)):
t += weights[i] * (FC[i](OPS_V[PRIMITIVES[i]](embedding_p, embedding_q))).squeeze(1)
elif o_type==3:
t = 0
for i in range(len(PRIMITIVES)):
begin, end = i*dim, i*dim+dim
t += weights[i] * (FC[i](OPS_V[PRIMITIVES[i]](embedding_p[:,:,begin:end], embedding_q[:,:,begin:end]))).squeeze(1)
return t
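# Added note (inferred from the code above): o_type selects the interaction
# structure used by MixedBinary --
#   0: shared embedding, operator output summed over the embedding dim
#   1: a separate embedding slice of size `dim` per operator
#   2: shared embedding, a per-operator linear layer (FC) produces the score
#   3: per-operator embedding slices combined with per-operator FC layers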
class OFM(nn.Module):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__()
print(feature_size)
self.name = "OFM"
self.field_size = len(feature_size)
print(feature_size)
self.feature_sizes = feature_size
self.dim = k
self.device = params["device"]
self.dense = params["dense"]
self.first_order = params["first_order"]
self.type = m_type
self.f_len = params["f_len"]
self.alpha = params["alpha"]
# self.nas_flag = nas_flag
# init bias
self.bias = nn.Parameter(torch.normal(torch.ones(1), 1),requires_grad=True)
self.params = params
# init first order
if self.first_order == 1:
fm_first_order_Linears = nn.ModuleList(
[nn.Linear(feature_size, 1, bias=False) for feature_size in self.feature_sizes[:self.dense[0]]])
fm_first_order_embeddings = nn.ModuleList(
[nn.Embedding(feature_size, 1) for feature_size in self.feature_sizes[self.dense[0]:self.dense[0]+self.dense[1]]])
fm_first_order_multi = nn.Embedding(self.dense[2], 1)
self.fm_first_order_models = fm_first_order_Linears.extend(fm_first_order_embeddings).append(fm_first_order_multi)
# self.bias = 0
# init second order
self._FC = None
if self.type == 0:
leng = self.dim
elif self.type == 1:
leng = self.dim*len(PRIMITIVES)
elif self.type == 2:
leng = self.dim
# leng = self.dim
self._FC = nn.ModuleList()
for primitive in PRIMITIVES:
if primitive == "concat":
self._FC.append(nn.Linear(2*self.dim, 1, bias=False))
else:
self._FC.append(nn.Linear(self.dim, 1, bias=False))
elif self.type == 3:
leng = self.dim*len(PRIMITIVES)
# leng = self.dim
self._FC = nn.ModuleList()
for primitive in PRIMITIVES:
if primitive == "concat":
self._FC.append(nn.Linear(2*self.dim, 1, bias=False))
else:
self._FC.append(nn.Linear(self.dim, 1, bias=False))
fm_second_order_Linears = nn.ModuleList(
[nn.Linear(feature_size, leng, bias=False) for feature_size in self.feature_sizes[:self.dense[0]]])
fm_second_order_embeddings = nn.ModuleList(
[nn.Embedding(feature_size, leng) for feature_size in self.feature_sizes[self.dense[0]:self.dense[0]+self.dense[1]]])
fm_second_order_multi = nn.Embedding(self.dense[2], leng)
self.fm_second_order_models = fm_second_order_Linears.extend(fm_second_order_embeddings).append(fm_second_order_multi)
        # count the number of pairwise interaction terms (architecture slots)
if self.dense[2] != 0:
self.multi_len = int(self.dense[1]*(self.dense[1]+1)/2)+1
else:
self.multi_len = int(self.dense[1]*(self.dense[1]-1)/2)
# init arch parameters
self.inter_arr = [i for i in range(self.multi_len)]
self._arch_parameters = {}
self._arch_parameters['binary'] = Variable(torch.ones((self.multi_len, len(PRIMITIVES)), dtype=torch.float, device=params['device']) / 2, requires_grad=True)
self._arch_parameters['binary'].data.add_(
torch.randn_like(self._arch_parameters['binary'])*1e-3)
self.cost = [0 for _ in range(6)]
def arch_parameters(self):
return [self._arch_parameters['binary']]
def set_rand(self, num):
self.inter_arr = random.sample(self.inter_arr, num)
def forward(self, x, flag, weights=None):
        if weights is None:
weights = self._arch_parameters['binary']
X = x.reshape(x.shape[0], x.shape[1], 1)
self.cost[0] -= time.time()
# calcu first order
out_first_order = 0
if self.first_order == 1:
for i, emb in enumerate(self.fm_first_order_models):
if i < self.dense[0]:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.float)
out_first_order += torch.sum(emb(Xi_tem).unsqueeze(1), 1)
elif i < self.dense[0]+self.dense[1]:
self.cost[1] -= time.time()
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.long)
# print(i, Xi_tem, emb, self.feature_sizes)
out_first_order += torch.sum(emb(Xi_tem), 1)
self.cost[1] += time.time()
else:
self.cost[2] -= time.time()
for j in range(self.dense[2]):
Xi_tem = X[:, i+j, :].to(device=self.device, dtype=torch.long)
out_first_order += Xi_tem*emb(torch.Tensor([j]).to(device=self.device, dtype=torch.long))
self.cost[2] += time.time()
# record second order embedding
X_vector = []
for i, emb in enumerate(self.fm_second_order_models):
if i < self.dense[0]:
if self.params["calcu_dense"]==0:
continue
else:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.float)
X_vector.append(emb(Xi_tem).unsqueeze(1))
elif i < self.dense[0]+self.dense[1]:
self.cost[3] -= time.time()
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.long)
X_vector.append(emb(Xi_tem))
# print(X_vector[-1].shape)
self.cost[3] += time.time()
else:
self.cost[4] -= time.time()
for j in range(self.dense[2]):
Xi_tem = X[:, i+j, :].to(device=self.device, dtype=torch.long)
X_vector.append((Xi_tem*emb(torch.Tensor([j]).to(device=self.device, dtype=torch.long))).unsqueeze(1))
# print(X_vector[-1].shape)
self.cost[4] += time.time()
# calcu second order
out_second_order = 0
self.cost[5] -= time.time()
cnt = 0
multi_hot_len = 0
if self.dense[2] != 0:
multi_hot_len = 1
for i in range(len(X_vector)):
for j in range(i):
if i < len(self.feature_sizes)-multi_hot_len:
tmp = cnt
elif j < len(self.feature_sizes)-multi_hot_len:
tmp = self.multi_len-len(self.feature_sizes)+j
else:
tmp = self.multi_len-1
cnt += 1
if tmp not in self.inter_arr:
continue
# if self.name != "SNAS":
# out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp,:], flag, dim=self.dim, FC=self._FC, o_type=self.type)
# else:
# out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp], flag, dim=self.dim, FC=self._FC, o_type=self.type)
if self.name == "SNAS":
out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp], flag, dim=self.dim, FC=self._FC, o_type=self.type)
elif self.name == "DSNAS":
out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp, :], flag, dim=self.dim, FC=self._FC, o_type=self.type, others=1)
else:
out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp, :], flag, dim=self.dim, FC=self._FC, o_type=self.type)
# print(out_second_order)
self.cost[5] += time.time()
self.cost[0] += time.time()
out = out_second_order+out_first_order+self.bias
return torch.sigmoid(out.squeeze(1))
def genotype(self):
genotype = [PRIMITIVES[self._arch_parameters['binary'][i, :].argmax().item()]
for i in range(self.multi_len)]
for i in range(self.multi_len):
if i not in self.inter_arr:
genotype[i] = "None"
print(genotype)
return genotype
# return genotype, genotype_p.cpu().detach()
def setotype(self, ops):
self.ops = ops
leng = len(self._arch_parameters['binary'][0,:])
for i in range(self.multi_len):
for j in range(leng):
if PRIMITIVES[j] == self.ops:
self._arch_parameters['binary'].data[i,j] = 1.0
else:
self._arch_parameters['binary'].data[i,j] = 0.0
def TS_initial(self):
self.rand_array = torch.randn(10000000)
self.ts_trick = self.params["ts_trick"]
if self.type == 0 or self.type == 2:
leng = self.dim
elif self.type == 1 or self.type == 3:
leng = self.dim*len(PRIMITIVES)
if self.first_order == 1:
fm_first_std_Linears = nn.ModuleList(
[nn.Linear(feature_size, 1, bias=False) for feature_size in self.feature_sizes[:self.dense[0]]])
fm_first_std_embeddings = nn.ModuleList(
[nn.Embedding(feature_size, 1) for feature_size in self.feature_sizes[self.dense[0]:self.dense[0]+self.dense[1]]])
fm_first_std_multi = nn.Embedding(self.dense[2], 1)
self.fm_first_std_models = fm_first_std_Linears.extend(fm_first_std_embeddings).append(fm_first_std_multi)
fm_second_std_Linears = nn.ModuleList(
[nn.Linear(feature_size, leng, bias=False) for feature_size in self.feature_sizes[:self.dense[0]]])
fm_second_std_embeddings = nn.ModuleList(
[nn.Embedding(feature_size, leng) for feature_size in self.feature_sizes[self.dense[0]:self.dense[0]+self.dense[1]]])
fm_second_std_multi = nn.Embedding(self.dense[2], leng)
self.fm_second_std_models = fm_second_std_Linears.extend(fm_second_std_embeddings).append(fm_second_std_multi)
def reparameterize(self, mu, std, alpha):
std = torch.log(1 + torch.exp(std)).to(self.device)
# v = torch.randn(batch, mu.shape[0], mu.shape[1]).to(self.device)
v = self.rand_array[:std.numel()].reshape(std.shape).to(self.device)
return (mu + alpha * std * v)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2/std1) + (std1*std1+(mean1-mean2)*(mean1-mean2))/2/std2/std2 - 1.0/2.0
return torch.sum(a)
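    # Added note: this is the closed-form KL divergence between diagonal
    # Gaussians, KL(N(m1, s1^2) || N(m2, s2^2))
    #   = log(s2/s1) + (s1^2 + (m1 - m2)^2) / (2 * s2^2) - 1/2,
    # summed over all embedding entries.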
def forward_ts(self, x, flag, weights=None, cal_kl=1):
        if weights is None:
weights = self._arch_parameters['binary']
X = x.reshape(x.shape[0], x.shape[1], 1)
# if cal_kl==1:
# alpha = 1
# else:
# alpha = self.alpha
alpha = self.alpha
out_first_order = 0
if self.first_order == 1:
for i, emb in enumerate(self.fm_first_order_models):
if i < self.dense[0]:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.float)
X_mean = torch.sum(emb(Xi_tem).unsqueeze(1), 1)
if i < self.f_len and self.ts_trick==0:
out_first_order += X_mean
else:
X_std = torch.sum(self.fm_first_std_models[i](Xi_tem).unsqueeze(1), 1)
out_first_order += self.reparameterize(X_mean, X_std, alpha)
elif i < self.dense[0]+self.dense[1]:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.long)
# print(Xi_tem.shape)
X_mean = torch.sum(emb(Xi_tem), 1)
if i < self.f_len and self.ts_trick==0:
out_first_order += X_mean
else:
X_std = torch.sum(self.fm_first_std_models[i](Xi_tem), 1)
out_first_order += self.reparameterize(X_mean, X_std, alpha)
else:
for j in range(self.dense[2]):
Xi_tem = X[:, i+j, :].to(device=self.device, dtype=torch.long)
X_mean = Xi_tem*emb(torch.Tensor([j]).to(device=self.device, dtype=torch.long))
if i < self.f_len and self.ts_trick==0:
out_first_order += X_mean
else:
X_std = Xi_tem*self.fm_first_std_models[i](torch.Tensor([j]).to(device=self.device, dtype=torch.long))
out_first_order += self.reparameterize(X_mean, X_std, alpha)
X_vector = []
for i, emb in enumerate(self.fm_second_order_models):
if i < self.dense[0]:
if self.params["calcu_dense"]==0:
continue
else:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.float)
X_mean = emb(Xi_tem).unsqueeze(1)
if i < self.f_len and self.ts_trick==0:
X_vector.append(X_mean)
else:
X_std = self.fm_second_std_models[i](Xi_tem).unsqueeze(1)
X_vector.append(self.reparameterize(X_mean, X_std, alpha))
elif i < self.dense[0]+self.dense[1]:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.long)
X_mean = emb(Xi_tem)
if i < self.f_len and self.ts_trick==0:
X_vector.append(X_mean)
else:
X_std = self.fm_second_std_models[i](Xi_tem)
X_vector.append(self.reparameterize(X_mean, X_std, alpha))
else:
for j in range(self.dense[2]):
Xi_tem = X[:, i+j, :].to(device=self.device, dtype=torch.long)
X_mean = (Xi_tem*emb(torch.Tensor([j]).to(device=self.device, dtype=torch.long))).unsqueeze(1)
if i < self.f_len and self.ts_trick==0:
X_vector.append(X_mean)
else:
X_std = (Xi_tem*self.fm_second_std_models[i](torch.Tensor([j]).to(device=self.device, dtype=torch.long))).unsqueeze(1)
X_vector.append(self.reparameterize(X_mean, X_std, alpha))
out_second_order = 0
cnt = 0
multi_hot_len = 0
if self.dense[2] != 0:
multi_hot_len = 1
for i in range(len(X_vector)):
for j in range(i):
if i < len(self.feature_sizes)-multi_hot_len:
tmp = cnt
elif j < len(self.feature_sizes)-multi_hot_len:
tmp = -len(self.feature_sizes)+j
else:
tmp = -1
if self.name != "SNAS":
out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp,:], flag, dim=self.dim, FC=self._FC, o_type=self.type)
else:
out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp], flag, dim=self.dim, FC=self._FC, o_type=self.type)
cnt += 1
out = torch.sigmoid((out_second_order+out_first_order+self.bias).squeeze(1))
if cal_kl == 0:
return 0, out
# print(out.shape,out_second_order.shape,out_first_order.shape)
k = 0
for i, emb in enumerate(self.fm_first_order_models):
if i < self.f_len:
continue
k += self.KL_distance(emb.weight, 0*torch.ones_like(emb.weight), torch.log(1 + torch.exp(self.fm_first_std_models[i].weight)), 0.1*torch.ones_like(self.fm_first_std_models[i].weight))
for i, emb in enumerate(self.fm_second_order_models):
if i < self.f_len:
continue
k += self.KL_distance(emb.weight, 0*torch.ones_like(emb.weight), torch.log(1 + torch.exp(self.fm_second_std_models[i].weight)), 0.1*torch.ones_like(self.fm_second_std_models[i].weight))
# print(k.shape)
return k, out
class NAS(OFM):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.name = "NAS"
def binarize(self):
self._cache = self._arch_parameters['binary'].clone()
max_index = [self._arch_parameters['binary'][i, :].argmax().item()
for i in range(self.multi_len)]
leng = len(self._arch_parameters['binary'][0,:])
for i in range(self.multi_len):
for j in range(leng):
if j == max_index[i]:
self._arch_parameters['binary'].data[i,j] = 1.0
else:
self._arch_parameters['binary'].data[i,j] = 0.0
# print(self._arch_parameters['binary'])
def recover(self):
self._arch_parameters['binary'].data = self._cache
del self._cache
def step(self, x, labels_valid, criterion, arch_optimizer, other):
# print(len(x),len(labels_valid))
self.zero_grad()
arch_optimizer.zero_grad()
# binarize before forward propagation
self.binarize()
inferences = self(x, 1)
loss = criterion(inferences, labels_valid)
# loss = F.binary_cross_entropy(input=inferences,target=labels_valid,reduction='mean')
loss.backward()
# restore weight before updating
self.recover()
arch_optimizer.step()
return loss
def print(self):
for name, i in self.named_parameters():
print(name,i)
return
class DSNAS(OFM):
def __init__(self, feature_size=None, k=None, params=None, m_type=0, args=None):
super(DSNAS, self).__init__(feature_size, k, params, m_type)
self.log_alpha = self._arch_parameters['binary']
self.weights = Variable(torch.zeros_like(self.log_alpha))
self.fix_arch_index = {}
self.args = args
self.name = "DSNAS"
def binarize(self):
pass
def recover(self):
return
def fix_arch(self):
if self.params["early_fix_arch"]:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.3)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
def forward(self, x, flag, weights=None):
        if weights is None:
weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim = -1).view(-1,1), 1)
return super().forward(x, flag, weights)
    # Only training data is needed to train both the architecture parameters and the network parameters.
def step(self, x, labels, criterion, arch_optimizer, optimizer):
error_loss = 0
loss_alpha = 0
self.fix_arch()
arch_optimizer.zero_grad()
optimizer.zero_grad()
if self.params["early_fix_arch"]:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
self.weights = self._get_weights(self.log_alpha)
cate_prob = F.softmax(self.log_alpha, dim=-1)
self.cate_prob = cate_prob.clone().detach()
loss_alpha = torch.log(
(self.weights * F.softmax(self.log_alpha, dim=-1)).sum(-1)).sum()
self.weights.requires_grad_()
inference = self(x, 1, self.weights)
error_loss = F.binary_cross_entropy(inference, labels.float())
self.weights.grad = torch.zeros_like(self.weights)
(error_loss + loss_alpha).backward()
self.block_reward = self.weights.grad.data.sum(-1)
self.log_alpha.grad.data.mul_(self.block_reward.view(-1, 1))
arch_optimizer.step()
optimizer.step()
return error_loss
def _get_weights(self, log_alpha):
# if self.args.random_sample:
# uni = torch.ones_like(log_alpha)
# m = torch.distributions.one_hot_categorical.OneHotCategorical(uni)
# else:
m = torch.distributions.one_hot_categorical.OneHotCategorical(probs=F.softmax(log_alpha, dim=-1))
return m.sample()
class SNAS(OFM):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.arch_prob = [0 for _ in range(self.multi_len)]
self.name = "SNAS"
def binarize(self, temperature=0.00001):
self.g_softmax(temperature)
def recover(self):
return
def forward(self, x, flag):
return super().forward(x, flag, self.arch_prob)
def g_softmax(self, temperature):
self.temp = temperature
for i in range(self.multi_len):
# alpha = self._arch_parameters['binary'].data[i, :]
# m = torch.nn.functional.gumbel_softmax(alpha, tau=temperature, hard=False, eps=1e-10, dim=-1)
# m = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
# torch.tensor([temperature]).to(self.device) , alpha)
# print("sam",m.sample(),"raw",self._arch_parameters['binary'].data[i, :])
self.arch_prob[i] = torch.nn.functional.gumbel_softmax(self._arch_parameters['binary'][i, :],tau=temperature, hard=False, eps=1e-10, dim=-1)
def step(self, x, labels_valid, criterion, arch_optimizer, temperature):
self.zero_grad()
arch_optimizer.zero_grad()
self.g_softmax(temperature)
inferences = self(x, 1)
loss = criterion(inferences, labels_valid)
# loss = F.binary_cross_entropy(input=inferences,target=labels_valid,reduction='mean')
loss.backward()
arch_optimizer.step()
# for parms in self.arch_parameters():
# print(parms,'-->grad_requirs:',parms.requires_grad,' -->grad_value:',parms.grad)
return loss
class OFM_TS(OFM):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.TS_initial()
class NAS_TS(NAS):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.TS_initial()
def step_ts(self, x, labels_valid, criterion, arch_optimizer, other):
# print(len(x),len(labels_valid))
self.zero_grad()
arch_optimizer.zero_grad()
# binarize before forward propagation
self.binarize()
_, inferences = self.forward_ts(x, 1, cal_kl=0)
loss = criterion(inferences, labels_valid)
# loss = F.binary_cross_entropy(input=inferences,target=labels_valid,reduction='mean')
loss.backward()
# restore weight before updating
self.recover()
arch_optimizer.step()
return loss
class SNAS_TS(SNAS):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.TS_initial()
class DSNAS_TS(DSNAS):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.TS_initial()
class Linucb(nn.Module):
def __init__(self, n=1, cnum=1, device='cuda'):
super().__init__()
self.alpha = 0.1
self.n =n
self.theta = nn.Parameter(torch.randn(cnum, n).to(device))
self.A_inv = nn.Parameter(torch.randn(cnum, n, n).to(device))
    def forward(self, x):
        # UCB score per row: theta_a^T x + alpha * sqrt(x^T A_a^{-1} x),
        # where column 0 of x holds the creative index a.
        ind = x[:,0].cpu().numpy().astype(int)
feature = x[:,1:].reshape(len(x),self.n)
mean = torch.mul(feature, self.theta[ind]).sum(1, keepdim=True)
fe1 = feature.reshape(len(x),1,self.n)
fe2 = feature.reshape(len(x),self.n,1)
std = self.alpha*torch.sqrt(torch.bmm(torch.bmm(fe1,self.A_inv[ind]),fe2).reshape(len(x),1))
return mean + std
    def print(self):
        return
| 27,434 | 44.123355 | 197 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/policy.py | import time
import torch
import pickle
import numpy as np
import pandas as pd
from torch.utils.data import WeightedRandomSampler
from train import Core
from train_arch import Train_Arch
from model import Linucb
def get_creative(params):
"return global creative list, and item-creatives dict"
with open(params['creative_list'], 'rb') as f:
c_list = pickle.load(f)
c_list = np.array(c_list, dtype=np.float32)
with open(params['item_candidates'], 'rb') as f:
item_candi = pickle.load(f)
with open(params["feature_list"], 'rb') as f:
f_list = pickle.load(f)
return f_list, c_list, item_candi
class Base(object):
def __init__(self, params):
self.f_list, self.c_list, self.item_candi = get_creative(params)
self.name = 'Base'
self.params = params
self.c_len = len(self.c_list[0])
self.f_len = len(self.f_list[0])
self.t_len = self.f_len+self.c_len
self.params["c_len"] = self.c_len
self.params["f_len"] = self.f_len
self.log_file = None
self.log_data = []
self.batch_data = []
self.fea_index = {}
self.c_index = {}
self.cnt = 0
cnt= 0
for cre in self.c_list:
self.c_index[','.join(str(x) for x in cre)] = cnt
cnt += 1
self.clock = [0 for _ in range(10)]
def update(self, lines, size=-1):
self.log_data.extend(lines)
self.batch_data = lines
if size!=-1 and len(self.log_data)>size:
self.log_data = self.log_data[-size:]
return 0
def update_in_log(self):
"""write log into file"""
if len(self.log_data)==0:
return
df = pd.DataFrame(self.log_data)
df.to_pickle(self.log_file)
def get_recommend_info(self, features_list, flag=0):
"""
        Return the arrays needed to rank creatives for a batch of requests.
        feas: distinct feature rows that have not been scored yet
        offset: offset[i]:offset[i+1] indexes the candidate rows of feas[i]
        candidates: stacked feature+creative rows for all candidates to rank
"""
feas = []
num = 0
for features in features_list:
ind = features[0]
if ind not in self.fea_index.keys():
self.fea_index[ind] = self.cnt
self.cnt += 1
feas.append(features)
num += len(self.item_candi[ind])
cnt = 0
f_len = len(features_list[0][1:])
# print(len(self.c_list[0]))
if flag == 0:
leng = f_len+len(self.c_list[0])
else:
leng = f_len+len(self.c_list[0])+1
candidates = np.zeros((num, leng),dtype=np.float32)
offset = [0]
last = 0
for features in feas:
t = np.zeros(leng)
if flag == 0:
t[0:f_len] = np.array(features[1:], dtype=np.float32)
for c_feature in self.item_candi[features[0]]:
t[f_len:] = self.c_list[c_feature]
candidates[cnt] = t
cnt+=1
else:
t[1:1+f_len] = np.array(features[1:], dtype=np.float32)
for c_feature in self.item_candi[features[0]]:
t[0] = np.float32(c_feature)
t[1+f_len:] = self.c_list[c_feature]
candidates[cnt] = t
cnt+=1
last = last+len(self.item_candi[features[0]])
offset.append(last)
return feas, offset, candidates
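    # Added note: with two requests having 3 and 2 candidate creatives,
    # `offset` is [0, 3, 5] and candidates[offset[i]:offset[i+1]] are the rows
    # to score for feas[i].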
class Random(Base):
def __init__(self, params):
super(Random, self).__init__(params)
self.name = 'Random'
self.log_file = self.params["random_file"]
def update(self, lines, ind=None):
super(Random, self).update(lines)
return 0
def recommend_batch(self, features_list):
res = []
for features in features_list:
leng = len(self.item_candi[features[0]])
res.append(np.random.randint(0,leng))
return res
class Greedy(Base):
def __init__(self, params):
super(Greedy, self).__init__(params)
self.name = 'Greedy'
def update(self, lines, ind):
return 0
def recommend_batch(self, features_list, item_ctr):
res = []
for features in features_list:
res.append(item_ctr[features[0]].index(max(item_ctr[features[0]])))
return res
class FmEGreedy(Base):
def __init__(self, params):
super(FmEGreedy, self).__init__(params)
self.log_file = 'data/log/fm_log.pkl'
self.name = 'FmEGreedy'
self.fea_index = {}
self.res = []
self.flag = 0
self.update_cnt = 0
self.greedy_flag = 0
self.epsilon = self.params["alpha"]
self.model_nas = self.params["model_nas"]
self.model_struct = self.params["model_struct"]
        # initialize the model: NAS variants use Train_Arch(), plain "fm"/"fm_ts" use Core()
if self.model_nas not in ["fm", "fm_ts"]:
self.framework = Train_Arch(dim=self.params["dim"],epoch=self.params["epoch"],weight_decay=self.params["decay"],data_size=self.params["data_size"], train_scale=1, valid_scale=0, device=self.params["device"], params=self.params)
else:
self.framework = Core(dim=self.params["dim"],epoch=self.params["epoch"],weight_decay=self.params["decay"],data_size=self.params["data_size"],train_scale= 1, valid_scale=0, device=self.params["device"], params=self.params)
        # get test dataset to calculate auc
if self.params["auc_record"] == 1:
_, _, self.test_data = self.framework.set_dataloader(dataset_path=self.params["random_file"], data_size=100000,train_scale=0,valid_scale=0)
def update(self, lines, ind=None):
self.update_cnt += len(lines)
if self.name == "FmTS":
self.framework.epoch = self.framework.params["epoch"]-int((len(self.log_data)/len(lines))*3.5)
print("epoch:", self.framework.epoch)
        # update == 0: the model structure is re-searched on every policy update
if self.params["update"] == 0:
super(FmEGreedy, self).update(lines)
self.framework.set_dataloader(dataset=self.log_data, dataset_path=self.log_file)
self.framework.initial_model(self.model_nas, self.model_struct, optimizer=self.params["optimizer"])
self.framework.run()
        # update == 1: the structure is fully re-searched only every few batches; otherwise only weights are retrained
elif self.params["update"] == 1:
super(FmEGreedy, self).update(lines)
if self.update_cnt > 49999 or self.flag ==0:
self.framework.params["trick"]=1
self.framework.epoch = self.framework.params["epoch"]
self.framework.set_dataloader(dataset=self.log_data, dataset_path=self.log_file)
self.framework.initial_model(self.model_nas, self.model_struct, optimizer=self.params["optimizer"])
self.framework.run()
self.framework.params["arch"] = [self.framework.model._arch_parameters['binary'][i, :].argmax().item() for i in range(len(self.framework.model._arch_parameters['binary']))]
self.flag, self.update_cnt = 1, 0
else:
self.framework.epoch = 200
self.framework.params["trick"]=0
if self.name == "FmTS":
self.framework.epoch = 180-int(len(self.log_data)/len(lines))*6
self.framework.set_dataloader(dataset=self.log_data, dataset_path=self.log_file)
self.framework.initial_model(self.model_nas, self.model_struct, optimizer=self.params["optimizer"])
# if self.flag == 0:
# self.framework.params["arch"] = [1 for i in range(len(self.framework.model._arch_parameters['binary']))]
self.framework.run()
self.fea_index = {}
self.res = []
self.cnt = 0
if self.params["auc_record"] == 1:
return(self.framework.test(self.test_data))
else:
return 0
def recommend_batch(self, features_list):
feas, offset, candidates = super().get_recommend_info(features_list)
if len(candidates) != 0:
rank = self.framework.result(candidates)
for i in range(len(feas)):
k = np.argmax(rank[offset[i]:offset[i+1]])
self.res.append(k)
final = []
for features in features_list:
if np.random.rand() > self.epsilon:
final.append(self.res[self.fea_index[features[0]]])
else:
leng = len(self.item_candi[features[0]])
final.append(np.random.randint(0,leng))
return final
class FmThompson(FmEGreedy):
def __init__(self, params):
if "_ts" not in params["model_nas"]:
params["model_nas"] = params["model_nas"]+"_ts"
super(FmThompson, self).__init__(params)
self.log_file = 'data/log/fmTS_log.pkl'
self.name = 'FmTS'
self.epsilon = 0
def update(self, lines, ind=None):
return super(FmThompson, self).update(lines)
def recommend_batch(self, features):
return super(FmThompson, self).recommend_batch(features)
class FmGreedy(FmEGreedy):
def update(self, lines, ind):
if self.greedy_flag == 1:
return 0
self.log_file = self.params["random_file"]
self.greedy_flag = 1
self.framework.set_dataloader(dataset=None, dataset_path=self.log_file)
self.framework.initial_model(self.model_nas, self.model_struct, optimizer=self.params["optimizer"])
self.framework.run()
_, _, self.test_data = self.framework.set_dataloader(dataset_path=self.params["random_file"], data_size=100000,train_scale=0,valid_scale=0, flag=1)
if self.params["auc_record"] == 1:
return(self.framework.test(self.test_data))
else:
return 0
class LinUCB(Base):
def __init__(self, params):
super(LinUCB, self).__init__(params)
self.log_file = 'data/log/linucb_log.pkl'
self.name = 'Linucb'
self.device = params['device']
self.r_index = {}
self.c_num = len(self.c_list)
self.leng = self.t_len
self.alpha = self.params["alpha"]
self.cnt = 0
self.t = 0
self.Aa = np.zeros((self.c_num, self.leng, self.leng))
self.Aa_inv = np.zeros((self.c_num, self.leng, self.leng))
self.ba = np.zeros((self.c_num, self.leng, 1))
self.theta = np.zeros((self.c_num, self.leng, 1))
for i in range(self.c_num):
self.Aa[i] = np.identity(self.leng)
self.Aa_inv[i] = np.identity(self.leng)
self.ba[i] = np.zeros((self.leng, 1))
self.theta[i] = np.zeros((self.leng, 1))
self.model = Linucb(self.leng, self.c_num, self.device)
def update(self, lines, ind=None):
# super(LinUCB, self).update(lines)
used = np.zeros(self.c_num)
# print(self.c_index)
for line in lines:
curr = self.c_index[','.join(str(float(x)) for x in line[-1-self.c_len:-1])]
used[curr] = 1
x = np.array(line[0:-1]).reshape((self.leng, 1))
# print(x)
reward = line[-1]
t = np.outer(x, x)
self.Aa[curr] += t
self.ba[curr] += reward * x
            # Sherman-Morrison rank-1 update keeps A^-1 current without re-inverting.
            self.Aa_inv[curr] = self.Aa_inv[curr] - np.matmul(self.Aa_inv[curr].dot(x), x.T.dot(self.Aa_inv[curr]))/(1 + np.matmul(x.T, self.Aa_inv[curr].dot(x)))
for curr in range(self.c_num):
if used[curr] == 1:
# self.Aa_inv[curr] = np.linalg.inv(self.Aa[curr])
self.theta[curr] = self.Aa_inv[curr].dot(self.ba[curr])
for i, (_, param) in enumerate(self.model.named_parameters()):
if i ==0:
param.data = torch.from_numpy(self.theta.reshape(self.c_num,self.leng).astype(np.float32)).to(self.device)
if i == 1:
param.data = torch.from_numpy(self.Aa_inv.astype(np.float32)).to(self.device)
self.r_index = {}
self.fea_index = {}
return 0
def recommend_batch(self, features_list):
feas, offset, candidates = super().get_recommend_info(features_list, 1)
rank = self.model(torch.from_numpy(candidates).to(self.device))
rank = rank.detach().cpu().numpy().reshape(len(rank))
for i in range(len(feas)):
k = np.argmax(rank[offset[i]:offset[i+1]])
self.r_index[feas[i][0]] = k
final = []
for features in features_list:
final.append(self.r_index[features[0]])
return final
class TS(LinUCB):
def __init__(self, params):
super(TS, self).__init__(params)
self.log_file = 'data/log/ts_log.pkl'
self.name = 'ts'
self.mean = None
self.std = None
self.alpha = self.params["alpha"]
def update(self, lines, ind=None):
for line in lines:
curr = self.c_index[','.join(str(float(x)) for x in line[-1-self.c_len:-1])]
x = np.array(line[0:-1]).reshape((self.leng, 1))
reward = line[-1]
t = np.outer(x, x)
self.Aa[curr] += t
self.ba[curr] += reward * x
self.Aa_inv[curr] = self.Aa_inv[curr] - np.matmul(self.Aa_inv[curr].dot(x),x.T.dot(self.Aa_inv[curr]))/(1 + x.T.dot(self.Aa_inv[curr]).dot(x))
t =self.Aa_inv[curr].dot(self.ba[curr])
self.theta[curr] = t
self.r_index = {}
self.fea_index = {}
self.mean = torch.from_numpy(self.theta).reshape(self.c_num, self.leng)
temp = np.array([np.diag(a) for a in self.Aa_inv])
self.std = torch.from_numpy(temp.reshape((self.c_num, self.leng)))
def recommend_batch(self, features_list):
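        # Thompson sampling: draw theta from a Gaussian centred on the posterior mean with
        # per-coordinate spread alpha * diag(A^-1), then rank each candidate creative by the
        # sampled linear score.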
theta = torch.normal(self.mean, self.alpha*self.std).numpy().reshape((self.c_num, self.leng))
feas, offset, candidates = super().get_recommend_info(features_list)
for i in range(len(feas)):
ind = feas[i][0]
c_num = len(self.item_candi[ind])
res = np.zeros(c_num)
for j in range(c_num):
res[j] = theta[self.item_candi[ind][j]].T.dot(candidates[offset[i]+j])
k = np.argmax(res)
self.r_index[ind] = k
final = []
for features in features_list:
final.append(self.r_index[features[0]])
return final | 14,589 | 39.082418 | 239 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/utils_gen.py | from train import Core
from train_arch import Train_Arch
import argparse
import pickle
import numpy as np
import torch
dataset_name = "data_nas"
def gen_simulation_ctr():
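    # Build the simulated CTR table: load the creative/feature candidates, restore a
    # pretrained NAS-FM model, score every (feature, creative) pair, then rescale the model
    # weights and report how spread out the resulting CTRs become.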
c_list = None
with open("raw_data/"+dataset_name+"/creative_list.pkl", 'rb') as f:
c_list = pickle.load(f)
c_list = np.array(c_list, dtype=np.float32)
item_candi = None
with open("/raw_data/"+dataset_name+"/item_candidates.pkl", 'rb') as f:
item_candi = pickle.load(f)
f_list = None
with open("raw_data/"+dataset_name+"/feature_list.pkl", 'rb') as f:
f_list = pickle.load(f)
params = {"device": "cuda",
"alpha": 0.0,
"feature_size": "raw_data/"+dataset_name+"/feature_size.pkl",
"model_struct": 3,
"model_nas": "nas",
}
framework = Train_Arch(dim=8,
weight_decay=0.001,
data_size= -1,
device="cuda",
epoch=50,
params=params)
model_path = "raw_data/data_nas/rand_0.15_3/model/fm_nas.pt"
framework.load_model(model_path)
framework.model.genotype()
# print(framework.model.named_parameters)
# for i in range(len(framework.model._arch_parameters["binary"])):
# framework.model._arch_parameters["binary"][i, np.random.randint(0,5)] = 1
arch = [framework.model._arch_parameters['binary'][i, :].argmax().item()
for i in range(len(framework.model._arch_parameters['binary']))]
print(arch)
framework.model.genotype()
f_len = len(f_list[0])
leng = len(f_list[0])+len(c_list[0])
num = 0
for ind in range(len(f_list)):
num += len(item_candi[ind])
candidates = np.zeros((num, leng),dtype=np.float32)
cnt = 0
for ind in range(len(f_list)):
t = np.zeros(leng)
t[0:f_len] = np.array(f_list[ind], dtype=np.float32)
# print(item_candi[ind])
for c_feature in item_candi[ind]:
t[f_len:] = c_list[c_feature]
candidates[cnt] = t
cnt += 1
# print(candidates.shape)
rank = framework.result(candidates)
# print(rank)
# for name,par in framework.model.named_parameters():
# print(name,par)
ctr_list = []
item_ctr = {}
cnt = 0
for ind in range(len(f_list)):
item_ctr[ind] = []
for c_feature in item_candi[ind]:
item_ctr[ind].append(rank[cnt])
ctr_list.append(rank[cnt])
cnt += 1
ctr_list = sorted(ctr_list)
print('low: ',ctr_list[:10])
print('high: ',ctr_list[-10:])
radio = 0
for ind in range(len(f_list)):
# print(item_ctr[ind])
c = sorted(item_ctr[ind])
a = c[-1]/c[int(len(c)/2)] - 1
radio += a
radio /= len(f_list)
print(radio)
k = 2
cnt = 0
res = []
    for name, parameter in framework.model.named_parameters():
        # write through .data so the in-place scaling of a leaf parameter
        # does not trip autograd's in-place check
        parameter.data[:] = parameter.data * 1.3
        cnt += 1
        res.append(parameter)
rank = framework.result(candidates)
ctr_list = []
item_ctr = {}
cnt = 0
for ind in range(len(f_list)):
item_ctr[ind] = []
for c_feature in item_candi[ind]:
item_ctr[ind].append(rank[cnt])
ctr_list.append(rank[cnt])
cnt += 1
ctr_list = sorted(ctr_list)
print('low: ',ctr_list[0:10])
print('high: ',ctr_list[-10:])
radio = 0
for ind in range(len(f_list)):
c = sorted(item_ctr[ind])
a = c[-1]/c[int(len(c)/2)] - 1
radio += a
radio /= len(f_list)
print(radio)
if __name__ == "__main__":
gen_simulation_ctr() | 3,692 | 30.29661 | 88 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/ctr.py | import numpy as np
import pandas as pd
import torch.utils.data
import itertools
import tqdm
import time
def get_index(dataset, dense_list=None):
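    # Assign every categorical value a unique global embedding index; columns listed in
    # dense_list are skipped and only contribute to the total feature dimension.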
emb_index = []
cnt = 0
dim = 0
for i in range(len(dataset)):
emb_index.append(dict())
if i in dense_list:
dim += 1
continue
ind = dataset[i].value_counts().index
size = len(ind)
for key in ind:
emb_index[i][key] = cnt
cnt += 1
dim += 1
return emb_index, dim
def get_field_info(dataset, dense=[0,0]):
feature_size = []
for i in range(dense[1]+dense[0]):
if i < dense[0]:
feature_size.append(1)
continue
ind = dataset[i].value_counts().index
print(ind)
size = len(ind)
feature_size.append(size)
return feature_size
class DirectDataset(torch.utils.data.Dataset):
def __init__(self, dataset_r):
dataset = np.array(dataset_r)
print(len(dataset))
self.items = dataset[:, 0:-1].astype(np.float32)
self.n = len(self.items[0])
self.targets = dataset[:, -1].astype(np.float32)
def __len__(self):
return self.targets.shape[0]
def __getitem__(self, index):
return self.items[index], self.targets[index]
class TfsDataset(torch.utils.data.Dataset):
def __init__(self, dataset_path, num=-1):
df = pd.read_pickle(dataset_path)
data = df.to_numpy()
self.items = data[:num, 0:-1].astype(np.float32)
(a, b) = np.shape(self.items)
print(a, b)
self.n = b
print(data[:,-1])
self.targets = self.__preprocess_target(data[:num, -1].astype(np.float32)).astype(np.float32)
# self.field_dims = np.max(self.items, axis50=0) + 1
# self.field_dims = np.array([2]*b)
self.user_field_idx = np.array((0,), dtype=np.float32)
self.item_field_idx = np.array((1,), dtype=np.float32)
def __len__(self):
return self.targets.shape[0]
def __getitem__(self, index):
return self.items[index], self.targets[index]
def __preprocess_target(self, target):
target[target <= 0] = 0
target[target > 0] = 1
return target
class DenseDataset(torch.utils.data.Dataset):
def __init__(self, dataset_path, num=-1, dense=[10,6,0], flag=0):
print(dataset_path)
df = pd.read_pickle(dataset_path)
data = df.to_numpy()
# np.random.shuffle(data)
if flag == 0:
self.items = data[:num, 0:-1].astype(np.float32)
self.targets = data[:num, -1].astype(np.float32)
else:
self.items = data[-1-num:-1, 0:-1].astype(np.float32)
self.targets = data[-1-num:-1, -1].astype(np.float32)
(a, b) = np.shape(self.items)
print(a, b)
print(data[:,-1])
# self.embedding_ind, dim = get_index(self.items, [0])
# self.feature_size = get_field_info(df, dense)
# get_field_info(df, dense)
# self.feature_size.append(dense[2])
# self.feature_size[4] = 3952
# import pickle
# with open(path, 'wb') as d:
# pickle.dump(self.feature_size, d)
# print(self.feature_size)
# self.n = dim
def __len__(self):
return self.targets.shape[0]
def __getitem__(self, index):
return self.items[index], self.targets[index]
if __name__=="__main__":
ds = DenseDataset('raw_data/data_movie_log/log.pkl')
| 3,515 | 29.310345 | 101 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/train_arch.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import argparse
from model import NAS, NAS_TS, SNAS, DSNAS, SNAS_TS, DSNAS_TS
from ctr import TfsDataset, DirectDataset, DenseDataset
from torch.utils.data import DataLoader
import time
from utils import DataPrefetcher
from sklearn.metrics import roc_auc_score, log_loss
import pickle
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def get_dataset(path, name, num=-1, flag=0):
if name == 'tfs':
return TfsDataset(path, num)
if name == 'direct':
return DirectDataset(path)
if name == 'embedded':
return DenseDataset(path, num, flag=flag)
class Train_Arch():
def __init__(self,
dim=20,
weight_decay=0.001,
data_size = -1,
epoch=30,
train_scale=0.4,
valid_scale=0.4,
learning_rate=0.001,
batch_size=1024,
device='cuda',
params=None):
self.device = torch.device(device)
self.dim = dim
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.params = params
self.model_nas = None
self.data_size = data_size
self.train_scale = train_scale
self.valid_scale = valid_scale
with open(params['feature_size'], 'rb') as f:
self.feature_size = pickle.load(f)
print(self.feature_size)
self.clock = [0 for _ in range(10)]
self.run_first = 0
self.last_arch = None
self.model_path = self.params["model_path"]
def set_dataloader(self, dataset=None, dataset_path=None, data_size=-1,train_scale=-1, valid_scale=-1, flag=0):
if train_scale == -1:
data_size = self.data_size
train_scale = self.train_scale
valid_scale = self.valid_scale
self.log_data = dataset
self.data_path = dataset_path
        if dataset is None:
self.dataset = get_dataset(dataset_path, 'embedded', data_size, flag)
else:
self.dataset = get_dataset(dataset, 'direct')
train_length = int(len(self.dataset) * train_scale)
valid_length = int(len(self.dataset) * valid_scale)
test_length = len(self.dataset) - train_length - valid_length
train_dataset, temp_dataset = torch.utils.data.random_split(
self.dataset, (train_length, len(self.dataset) - train_length))
valid_dataset, test_dataset = torch.utils.data.random_split(
temp_dataset, (valid_length, test_length))
self.train_data_loader = DataLoader(train_dataset, batch_size=self.batch_size, num_workers=32)
# valid_size = int(len(valid_dataset)/len(train_data_loader))+1
self.valid_data_loader = DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=32)
self.test_data_loader = DataLoader(test_dataset, batch_size=self.batch_size, num_workers=32)
return self.train_data_loader, self.valid_data_loader, self.test_data_loader
def get_model(self, model_nas, model_struct):
if model_struct not in [0,1,2,3]:
print("no model struct %s in model nas %s class!!!", model_struct, model_nas)
exit()
if model_nas == "nas":
return NAS(self.feature_size, self.dim, self.params, m_type=model_struct)
elif model_nas == "snas":
return SNAS(self.feature_size, self.dim, self.params, m_type=model_struct)
elif model_nas == "dsnas":
return DSNAS(self.feature_size, self.dim, self.params, m_type=model_struct)
elif model_nas == "nas_ts":
return NAS_TS(self.feature_size, self.dim, self.params, m_type=model_struct)
elif model_nas == "snas_ts":
return SNAS_TS(self.feature_size, self.dim, self.params, m_type=model_struct)
elif model_nas == "dsnas_ts":
return DSNAS_TS(self.feature_size, self.dim, self.params, m_type=model_struct)
else:
print("no model named %s in nas train class!!!", model_nas)
exit()
def initial_model(self, model_nas, model_struct, optimizer='adam'):
self.model_nas = model_nas
self.model = self.get_model(model_nas, model_struct).to(self.device)
# print(1)
self.optimizer = torch.optim.Adam(params=self.model.parameters(),
lr=self.learning_rate,
weight_decay=self.weight_decay)
# self.optimizer = torch.optim.Adagrad(params=self.model.parameters(),lr=self.learning_rate, weight_decay=self.weight_decay)
self.arch_optimizer = torch.optim.Adam(params=self.model.arch_parameters(),
lr=self.learning_rate,
weight_decay=self.weight_decay)
if self.params["simulate_type"]=="click":
self.criterion = torch.nn.BCELoss() #reduction="sum"
else:
# self.criterion = torch.nn.MSELoss()
self.criterion = F.binary_cross_entropy
def train_arch(self, train_type=0):
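        # One pass over the training stream. train_type 0 runs the model's own NAS step
        # (updating the architecture parameters); train_type 1/2 binarize the architecture
        # and update only the network weights. The temperature schedule anneals the
        # Gumbel-softmax used by the SNAS variants.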
self.model.train()
losses = []
prefetcher = DataPrefetcher(self.train_data_loader, self.device)
total_y =[0,0]
clock = [0 for _ in range(10)]
clock[0] -= time.time()
step = 0
while 1:
t=1
clock[1] -= time.time()
train_data = prefetcher.next()
temperature = 2.5 * np.exp(-0.036 * step)
            if train_data is None:
clock[1] += time.time()
break
(fields, target) = train_data
fields, target = fields.to(self.device), target.to(self.device)
clock[1] += time.time()
if train_type == 0:
if "dsnas" in self.model_nas:
others = self.optimizer
elif "snas" in self.model_nas:
others = temperature
else:
others = 0
if "_ts" in self.model_nas:
loss = self.model.step_ts(fields, target, self.criterion, self.arch_optimizer, others)
else:
loss = self.model.step(fields, target, self.criterion, self.arch_optimizer, others)
losses.append(loss.cpu().detach().item())
else:
self.optimizer.zero_grad()
self.arch_optimizer.zero_grad()
if self.model_nas== "snas":
self.model.binarize(temperature)
y = self.model(fields, 1)
loss = self.criterion(y, target.float())
elif "_ts" in self.model_nas:
self.model.binarize()
if train_type == 1:
k, y = self.model.forward_ts(fields, 1)
elif train_type == 2:
k, y = self.model.forward_ts(fields, 0)
# loss_t = F.binary_cross_entropy(input=y,target=target,reduction='sum')
loss_t = self.criterion(y, target.float())*self.batch_size
alpha = 1/len(self.train_data_loader)
loss = alpha * k + loss_t
total_y[0]+=k
total_y[1]+=loss_t
else:
self.model.binarize()
if train_type == 1:
y = self.model(fields, 1)
elif train_type == 2:
y = self.model(fields, 0)
loss = self.criterion(y, target.float())
loss.backward()
self.model.recover()
self.optimizer.step()
losses.append(loss.cpu().detach().item())
clock[0] += time.time()
print('cost:', [round(c, 5) for c in clock[0:7]])
# print("model cost:", [round(c, 5) for c in self.model.cost])
self.model.cost = [0 for _ in range(6)]
# g, gp = self.model.genotype()
# print('genotype: %s' % g)
# print('genotype_p: %s' % gp)
if "nas_ts" in self.model_nas:
print('kl distance ', total_y[0].item() / len(self.train_data_loader),'bce loss ', total_y[1].item() / len(self.train_data_loader))
print('---- loss:', np.mean(losses))
return np.mean(losses)
def run(self):
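        # trick == 0: architecture fixed from params["arch"], train the weights with early
        # stopping and keep the best checkpoint. trick == 1: alternate architecture search
        # and weight training each epoch, then retrain the weights of the derived architecture.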
print("run!!!!!!")
if self.params["trick"] == 0:
arch = self.params["arch"]
for j in range(len(self.model._arch_parameters["binary"])):
self.model._arch_parameters["binary"].data[j, arch[j]] = 1
best_loss = 1000000
stop_cnt = 0
self.model.genotype()
for epoch_i in range(self.epoch):
print(epoch_i, end=' ')
loss = self.train_arch(train_type=2)
if len(self.test_data_loader)>10:
self.test(self.test_data_loader)
if best_loss - 0.000001 < loss:
stop_cnt += 1
if stop_cnt > 6:
break
else:
best_loss = loss
self.save_model(self.model_path)
stop_cnt = 0
self.load_model(self.model_path)
if len(self.test_data_loader)>10:
self.test(self.test_data_loader)
elif self.params["trick"] == 1:
for epoch_i in range(self.epoch):
print(epoch_i, end=' ')
self.train_arch()
self.model.genotype()
self.train_arch(train_type=2)
if len(self.test_data_loader)> 10:
self.test(self.test_data_loader)
best_loss = 1000000
stop_cnt = 0
for i in range(100):
print(i, end=' ')
loss = self.train_arch(train_type=2)
if best_loss - 0.000001 < loss:
stop_cnt += 1
if stop_cnt > 6:
break
else:
best_loss = loss
self.save_model(self.model_path)
stop_cnt = 0
self.load_model(self.model_path)
def test(self, data_loader):
self.model.eval()
self.model.binarize()
targets, predicts, id_list = list(), list(), list()
loss = 0
with torch.no_grad():
for fields, target in data_loader:
fields, target = fields.to(self.device), target.to(self.device)
y = self.model(fields, 0)
loss += F.binary_cross_entropy(input=y,target=target,reduction='mean')
targets.extend(target.tolist())
predicts.extend(y.tolist())
self.model.recover()
auc = roc_auc_score(targets, predicts)
# print("loss: ", loss.item()/len(data_loader))
print("auc: ", auc)#, " g_auc:", cal_group_auc(targets, predicts, id_list)
# print("bce:", torch.nn.functional.binary_cross_entropy(input=predicts,target=target,reduction='mean'))
return auc
def result(self, candidates):
self.model.eval()
self.model.binarize()
candidates = torch.from_numpy(candidates).to(self.device)
if "_ts" in self.model_nas:
_, ranking = self.model.forward_ts(candidates, 0, cal_kl=0)
else:
ranking = self.model(candidates, 0)
self.model.recover()
return ranking.detach().cpu().numpy()
def save_model(self, path):
torch.save(self.model, path)
def load_model(self, path):
self.model_nas = self.params["model_nas"]
self.model = torch.load(path)
def print_w(self):
self.model.print()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', type=str, default='raw_data/dataset/dataset_test.pkl')
parser.add_argument('--feature_size', type=str, default='raw_data/data_online/feature_size.pkl')
parser.add_argument('--dim', type=int, default=8)
parser.add_argument('--decay', type=float, default=0.001)
parser.add_argument('--learning', type=float, default=0.001)
parser.add_argument("--device", type=str, default="cuda")
parser.add_argument("--epoch", type=int, default=50)
parser.add_argument("--dataset", type=int, default=-1)
parser.add_argument("--times",type=int,default=1)
parser.add_argument("--optimizer",type= str, default="adam")
parser.add_argument('--model_struct', type=int, default=0)
parser.add_argument('--model_nas', type=str, default="fm")
args = parser.parse_args()
params = {"device": args.device,
"alpha": 0.0,
"dense": [10,6,0],
'feature_size': args.feature_size,
"operator": "multiply",
'trick': 2,
"model_struct": args.model_struct,
"model_nas": args.model_nas,
"first_order": 1,
"calcu_dense": 0,
"f_len": 12,
"early_fix_arch": False,
"simulate_type":"click",
'arch': [4,4,1,4,4,4,2,2,4,4,1,1,1,1,1]} # [1,0,2,0,3,0,3,1,2,2,0,3,0,3,1,0,1,2,3,0,1,2]
# [1,0,2,0,3,0,3,1,2,2,0,3,0,3,1][4,4,1,4,4,4,2,2,4,4,1,1,1,1,1]
framework = Train_Arch(dim=int(args.dim),
weight_decay= float(args.decay),
data_size=int(args.dataset),
device=args.device,
train_scale=0.8,
valid_scale=0,
epoch=int(args.epoch),
params=params)
result = []
for i in range(args.times):
search_start = time.time()
framework.set_dataloader(dataset_path=args.dataset_path)
framework.initial_model(args.model_nas, args.model_struct, "adam")
# framework.params["trick"] = 0
# framework.epoch = int(args.epoch)
# framework.train_scale=0.4
# framework.valid_scale=0.4
# framework.set_dataloader(dataset_path=args.dataset_path)
# # for j in range(len(framework.model._arch_parameters["binary"])):
# # framework.model._arch_parameters["binary"].data[j, np.random.randint(0,3)] = 1
framework.params["arch"] = [1 for i in range(framework.model.multi_len)]
framework.run()
framework.model.genotype()
# framework.test(framework.train_data_loader)
# model_path = "../dataset/data_nas/raw_data/data_nas/rand_0.15_3/model/fm_nas.pt"
# framework.save_model(model_path)
print("---------------------")
print("cost: ", time.time()-search_start)
print("---------------------")
# # framework.load_model(model_path)
# arch = [framework.model._arch_parameters['binary'][i, :].argmax().item()
# for i in range(len(framework.model._arch_parameters['binary']))]
# # framework.params["arch"] = arch
# framework.params["trick"] = 2
# framework.epoch = 30
# framework.train_scale=0.8
# framework.valid_scale=0
# framework.set_dataloader(dataset_path=args.dataset_path)
# framework.run()
result.append(framework.model.genotype())
result.append(framework.test(framework.train_data_loader))
print(result)
| 15,572 | 42.744382 | 143 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/train.py | import os
import time
import torch
import pickle
import numpy as np
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, log_loss
from torch.utils.data import DataLoader
from ctr import TfsDataset, DirectDataset, DenseDataset
from model import OFM, OFM_TS
from utils import cal_group_auc, FTRL
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def get_dataset(path, name, num=-1, flag=0):
if name == 'tfs':
return TfsDataset(path, num)
if name == 'direct':
return DirectDataset(path)
if name == 'embedded':
return DenseDataset(path, num, flag=flag)
class Core(object):
def __init__(self,
dim=20,
weight_decay=0.001,
data_size = -1,
epoch=30,
train_scale=0.4,
valid_scale=0.4,
learning_rate=0.001,
batch_size=1024,
device='cuda',
params=None):
self.device = torch.device(device)
self.dim = dim
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.params = params
self.model_nas = None
self.data_size = data_size
self.train_scale = train_scale
self.valid_scale = valid_scale
with open(params['feature_size'], 'rb') as f:
self.feature_size = pickle.load(f)
self.clock = [0 for _ in range(10)]
def set_dataloader(self, dataset=None, dataset_path=None, data_size=-1,train_scale=-1, valid_scale=-1, flag=0):
"split data into 3 part, train, test, valid"
if train_scale == -1:
data_size = self.data_size
train_scale = self.train_scale
valid_scale = self.valid_scale
self.log_data = dataset
self.data_path = dataset_path
        if dataset is None:
self.dataset = get_dataset(dataset_path, 'embedded', data_size, flag)
else:
self.dataset = get_dataset(dataset, 'direct')
train_length = int(len(self.dataset) * train_scale)
valid_length = int(len(self.dataset) * valid_scale)
test_length = len(self.dataset) - train_length - valid_length
train_dataset, temp_dataset = torch.utils.data.random_split(
self.dataset, (train_length, len(self.dataset) - train_length))
valid_dataset, test_dataset = torch.utils.data.random_split(
temp_dataset, (valid_length, test_length))
self.train_data_loader = DataLoader(train_dataset, batch_size=self.batch_size, num_workers=32)
# valid_size = int(len(valid_dataset)/len(train_data_loader))+1
self.valid_data_loader = DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=32)
self.test_data_loader = DataLoader(test_dataset, batch_size=self.batch_size, num_workers=32)
return self.train_data_loader, self.valid_data_loader, self.test_data_loader
def get_model(self, model_nas, model_struct):
if model_struct not in [0,2]:
print("no model struct %s in model nas %s class!!!", model_struct, model_nas)
exit()
if model_nas == "fm":
model = OFM(self.feature_size, self.dim, self.params, m_type=model_struct)
model.setotype(self.params["operator"])
return model
elif model_nas == "fm_ts":
model = OFM_TS(self.feature_size, self.dim, self.params, m_type=model_struct)
model.setotype(self.params["operator"])
return model
else:
print("no model named %s in train class!!!", model_nas)
exit()
def initial_model(self, model_nas, model_struct, optimizer='adam'):
self.model_nas = model_nas
self.model = self.get_model(model_nas, model_struct).to(self.device)
# print(1)
if self.params["simulate_type"]=="click":
self.criterion = torch.nn.BCELoss() #reduction="sum"
else:
self.criterion = F.binary_cross_entropy
# self.criterion = torch.nn.MSELoss()
if optimizer == 'adam':
self.optimizer = torch.optim.Adam(params=self.model.parameters(),
lr=self.learning_rate, weight_decay=self.weight_decay)
elif optimizer == 'ftrl':
self.optimizer = FTRL(params=self.model.parameters(),
alpha=1.0, beta=1.0, l1=0.001, l2=0.001)
def train(self, data_loader, flag=0):
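        # For the variational "fm_ts" model the loss is the KL term k weighted by
        # 1/num_batches plus the batch BCE scaled by the batch size; otherwise plain BCE.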
self.model.train()
total_loss = 0
total_y =[0,0]
M = len(data_loader)
self.clock[0] -= time.time()
for i, (fields, target) in enumerate(data_loader):
self.clock[1] -= time.time()
fields, target = fields.to(self.device), target.to(self.device)
self.clock[1] += time.time()
self.clock[2] -= time.time()
if self.model_nas == "fm_ts":
k, y = self.model.forward_ts(fields, 0)
# loss_t = F.binary_cross_entropy(input=y,target=target,reduction='sum')
loss_t = self.criterion(y, target.float())*self.batch_size
alpha = 1/len(data_loader)
loss = alpha * k + loss_t
total_y[0]+=k
total_y[1]+=loss_t
else:
y = self.model(fields, 0)
loss = self.criterion(y, target.float())
self.clock[2] += time.time()
self.clock[3] -= time.time()
self.model.zero_grad()
loss.backward()
self.clock[3] += time.time()
self.clock[4] -= time.time()
self.optimizer.step()
self.clock[4] += time.time()
total_loss += loss.item()
self.clock[0] += time.time()
if self.model_nas == "fm_ts":
print('kl distance ', total_y[0].item() / len(data_loader),'bce loss ', total_y[1].item() / len(data_loader))
print('------ loss:', total_loss / len(data_loader))
print([round(c, 5) for c in self.clock[0:7]])
# print(self.model.cost)
self.model.cost = [0 for _ in range(6)]
self.clock = [0 for _ in range(10)]
return total_loss / len(data_loader)
def test(self, data_loader):
self.model.eval()
targets, predicts, id_list = list(), list(), list()
loss = 0
with torch.no_grad():
for fields, target in data_loader:
fields, target = fields.to(self.device), target.to(self.device)
if self.model_nas == "fm_ts":
_,y = self.model.forward_ts(fields, 0, cal_kl=1)
else:
y = self.model(fields, 0)
loss += torch.nn.functional.binary_cross_entropy(input=y,target=target,reduction='sum')
targets.extend(target.tolist())
predicts.extend(y.tolist())
for i in fields.tolist():
id_list.append(",".join([str(s) for s in i[:68]]))
# print("loss: ", loss.item()/len(data_loader))
auc = roc_auc_score(targets, predicts)
print("auc: ", auc)#, " g_auc:", cal_group_auc(targets, predicts, id_list)
# print("bce:", torch.nn.functional.binary_cross_entropy(input=predicts,target=target,reduction='mean'))
return auc
def run(self, flag=0):
epoch = self.epoch
start = time.time()
stop_cnt = 0
best_loss = 10000000
for i in range(epoch):
print(i, end=" ")
loss = self.train(self.train_data_loader)
if best_loss - 0.000001 < loss:
stop_cnt += 1
if stop_cnt > 6:
break
else:
best_loss = loss
stop_cnt = 0
print(len(self.test_data_loader))
if len(self.test_data_loader)> 10:
self.test(self.test_data_loader)
# if i%10==0 or epoch-i<4:
# self.print_w()
# if i%10 == 9:
# print('epoch:', i + 1)
print("cost time: ", time.time()-start)
def result(self, candidates):
self.model.eval()
candidates = torch.from_numpy(candidates).to(self.device)
if self.model_nas == "fm_ts":
_, ranking = self.model.forward_ts(candidates, 0, cal_kl=0)
else:
ranking = self.model(candidates, 0)
return ranking.detach().cpu().numpy()
def print_w(self):
self.model.print()
def save_model(self, path):
torch.save(self.model, path)
def load_model(self, path):
self.model = torch.load(path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', type=str, default='raw_data/dataset/dataset_1400.pkl')
parser.add_argument('--feature_size', type=str, default='raw_data/data_1400w/feature_size_id.pkl')
parser.add_argument('--dim', type=int, default=8)
parser.add_argument('--decay', type=float, default=0.001)
parser.add_argument('--learning', type=float, default=0.001)
parser.add_argument("--device", type=str, default="cuda")
parser.add_argument("--epoch", type=int, default=15)
parser.add_argument("--dataset", type=int, default=-1)
parser.add_argument("--times",type=int,default=1)
parser.add_argument("--optimizer",type= str, default="adam")
parser.add_argument("--model", type=str, default="fm")
parser.add_argument("--oper", type=str, default="multiply")
args = parser.parse_args()
params = {"device": args.device,
"alpha": 0.0,
"dense": [0,5,18],
'feature_size': args.feature_size,
"operator": args.oper,
'trick': 2,
"first_order": 1,
"calcu_dense": 0,
'arch':[1,0,2,0,3,0,3,1,2,2,0,3,0,3,1] } # [1,0,2,0,3,0,3,1,2,2,0,3,0,3,1,0,1,2,3,0,1,2]
framework = Core(dim=int(args.dim),
weight_decay= float(args.decay),
data_size=int(args.dataset),
device=args.device,
                     train_scale=0.8,
                     valid_scale=0,
epoch=int(args.epoch),
params=params)
framework.set_dataloader(dataset_path=args.dataset_path)
for i in range(args.times):
search_start = time.time()
        framework.initial_model(args.model, 0, args.optimizer)  # model_struct 0 assumed; initial_model also needs the structure id
# for j in range(len(framework.model._arch_parameters["binary"])):
# framework.model._arch_parameters["binary"].data[j, np.random.randint(0,3)] = 1
framework.run()
# framework.model.genotype()
framework.test(framework.train_data_loader)
print("---------------------")
print("cost: ", time.time()-search_start)
print("---------------------")
# framework.load_model(model_path)
framework.print_w()
# with open("../dataset/data_nas/raw_data/data_nas/model_param_0.15.pkl", 'rb') as f:
# param = torch.load(f)
# cnt = 0
# for name, parameter in framework.model.named_parameters():
# parameter[:] = param[cnt]
# cnt += 1
# framework.test(framework.train_data_loader)
| 11,347 | 39.820144 | 121 | py |
AutoCO | AutoCO-main/exp_public/mushroom/data/simulate_process.py | import numpy as np
import pandas as pd
data = pd.read_csv("data.txt", header=None)
data.columns = ["label", "cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
N = 50000
sample_indices = np.random.randint(0, data.shape[0], size=N)
data_sample = data.loc[sample_indices]
def encode(data, column):
label = sorted(data[column].unique(), key=lambda x:x)
label_indices = list(range(len(label)))
label_dict = dict(zip(label, label_indices))
data.loc[:, column] = data[column].map(label_dict)
for name in data_sample.columns:
encode(data_sample, name)
def reward(x):
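    # Bandit reward for eating: an edible mushroom (label 0) yields +5, a poisonous one
    # (label 1) yields +5 or -35 with equal probability, any other value yields -1.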
if x == 0:
return 5
elif x == 1:
if np.random.random() >= 0.5:
return 5
else:
return -35
else:
return -1
# after encoding, label 0 is edible and label 1 is poisonous
data_sample["eat_reward"] = data_sample["label"].map(lambda x: reward(x))
data_sample["noteat_reward"] = 0
data_sample = data_sample[["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat", "eat_reward", "noteat_reward"]]
data_sample.to_csv("./data_sample.csv", index=False)
| 1,784 | 41.5 | 135 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/main.py | import os
import sys
import glob
import numpy as np
import torch
import logging
import argparse
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils
import torch.nn.functional as F
from torch.autograd import Variable
import time
import utils
from train import train
from vartional_model import DSNAS_v, NASP_v, MAX_v, PLUS_v, CONCAT_v, MIN_v, MULTIPLY_v, NASP
from baseline import FM, FM_v, FM_v2, Random, Egreedy, Thompson, LinUCB, LinEGreedy, LinThompson
os.environ['KMP_DUPLICATE_LIB_OK']='True'
parser = argparse.ArgumentParser(description="Search.")
parser.add_argument('--data', type=str, default='data', help='location of the data corpus')
parser.add_argument('--lr', type=float, default=5e-2, help='init learning rate')
parser.add_argument('--arch_lr', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--weight_decay', type=float, default=1e-5, help='weight decay')
parser.add_argument('--opt', type=str, default='Adagrad', help='choice of opt')
parser.add_argument('--batch_size', type=int, default=512, help='choose batch size')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--search_epochs', type=int, default=100, help='num of searching epochs')
parser.add_argument('--train_epochs', type=int, default=10000, help='num of training epochs')
parser.add_argument('--save', type=str, default='EXP')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--valid_portion', type=float, default=0.25, help='portion of validation data')
parser.add_argument('--dataset', type=str, default='ml-100k', help='dataset')
parser.add_argument('--mode', type=str, default='sif', help='choose how to search')
parser.add_argument('--embedding_dim', type=int, default=8, help='dimension of embedding')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--gen_max_child', action='store_true', default=False, help='generate child network by argmax(alpha)')
parser.add_argument('--gen_max_child_flag', action='store_true', default=False, help='flag of generating child network by argmax(alpha)')
parser.add_argument('--random_sample', action='store_true', default=False, help='true if sample randomly')
parser.add_argument('--early_fix_arch', action='store_true', default=False, help='bn affine flag')
parser.add_argument('--loc_mean', type=float, default=1, help='initial mean value to generate the location')
parser.add_argument('--loc_std', type=float, default=0.01, help='initial std to generate the location')
parser.add_argument('--momentum', type=float, default=0.9, help="momentum")
parser.add_argument('--ofm', action='store_true', default=False, help="different operation with different embedding")
parser.add_argument('--embedding_num', type=int, default=12, help="the size of embedding dictionary")
parser.add_argument('--multi_operation', action='store_true', default=False, help="use multi operation or not")
parser.add_argument('--epsion', type=float, default=0.0, help="epsion of egreedy")
parser.add_argument('--search_epoch', type=int, default=10, help="the epoch num for searching arch")
parser.add_argument('--trans', action='store_true', default=False, help="trans the embedding or not!")
parser.add_argument('--first_order', action='store_true', default=False, help="use first order or not!")
args = parser.parse_args()
print("args ofm:", args.ofm)
print("embedding_num:", args.embedding_num)
save_name = 'experiments/{}/search-{}-{}-{}-{}-{}-{}-{}-{}'.format(args.dataset, time.strftime("%Y%m%d-%H%M%S"),
args.mode, args.save, args.embedding_dim, args.opt, args.lr, args.arch_lr, args.seed)
if args.unrolled:
save_name += '-unrolled'
save_name += '-' + str(np.random.randint(10000))
utils.create_exp_dir(save_name, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(save_name, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
np.random.seed(args.seed)
# torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
# torch.cuda.manual_seed(args.seed)
# logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
data_start = time.time()
if args.mode == "LinUCB" or args.mode == "LinEGreedy" or args.mode == "LinThompson":
train_queue = utils.get_data_queue(args, True)
else:
train_queue = utils.get_data_queue(args, False)
logging.info('prepare data finish! [%f]' % (time.time() - data_start))
if args.mode == "DSNAS_v":
model = DSNAS_v(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = torch.optim.Adam(
[param for name, param in model.named_parameters() if name == 'log_alpha'],
lr=args.arch_lr,
betas=(0.5, 0.999),
weight_decay=0.0
)
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
elif args.mode == "NASP_v":
model = NASP_v(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = torch.optim.Adam(
[param for name, param in model.named_parameters() if name == 'log_alpha'],
lr=args.arch_lr,
betas=(0.5, 0.999),
weight_decay=0.0
)
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
elif args.mode == "NASP":
model = NASP(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = torch.optim.Adam(
[param for name, param in model.named_parameters() if name == 'log_alpha'],
lr=args.arch_lr,
betas=(0.5, 0.999),
weight_decay=0.0
)
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
elif args.mode == "Random":
model = Random(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "Egreedy":
model = Egreedy(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "Thompson":
model = Thompson(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "LinUCB":
model = LinUCB(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "LinThompson":
model = LinThompson(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "LinEGreedy":
model = LinEGreedy(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "FM":
model = FM(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'], args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "FM_v":
model = FM_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "MULTIPLY_v":
model = MULTIPLY_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "MAX_v":
model = MAX_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "MIN_v":
model = MIN_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "PLUS_v":
model = PLUS_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "CONCAT_v":
model = CONCAT_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
else:
raise ValueError("bad choice!")
if __name__ == '__main__':
main()
| 13,976 | 50.386029 | 137 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/utils.py | import numpy as np
import pandas as pd
import os
import os.path
import sys
import shutil
import torch
import torch.nn as nn
import torch.utils
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction import DictVectorizer
from sklearn.utils import shuffle
from torch.utils.data import Dataset, DataLoader
from models import PRIMITIVES_BINARY
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.makedirs(path)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
os.mkdir(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
def sample_arch():
arch = {}
arch['mlp'] = {}
arch['mlp']['p'] = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
arch['mlp']['q'] = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
arch['binary'] = PRIMITIVES_BINARY[np.random.randint(len(PRIMITIVES_BINARY))]
return arch
class Mushroom(Dataset):
def __init__(self, root_dir, dummpy):
self.data = pd.read_csv(root_dir)
self.dummpy = dummpy
def __len__(self):
return self.data.shape[0]
def one_hot(self, df, cols):
res = []
for col in cols:
dummies = pd.get_dummies(df[col], prefix=col, drop_first=False)
res.append(dummies)
df = pd.concat(res, axis=1)
return df
def __getitem__(self, index):
sample = {}
if not self.dummpy:
sample["cap-shape"] = self.data.iloc[index]["cap-shape"]
sample["cap-surface"] = self.data.iloc[index]["cap-surface"]
sample["cap-color"] = self.data.iloc[index]["cap-color"]
sample["bruises"] = self.data.iloc[index]["bruises"]
sample["odor"] = self.data.iloc[index]["odor"]
sample["gill-attachment"] = self.data.iloc[index]["gill-attachment"]
sample["gill-spacing"] = self.data.iloc[index]["gill-spacing"]
sample["gill-size"] = self.data.iloc[index]["gill-size"]
sample["gill-color"] = self.data.iloc[index]["gill-color"]
sample["stalk-shape"] = self.data.iloc[index]["stalk-shape"]
sample["stalk-root"] = self.data.iloc[index]["stalk-root"]
sample["stalk-surface-above-ring"] = self.data.iloc[index]["stalk-surface-above-ring"]
sample["stalk-surface-below-ring"] = self.data.iloc[index]["stalk-surface-below-ring"]
sample["stalk-color-above-ring"] = self.data.iloc[index]["stalk-color-above-ring"]
sample["stalk-color-below-ring"] = self.data.iloc[index]["stalk-color-below-ring"]
sample["veil-type"] = self.data.iloc[index]["veil-type"]
sample["veil-color"] = self.data.iloc[index]["veil-color"]
sample["ring-number"] = self.data.iloc[index]["ring-number"]
sample["ring-type"] = self.data.iloc[index]["ring-type"]
sample["spore-print-color"] = self.data.iloc[index]["spore-print-color"]
sample["population"] = self.data.iloc[index]["population"]
sample["habitat"] = self.data.iloc[index]["habitat"]
eat_reward = self.data.iloc[index]["eat_reward"]
noteat_reward = self.data.iloc[index]["noteat_reward"]
sample["label"] = torch.Tensor([noteat_reward, eat_reward])
#sample["label"] = torch.Tensor([eat_reward, noteat_reward])
else:
cols = list(self.data.columns)
_ = cols.remove("eat_reward")
_ = cols.remove("noteat_reward")
data2 = self.one_hot(self.data, cols)
sample["feature"] = torch.Tensor(data2.iloc[index][:])
eat_reward = self.data.iloc[index]["eat_reward"]
noteat_reward = self.data.iloc[index]["noteat_reward"]
sample["label"] = torch.Tensor([eat_reward, noteat_reward])
return sample
def get_data_queue(args, dummpy):
print(args.dataset)
if args.dataset == 'mushroom':
train_data = "../data/data_sample.csv"
train_dataset = Mushroom(train_data, dummpy)
train_queue = DataLoader(train_dataset, batch_size=args.batch_size,pin_memory=True)
return train_queue
else:
return None
class Mushroom2(Dataset):
def __init__(self, contexts, pos_weights):
self.data = contexts
self.pos_weights = pos_weights
def __len__(self):
return self.data["label"].shape[0]
def __getitem__(self, index):
sample = {}
sample["cap-shape"] = self.data["cap-shape"][index]
sample["cap-surface"] = self.data["cap-surface"][index]
sample["cap-color"] = self.data["cap-color"][index]
sample["bruises"] = self.data["bruises"][index]
sample["odor"] = self.data["odor"][index]
sample["gill-attachment"] = self.data["gill-attachment"][index]
sample["gill-spacing"] = self.data["gill-spacing"][index]
sample["gill-size"] = self.data["gill-size"][index]
sample["gill-color"] = self.data["gill-color"][index]
sample["stalk-shape"] = self.data["stalk-shape"][index]
sample["stalk-root"] = self.data["stalk-root"][index]
sample["stalk-surface-above-ring"] = self.data["stalk-surface-above-ring"][index]
sample["stalk-surface-below-ring"] = self.data["stalk-surface-below-ring"][index]
sample["stalk-color-above-ring"] = self.data["stalk-color-above-ring"][index]
sample["stalk-color-below-ring"] = self.data["stalk-color-below-ring"][index]
sample["veil-type"] = self.data["veil-type"][index]
sample["veil-color"] = self.data["veil-color"][index]
sample["ring-number"] = self.data["ring-number"][index]
sample["ring-type"] = self.data["ring-type"][index]
sample["spore-print-color"] = self.data["spore-print-color"][index]
sample["population"] = self.data["population"][index]
sample["habitat"] = self.data["habitat"][index]
sample["label"] = self.data["label"][index]
sample["pos_weights"] = self.pos_weights[index]
return sample
def get_data_queue_bandit(args, contexts, pos_weights):
train_dataset = Mushroom2(contexts, pos_weights)
train_queue = DataLoader(train_dataset, batch_size=args.batch_size,pin_memory=True)
return train_queue
| 6,504 | 43.554795 | 98 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import utils
import time
PRIMITIVES_BINARY = ['plus', 'multiply', 'max', 'min', 'concat']
PRIMITIVES_NAS = [0, 2, 4, 8, 16]
SPACE_NAS = pow(len(PRIMITIVES_NAS), 5)
OPS = {
'plus': lambda p, q: p + q,
'multiply': lambda p, q: p * q,
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1),
'norm_0': lambda p: torch.ones_like(p),
'norm_0.5': lambda p: torch.sqrt(torch.abs(p) + 1e-7),
'norm_1': lambda p: torch.abs(p),
'norm_2': lambda p: p ** 2,
'I': lambda p: torch.ones_like(p),
'-I': lambda p: -torch.ones_like(p),
'sign': lambda p: torch.sign(p),
}
def constrain(p):
c = torch.norm(p, p=2, dim=1, keepdim=True)
c[c < 1] = 1.0
p.data.div_(c)
def MixedBinary(embedding_p, embedding_q, weights, FC):
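    # Continuous relaxation used for architecture search: apply every candidate interaction
    # operator to (p, q), pass each result through its own linear head, and mix the outputs
    # with the architecture weights.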
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w,primitive,fc in zip(weights,PRIMITIVES_BINARY,FC)]), 0)
class Virtue(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False):
super(Virtue, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_all = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
if not ofm:
for name in self.columns:
self.embedding_all[name] = nn.Embedding(embedding_num, embedding_dim)
else:
for name in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
temp.append(nn.Embedding(embedding_num, embedding_dim))
self.embedding_all[name] = temp
def compute_loss(self, inferences, labels, regs):
labels = torch.reshape(labels, [-1,1])
loss = F.binary_cross_entropy_with_logits(inferences, labels.float())
#loss = F.mse_loss(inferences, labels)
return loss + regs
class Network(Virtue):
def __init__(self, embedding_dim, arch, reg):
super(Network, self).__init__(embedding_dim, reg)
self.arch = arch
self.mlp_p = arch['mlp']['p']
self.mlp_q = arch['mlp']['q']
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
if arch['binary'] == 'concat':
self.FC[name1+ ":" + name2] = nn.Linear(2*embedding_dim, 1, bias=False)
else:
self.FC[name1 + ":" + name2] = nn.Linear(embedding_dim, 1, bias=False)
def forward(self, features):
for value in self.FC.values():
constrain(next(value.parameters()))
inferences = 0
regs = 0
for name1 in self.columns:
for name2 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1,1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += self.FC[name1 + ":" + name2](OPS[self.arch['binary']](name1_embedding_trans, name2_embedding_trans))
return inferences, regs
class Network_Search(Virtue):
def __init__(self, embedding_dim, reg):
super(Network_Search, self).__init__(embedding_dim, reg)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self._arch_parameters = {}
self._arch_parameters['mlp'] = {}
self._arch_parameters['mlp']['p'] = self.mlp_p
self._arch_parameters['mlp']['q'] = self.mlp_q
self._arch_parameters['binary'] = Variable(torch.ones(len(PRIMITIVES_BINARY),
dtype=torch.float, device='cpu') / 2, requires_grad=True)
#self._arch_parameters['binary'] = Variable(torch.Tensor([1.0,1.0,1.0,1.0,1.0]), requires_grad=True)
self._arch_parameters['binary'].data.add_(
torch.randn_like(self._arch_parameters['binary'])*1e-3)
def arch_parameters(self):
return list(self._arch_parameters['mlp']['p'].parameters()) + \
list(self._arch_parameters['mlp']['q'].parameters()) + [self._arch_parameters['binary']]
def new(self):
model_new = Network_Search(self.num_users, self.num_items, self.embedding_dim, self.reg)
for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data = y.data.clone()
return model_new
def clip(self):
m = nn.Hardtanh(0, 1)
self._arch_parameters['binary'].data = m(self._arch_parameters['binary'])
def binarize(self):
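        # Cache the continuous architecture weights and replace them with a one-hot vector at
        # the argmax operator for the forward pass; recover() restores the cached values
        # before the architecture parameters are updated.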
self._cache = self._arch_parameters['binary'].clone()
max_index = self._arch_parameters['binary'].argmax().item()
for i in range(self._arch_parameters['binary'].size(0)):
if i == max_index:
self._arch_parameters['binary'].data[i] = 1.0
else:
self._arch_parameters['binary'].data[i] = 0.0
def recover(self):
self._arch_parameters['binary'].data = self._cache
del self._cache
def forward(self, features):
# for i in range(len(PRIMITIVES_BINARY)):
# constrain(next(self._FC[i].parameters()))
inferences = 0
regs = 0
for name1 in self.columns:
for name2 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, self._arch_parameters['binary'], self.FC[name1 + ":" + name2])
return inferences, regs
def genotype(self):
genotype = PRIMITIVES_BINARY[self._arch_parameters['binary'].argmax().cpu().numpy()]
genotype_p = F.softmax(self._arch_parameters['binary'], dim=-1)
return genotype, genotype_p.cpu().detach()
def step(self, features, features_valid, lr, arch_optimizer, unrolled):
self.zero_grad()
arch_optimizer.zero_grad()
# binarize before forward propagation
self.binarize()
loss = self._backward_step(features_valid)
# restore weight before updating
self.recover()
arch_optimizer.step()
return loss
def _backward_step(self, features_valid):
inferences, regs = self(features_valid)
loss = self.compute_loss(inferences, features_valid["label"], regs)
loss.backward()
return loss
class DSNAS(Virtue):
def __init__(self, embedding_dim, reg, args):
super(DSNAS, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((num_op*num_op, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
def recommend(self, features):
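        # Serve a batch: fix the architecture at the argmax operator (per field pair when
        # multi_operation), score the two arms (not eating vs. eating), choose epsilon-greedily,
        # accumulate the observed reward and store the contexts for the next update step.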
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
name1_embedding_trans = name1_embedding#self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = name2_embedding#self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
#ipdb.set_trace()
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
simulate_index = []
for i in range(len(max_index)):
if np.random.random() < self.args.epsion:
simulate_index.append(np.random.randint(0, 2))
else:
simulate_index.append(max_index[i])
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
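    # One training round: replay every context/action pair collected so far and update both
    # the network weights and the architecture parameters.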
def step(self, optimizer, arch_optimizer):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
cnt = 0
time_data = 0
time_forward = 0
time_update = 0
end = -1
for step, features in enumerate(train_bandit):
if end!=-1:
time_data += time.time() - end
begin = time.time()
optimizer.zero_grad()
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
time_forward += time.time() - begin
losses.append(error_loss.cpu().detach().item())
begin2 = time.time()
optimizer.step()
arch_optimizer.step()
time_update += time.time() - begin2
cnt += 1
end = time.time()
print("time_data: ", time_data)
print("time_forward: ", time_forward)
print("time_update: ", time_update)
print("cnt: ", cnt)
return np.mean(losses)
def revised_arch_index(self):
if self.args.early_fix_arch:
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.01)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
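    # Forward pass used during search: sample one-hot op weights, evaluate all feature pairs in
    # parallel, and back-propagate a weighted MSE plus a log-probability term (loss_alpha) whose
    # gradient on log_alpha is rescaled by the per-row reward (DSNAS-style update).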
def forward(self, features):
regs = 0
self.weights = self._get_weights(self.log_alpha)
self.revised_arch_index()
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
cate_prob = F.softmax(self.log_alpha, dim=-1)
self.cate_prob = cate_prob.clone().detach()
loss_alpha = torch.log(
(self.weights * F.softmax(self.log_alpha, dim=-1)).sum(-1)).sum()
self.weights.requires_grad_()
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
        from joblib import Parallel, delayed
names_all = []
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
names_all.append([name1_embedding, name2_embedding, cur_weights.view(-1,), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(delayed(MixedBinary)(para1, para2, para3, para4) for para1,para2,para3,para4 in names_all)
inferences = sum(res)
# for name1 in self.columns:
# for name2 in self.columns:
# if self.args.multi_operation:
# cur_weights = self.weights[cur_index]
# max_index = cur_weights.argmax().item()
# cur_index += 1
# if self.args.ofm:
# name1_embedding = self.embedding_all[name1][max_index](features[name1])
# name2_embedding = self.embedding_all[name2][max_index](features[name2])
# else:
# name1_embedding = self.embedding_all[name1](features[name1])
# name2_embedding = self.embedding_all[name2](features[name2])
# regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
# name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
# name2_embedding_trans = self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
# inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
self.weights.grad = torch.zeros_like(self.weights)
(weighted_loss + loss_alpha).backward()
self.block_reward = self.weights.grad.data.sum(-1)
self.log_alpha.grad.data.mul_(self.block_reward.view(-1, 1))
return inferences, weighted_loss, loss_alpha
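    # Draw a one-hot op per row, either uniformly (args.random_sample) or from softmax(log_alpha).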
def _get_weights(self, log_alpha):
if self.args.random_sample:
uni = torch.ones_like(log_alpha)
m = torch.distributions.one_hot_categorical.OneHotCategorical(uni)
else:
m = torch.distributions.one_hot_categorical.OneHotCategorical(probs=F.softmax(log_alpha, dim=-1))
return m.sample()
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
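# Context-free baselines: Uniform picks an action at random; Egreedy keeps running reward
# averages per action and exploits the best one with probability 1 - epsion.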
class Uniform:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
return reward
def step(self, optimizer, arch_optimizer):
return 0
def genotype(self):
return "uniform", 0
class Egreedy:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
self.epsion = 0.2
self.action_rewards = {0:[0,1.0], 1:[0,1.0]}#total reward, action_num
self.max_action = 0
def recommend(self, features):
        max_reward = float("-inf")
for key in self.action_rewards:
if self.action_rewards[key][0]/self.action_rewards[key][1] > max_reward:
max_reward = self.action_rewards[key][0]/self.action_rewards[key][1]
self.max_action = key
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
simulate_index = []
for i in range(len(max_index)):
if np.random.random()<self.epsion:
simulate_index.append(max_index[i])
else:
simulate_index.append(self.max_action)
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
action_rewards = torch.sum(torch.mul(features["label"], pos_weights), dim=0)
action_nums = torch.sum(pos_weights, dim=0)
for key in self.action_rewards:
temp = self.action_rewards[key]
temp[0] += action_rewards[key].cpu().detach().item()
temp[1] += action_nums[key].cpu().detach().item()
self.action_rewards[key] = temp
return reward
def step(self, optimizer, arch_optimizer):
return 0
def genotype(self):
return "uniform", 0
class FM(Virtue):
def __init__(self, embedding_dim, reg, args):
super(FM, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
def recommend(self, features):
self.eval()
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
name1_embedding_trans = name1_embedding#self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = name2_embedding#self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
cnt = 0
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
cnt += 1
print("cnt: ", cnt)
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
weighted_loss.backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM", 0
class Plus(FM):
def __init__(self, embedding_dim, reg, args):
        super(Plus, self).__init__(embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([1.0, 0.0, 0.0, 0.0, 0.0]))
def genotype(self):
return "Plus", 0
class Max(FM):
def __init__(self, embedding_dim, reg, args):
        super(Max, self).__init__(embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 0.0, 1.0, 0.0, 0.0]))
def genotype(self):
return "Max", 0
class Min(FM):
def __init__(self, embedding_dim, reg, args):
        super(Min, self).__init__(embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 1.0, 0.0]))
def genotype(self):
return "Min", 0
class Concat(FM):
def __init__(self, embedding_dim, reg, args):
        super(Concat, self).__init__(embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 0.0, 1.0]))
def genotype(self):
return "Concat", 0
| 29,059 | 42.897281 | 165 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/baseline.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import utils
from collections import Counter
from torch.distributions.multivariate_normal import MultivariateNormal
PRIMITIVES_BINARY = ['plus', 'multiply', 'max', 'min', 'concat']
PRIMITIVES_NAS = [0, 2, 4, 8, 16]
SPACE_NAS = pow(len(PRIMITIVES_NAS), 5)
OPS = {
'plus': lambda p, q: p + q,
'multiply': lambda p, q: p * q,
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1),
'norm_0': lambda p: torch.ones_like(p),
'norm_0.5': lambda p: torch.sqrt(torch.abs(p) + 1e-7),
'norm_1': lambda p: torch.abs(p),
'norm_2': lambda p: p ** 2,
'I': lambda p: torch.ones_like(p),
'-I': lambda p: -torch.ones_like(p),
'sign': lambda p: torch.sign(p),
}
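# Virtue_v holds a mean and a std embedding table per mushroom feature (for variational
# embeddings); Virtue below holds plain point-estimate tables. Both add a 2-way action embedding.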
class Virtue_v(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False, first_order=False):
super(Virtue_v, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_mean = nn.ModuleDict({})
self.embedding_std = nn.ModuleDict({})
if first_order:
self.embedding_first_order = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
for name in self.columns:
self.embedding_mean[name] = nn.Embedding(embedding_num, embedding_dim)
self.embedding_std[name] = nn.Embedding(embedding_num, embedding_dim)
if first_order:
self.embedding_first_order[name] = nn.Embedding(embedding_num, 1)
self.embedding_action = nn.Embedding(2, embedding_dim)
if first_order:
self.embedding_action_first_order = nn.Embedding(2, 1)
class Virtue(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False, first_order=False):
super(Virtue, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_all = nn.ModuleDict({})
if first_order:
self.embedding_first_order = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
for name in self.columns:
self.embedding_all[name] = nn.Embedding(embedding_num, embedding_dim)
if first_order:
self.embedding_first_order[name] = nn.Embedding(embedding_num, 1)
self.embedding_action = nn.Embedding(2, embedding_dim)
if first_order:
self.embedding_action_first_order = nn.Embedding(2, 1)
class FM_v(Virtue_v):
"""
FM with EE
"""
def __init__(self, embedding_dim, reg, args):
super(FM_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num, first_order=args.first_order)
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
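    # Reparameterization trick with a fixed noise pool: std goes through softplus and the
    # sampled noise is scaled down by 0.01.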
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
inferences = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
inferences += torch.sum(name1_embedding*name2_embedding, dim=1, keepdim=True)
inferences_0 = 0 # inferences.clone() #action 0
inferences_1 = 0 # inferences.clone() #action_1
#features with action
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()), dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()), dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
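    # Replay the accumulated data; during the search phase each bandit round makes (epoch+1)*5
    # passes over it, afterwards a single pass.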
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
inferences += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
# features with action
inferences_0 = 0 #inferences.clone() # action 0
inferences_1 = 0 #inferences.clone() # action_1
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()),
dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()),
dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
(weighted_loss + kl/features["label"].shape[0]).backward()
# weighted_loss.backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM_v", 0
class FM(Virtue):
"""
FM without EE
"""
def __init__(self, embedding_dim, reg, args):
super(FM, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num, first_order=args.first_order)
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def recommend(self, features):
self.eval()
inferences = 0
#inferences_0 = 0
#inferences_1 = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
inferences += torch.sum(name1_embedding*name2_embedding, dim=1, keepdim=True)
#inferences_0 += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
#inferences_1 += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
inferences_0 = 0 #inferences.clone() # action 0
inferences_1 = 0 #inferences.clone() # action_1
#features with action
for name1 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()), dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()), dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
#inferences_0 = 0
#inferences_1 = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
inferences += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
#inferences_0 += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
#inferences_1 += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
# features with action
inferences_0 = 0 #inferences.clone() # action 0
inferences_1 = 0 #inferences.clone() # action_1
for name1 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()),
dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()),
dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
weighted_loss.backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM", 0
class FM_v2(Virtue_v):
"""
FM with EE and FC layer
"""
def __init__(self, embedding_dim, reg, args):
super(FM_v2, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num, first_order=args.first_order)
self.args = args
self._initialize_alphas()
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
self.FC[name1 + ":" + name2] = nn.Linear(embedding_dim, 1, bias=False)
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
inferences = 0
inferences_0 = 0
inferences_1 = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
inferences += self.FC[name1 + ":" + name2](name1_embedding * name2_embedding)
inferences_0 = inferences.clone() # action 0
inferences_1 = inferences.clone() # action_1
#features with action
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()), dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()), dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, step):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
cnt = 0
for step, features in enumerate(train_bandit):
cnt += 1
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
print("cnt: ", cnt)
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
inferences += self.FC[name1 + ":" + name2](name1_embedding * name2_embedding)
# features with action
inferences_0 = inferences.clone() # action 0
inferences_1 = inferences.clone() # action_1
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()),
dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()),
dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
(weighted_loss + kl/features["label"].shape[0]).backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM_v2", 0
class Random:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "Random", 0
class Egreedy:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
self.epsion = 0.2
self.action_rewards = {0:[0,1.0], 1:[0.1,1.0]}#total reward, action_num
self.max_action = 0
def recommend(self, features):
        max_reward = float("-inf")
for key in self.action_rewards:
if self.action_rewards[key][0]/self.action_rewards[key][1] > max_reward:
max_reward = self.action_rewards[key][0]/self.action_rewards[key][1]
self.max_action = key
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
simulate_index = []
for i in range(len(max_index)):
if np.random.random()<self.epsion:
simulate_index.append(max_index[i])
else:
simulate_index.append(self.max_action)
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
action_rewards = torch.sum(torch.mul(features["label"], pos_weights), dim=0)
action_nums = torch.sum(pos_weights, dim=0)
for key in self.action_rewards:
temp = self.action_rewards[key]
temp[0] += action_rewards[key].cpu().detach().item()
temp[1] += action_nums[key].cpu().detach().item()
self.action_rewards[key] = temp
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "Egreedy", 0
class Thompson:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
self.epsion = 0.2
self.action_rewards = {0:[0,0], 1:[0,0]}#total reward, action_num
self.max_action = 0
def recommend(self, features):
#Thompson sampling
values = []
num = 2
N = 10000
for index in range(num):
pos = np.random.beta(1+int(self.action_rewards[index][0]), 2+int(self.action_rewards[index][1]), N)
values.append(pos)
action_pos = np.vstack(values)
action_num = Counter(action_pos.argmax(axis=0))
action_percentage = []
for index in range(num):
action_percentage.append(action_num[index]/N)
simulate_index = []
for i in range(features["label"].shape[0]):
simulate_index.append(np.random.choice(range(num), p=action_percentage))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
action_rewards = torch.sum(torch.mul(features["label"], pos_weights), dim=0)
action_nums = torch.sum(pos_weights, dim=0)
for key in self.action_rewards:
temp = self.action_rewards[key]
temp[0] += action_rewards[key].cpu().detach().item()
temp[1] += action_nums[key].cpu().detach().item()
self.action_rewards[key] = temp
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "Thompson", 0
class LinUCB2:
def __init__(self, embedding_dim, reg, args):
self.Aa = torch.eye(119)
self.ba = torch.zeros(119).view(-1,1)
self.alpha = 0.1
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
action1_features = torch.zeros((features["label"].shape[0], 2))
action1_features[:, 0] = 1.0
action2_features = torch.zeros((features["label"].shape[0], 2))
action2_features[:, 1] = 1.0
action1_input = torch.cat([features["feature"], action1_features], dim=1)
action2_input = torch.cat([features["feature"], action2_features], dim=1)
inputs_all = [action1_input, action2_input]
theta = torch.matmul(torch.inverse(self.Aa), self.ba)
action1_score = torch.matmul(action1_input, theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(action1_input, torch.inverse(self.Aa)), action1_input), dim=-1)).view(-1,1)
action2_score = torch.matmul(action2_input, theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(action2_input, torch.inverse(self.Aa)), action2_input), dim=-1)).view(-1, 1)
score_all = torch.cat([action1_score, action2_score], dim=1)
max_index = score_all.argmax(dim=1)
print(Counter(max_index.numpy()))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
#update Aa and ba
for i in range(max_index.shape[0]):
cur_action = max_index[i].item()
cur_reward = features["label"][i, cur_action].item()
cur_feature = inputs_all[cur_action][i]
self.Aa += torch.matmul(cur_feature.view(-1,1), cur_feature.view(1,-1))
self.ba += cur_reward * cur_feature.view(-1,1)
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinUCB2", 0
#
class LinUCB:
def __init__(self, embedding_dim, reg, args):
self.action_num = 2
self.feature_dim = 117
self.Aa = []
self.ba = []
for i in range(self.action_num):
self.Aa.append(torch.eye(self.feature_dim))
self.ba.append(torch.zeros(self.feature_dim).view(-1,1))
self.alpha = 1.0
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
score_all = []
for i in range(self.action_num):
Aa = self.Aa[i]
ba = self.ba[i]
theta = torch.matmul(torch.inverse(Aa), ba)
score = torch.matmul(features["feature"], theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(features["feature"], torch.inverse(Aa)), features["feature"]), dim=-1)
).view(-1,1)
score_all.append(score)
score_all = torch.cat(score_all, dim=1)
max_index = score_all.argmax(dim=1)
print(Counter(max_index.numpy()))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
#update Aa and ba
for i in range(max_index.shape[0]):
cur_action = max_index[i].item()
cur_reward = features["label"][i, cur_action].item()
cur_feature = features["feature"][i]
Aa = self.Aa[cur_action]
ba = self.ba[cur_action]
Aa += torch.matmul(cur_feature.view(-1,1), cur_feature.view(1,-1))
ba += cur_reward * cur_feature.view(-1,1)
self.Aa[cur_action] = Aa
self.ba[cur_action] = ba
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinUCB", 0
class LinThompson:
def __init__(self, embedding_dim, reg, args):
self.action_num = 2
self.feature_dim = 117
self.Aa = []
self.ba = []
for i in range(self.action_num):
self.Aa.append(torch.eye(self.feature_dim))
self.ba.append(torch.zeros(self.feature_dim).view(-1, 1))
self.alpha = 1.0
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
score_all = []
for i in range(self.action_num):
Aa = self.Aa[i]
ba = self.ba[i]
mu = torch.matmul(torch.inverse(Aa), ba)
variance = torch.inverse(Aa)
try:
theta = MultivariateNormal(loc=mu.view(-1), covariance_matrix=self.alpha * variance).sample().view(-1,1)
            except Exception:
                print("MultivariateNormal sampling failed (covariance not positive definite); falling back to the posterior mean")
                theta = mu.view(-1,1)
score = torch.matmul(features["feature"], theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(features["feature"], torch.inverse(Aa)), features["feature"]), dim=-1)
).view(-1, 1)
score_all.append(score)
score_all = torch.cat(score_all, dim=1)
max_index = score_all.argmax(dim=1)
print(Counter(max_index.numpy()))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
# update Aa and ba
for i in range(max_index.shape[0]):
cur_action = max_index[i].item()
cur_reward = features["label"][i, cur_action].item()
cur_feature = features["feature"][i]
Aa = self.Aa[cur_action]
ba = self.ba[cur_action]
Aa += torch.matmul(cur_feature.view(-1, 1), cur_feature.view(1, -1))
ba += cur_reward * cur_feature.view(-1, 1)
self.Aa[cur_action] = Aa
self.ba[cur_action] = ba
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinThompson", 0
class LinEGreedy:
def __init__(self, embedding_dim, reg, args):
        self.Aa = torch.eye(119)  # 117 context features + 2 action-indicator dims
        self.ba = torch.zeros(119).view(-1,1)
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
self.epsion = 0.2
self.turn = True
def recommend(self, features):
action1_features = torch.zeros((features["label"].shape[0], 2))
action1_features[:, 0] = 1.0
action2_features = torch.zeros((features["label"].shape[0], 2))
action2_features[:, 1] = 1.0
action1_input = torch.cat([features["feature"], action1_features], dim=1)
action2_input = torch.cat([features["feature"], action2_features], dim=1)
inputs_all = [action1_input, action2_input]
theta = torch.matmul(torch.inverse(self.Aa), self.ba)
action1_score = torch.matmul(action1_input, theta)
action2_score = torch.matmul(action2_input, theta)
score_all = torch.cat([action1_score, action2_score], dim=1)
max_index = score_all.argmax(dim=1)
if self.turn:
simulate_index = []
for i in range(len(max_index)):
if np.random.random() < self.epsion:
simulate_index.append(max_index[i].item())
else:
simulate_index.append(np.random.randint(0, 2))
max_index = simulate_index
self.turn = False
print(Counter(max_index))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
#update Aa and ba
for i in range(len(max_index)):
cur_action = max_index[i]
cur_reward = features["label"][i, cur_action].item()
cur_feature = inputs_all[cur_action][i]
self.Aa += torch.matmul(cur_feature.view(-1,1), cur_feature.view(1,-1))
self.ba += cur_reward * cur_feature.view(-1,1)
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinEGreedy", 0
| 37,783 | 46.112219 | 141 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/vartional_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import utils
PRIMITIVES_BINARY = ['plus', 'multiply', 'max', 'min', 'concat']
PRIMITIVES_NAS = [0, 2, 4, 8, 16]
SPACE_NAS = pow(len(PRIMITIVES_NAS), 5)
OPS = {
'plus': lambda p, q: p + q,
'multiply': lambda p, q: p * q,
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1),
'norm_0': lambda p: torch.ones_like(p),
'norm_0.5': lambda p: torch.sqrt(torch.abs(p) + 1e-7),
'norm_1': lambda p: torch.abs(p),
'norm_2': lambda p: p ** 2,
'I': lambda p: torch.ones_like(p),
'-I': lambda p: -torch.ones_like(p),
'sign': lambda p: torch.sign(p),
}
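# MixedBinary evaluates every candidate interaction op on a pair of embeddings, passes each
# result through its own 2-way linear head, and combines the heads with the architecture weights.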
def MixedBinary(embedding_p, embedding_q, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w,primitive,fc in zip(weights,PRIMITIVES_BINARY,FC)]), 0)
class Virtue(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False):
super(Virtue, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_mean = nn.ModuleDict({})
self.embedding_std = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
if not ofm:
for name in self.columns:
self.embedding_mean[name] = nn.Embedding(embedding_num, embedding_dim)
self.embedding_std[name] = nn.Embedding(embedding_num, embedding_dim)
else:
for name in self.columns:
temp_mean = nn.ModuleList()
temp_std = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
temp_mean.append(nn.Embedding(embedding_num, embedding_dim))
temp_std.append(nn.Embedding(embedding_num, embedding_dim))
self.embedding_mean[name] = temp_mean
self.embedding_std[name] = temp_std
class Virtue2(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False):
super(Virtue2, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_all = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
if not ofm:
for name in self.columns:
self.embedding_all[name] = nn.Embedding(embedding_num, embedding_dim)
else:
for name in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
temp.append(nn.Embedding(embedding_num, embedding_dim))
self.embedding_all[name] = temp
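# DSNAS_v: DSNAS-style one-shot architecture search on top of the variational (mean/std)
# embeddings defined in Virtue.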
class DSNAS_v(Virtue):
def __init__(self, embedding_dim, reg, args):
super(DSNAS_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((num_op*num_op, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
name1_embedding_trans = name1_embedding#self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = name2_embedding#self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
if epoch < self.args.search_epoch:
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features, epoch)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
if epoch < self.args.search_epoch:
arch_optimizer.step()
return np.mean(losses)
def revised_arch_index(self, epoch):
if self.args.early_fix_arch:
if epoch < self.args.search_epoch:
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.10)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
if epoch >= self.args.search_epoch:
                # fix the chosen op for every pair once the search phase is over
max_index = torch.argmax(self.log_alpha, dim=-1)
for id in range(max_index.size(0)):
if id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [max_index[id].item(), self.log_alpha.detach().clone()[id, :]]
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
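    # During the search phase the sampled one-hot weights add a log-probability term whose
    # gradient on log_alpha is rescaled by the per-pair block reward; after args.search_epoch
    # only the network weights (plus the KL penalty) are trained.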
def forward(self, features, epoch):
self.weights = self._get_weights(self.log_alpha)
self.revised_arch_index(epoch)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
if epoch<self.args.search_epoch:
cate_prob = F.softmax(self.log_alpha, dim=-1)
self.cate_prob = cate_prob.clone().detach()
loss_alpha = torch.log(
(self.weights * F.softmax(self.log_alpha, dim=-1)).sum(-1)).sum()
self.weights.requires_grad_()
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
from joblib import Parallel, delayed
names_all = []
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
names_all.append(
[name1_embedding, name2_embedding, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(
delayed(MixedBinary)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
if not self.args.ofm:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
else:
for index in range(len(PRIMITIVES_BINARY)):
kl += self.KL_distance(self.embedding_mean[name][index].weight,
0 * torch.ones_like(self.embedding_mean[name][index].weight),
torch.log(1 + torch.exp(self.embedding_std[name][index].weight)),
0.1 * torch.ones_like(self.embedding_std[name][index].weight))
if epoch < self.args.search_epoch:
self.weights.grad = torch.zeros_like(self.weights)
(weighted_loss + loss_alpha + kl/features["label"].shape[0]).backward()
self.block_reward = self.weights.grad.data.sum(-1)
self.log_alpha.grad.data.mul_(self.block_reward.view(-1, 1))
return inferences, weighted_loss, loss_alpha
else:
(weighted_loss + kl/features["label"].shape[0]).backward()
return inferences, weighted_loss, 0
def _get_weights(self, log_alpha):
if self.args.random_sample:
uni = torch.ones_like(log_alpha)
m = torch.distributions.one_hot_categorical.OneHotCategorical(uni)
else:
m = torch.distributions.one_hot_categorical.OneHotCategorical(probs=F.softmax(log_alpha, dim=-1))
return m.sample()
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
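# NASP: searches over the upper-triangular feature pairs only (index1 < index2) using the
# point-estimate embeddings from Virtue2.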
class NASP(Virtue2):
def __init__(self, embedding_dim, reg, args):
super(NASP, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
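        # Architecture parameters: one row of logits per feature pair when
        # args.multi_operation is set, otherwise a single shared row over
        # PRIMITIVES_BINARY. mlp_p / mlp_q are optional element-wise transforms
        # that are only applied when args.trans is enabled.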
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((int(num_op*(num_op-1)/2), len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
self.rand_array = torch.randn(3000000)
def recommend(self, features):
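        # Acting step: pick each pair's operation by argmax over log_alpha
        # (respecting any early-fixed edges), score the candidate arms, take the
        # argmax arm as the action, then store the batch together with the 0/1
        # position weights for later replay training.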
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def MixedBinary_ofm(self, embedding_p_all, embedding_q_all, weights, FC):
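        # "One feature map per operation" variant: each primitive has its own
        # embedding pair, so weights, primitives, linear heads and embeddings are
        # zipped together before the weighted sum.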
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w, primitive, fc, embedding_p, embedding_q in zip(weights, PRIMITIVES_BINARY, FC, embedding_p_all, embedding_q_all)]), 0)
def MixedBinary_all(self, embedding_p, embedding_q, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w, primitive, fc in zip(weights, PRIMITIVES_BINARY, FC)]), 0)
def step(self, optimizer, arch_optimizer, epoch):
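        # Replay training over the batches collected by recommend(); the stored
        # data is revisited more times per call during the search phase.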
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features, epoch, search=True)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
arch_optimizer.step()
return np.mean(losses)
def forward(self, features, epoch, search):
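        # NASP-style straight-through update: the forward pass uses the one-hot
        # argmax of log_alpha as weights, and the gradient computed w.r.t. those
        # one-hot weights is copied back onto log_alpha at the end of this method.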
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
self.weights.requires_grad_()
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
        from joblib import Parallel, delayed
names_all = []
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
embedding_name1_all = []
embedding_name2_all = []
for index_name in range(len(PRIMITIVES_BINARY)):
name1_embedding = self.embedding_all[name1][index_name](features[name1])
embedding_name1_all.append(name1_embedding)
name2_embedding = self.embedding_all[name2][index_name](features[name2])
embedding_name2_all.append(name2_embedding)
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
if self.args.trans:
if self.args.ofm:
embedding_name1_all_temp = []
embedding_name2_all_temp = []
for index_temp in range(len(embedding_name1_all)):
embedding_name1_all_temp.append(self.mlp_p(embedding_name1_all[index_temp].view(-1, 1)).view(embedding_name1_all[index_temp].size()))
                                embedding_name2_all_temp.append(self.mlp_q(embedding_name2_all[index_temp].view(-1, 1)).view(embedding_name2_all[index_temp].size()))
embedding_name1_all = embedding_name1_all_temp
embedding_name2_all = embedding_name2_all_temp
else:
name1_embedding = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
if self.args.ofm:
names_all.append([embedding_name1_all, embedding_name2_all, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
else:
names_all.append(
[name1_embedding, name2_embedding, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
if self.args.ofm:
res = Parallel(n_jobs=8, backend="threading")(
delayed(self.MixedBinary_ofm)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
else:
res = Parallel(n_jobs=8, backend="threading")(
delayed(self.MixedBinary_all)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
weighted_loss.backward()
self.log_alpha.grad = self.weights.grad
return inferences, weighted_loss, 0
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
class NASP_v(Virtue):
def __init__(self, embedding_dim, reg, args):
super(NASP_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
        # initialize contextual info
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((int(num_op*(num_op-1)/2), len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
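        # Reparameterization trick with a fixed noise pool: std goes through a
        # softplus, a slice of the pre-generated rand_array supplies the noise,
        # and the perturbation is scaled down by 0.01.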
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
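        # Closed-form KL divergence KL(N(mean1, std1^2) || N(mean2, std2^2)),
        # summed over all embedding entries.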
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def MixedBinary_all(self, embedding_p, embedding_q, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w, primitive, fc in zip(weights, PRIMITIVES_BINARY, FC)]), 0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features, epoch, search=True)
# if epoch < self.args.search_epoch:
# arch_optimizer.zero_grad()
# output, error_loss, loss_alpha = self.forward(features, epoch, search=True)
# else:
# output, error_loss, loss_alpha = self.forward(features, epoch, search=False)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
arch_optimizer.step()
# if epoch < self.args.search_epoch:
# arch_optimizer.step()
return np.mean(losses)
def revised_arch_index(self, epoch):
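        # Early architecture fixing: during search, freeze an edge once the gap
        # between its top-2 softmax probabilities reaches 0.10; after the search
        # phase, freeze every remaining edge at its current argmax.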
if self.args.early_fix_arch:
if epoch < self.args.search_epoch:
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.10)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
if epoch >= self.args.search_epoch:
                # fix the arch.
max_index = torch.argmax(self.log_alpha, dim=-1)
for id in range(max_index.size(0)):
if id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [max_index[id].item(), self.log_alpha.detach().clone()[id, :]]
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
def forward(self, features, epoch, search):
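        # Same straight-through scheme as NASP.forward, but with variational
        # (mean/std) embeddings and a KL penalty towards N(0, 0.1^2) added to the loss.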
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
self.weights.requires_grad_()
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
from joblib import Parallel, delayed
names_all = []
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
names_all.append(
[name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(
delayed(self.MixedBinary_all)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
if not self.args.ofm:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
else:
for index in range(len(PRIMITIVES_BINARY)):
kl += self.KL_distance(self.embedding_mean[name][index].weight,
0 * torch.ones_like(self.embedding_mean[name][index].weight),
torch.log(1 + torch.exp(self.embedding_std[name][index].weight)),
0.1 * torch.ones_like(self.embedding_std[name][index].weight))
(weighted_loss + kl/features["label"].shape[0]).backward()
self.log_alpha.grad = self.weights.grad
return inferences, weighted_loss, 0
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
class MULTIPLY_v(Virtue):
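    # Fixed-operation baseline: no architecture search; the weights are a
    # hard-coded one-hot vector (index 1, i.e. the 'multiply' primitive, per the
    # class name). The MAX_v / PLUS_v / MIN_v / CONCAT_v subclasses below only
    # swap in a different one-hot.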
def __init__(self, embedding_dim, reg, args):
super(MULTIPLY_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
        # initialize contextual info
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0, 1, 0, 0, 0.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
name1_embedding_trans = name1_embedding#self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = name2_embedding#self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
from joblib import Parallel, delayed
names_all = []
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
names_all.append(
[name1_embedding, name2_embedding, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(
delayed(MixedBinary)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
if not self.args.ofm:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
else:
                for index in range(len(PRIMITIVES_BINARY)):
                    kl += self.KL_distance(self.embedding_mean[name][index].weight,
                                           0 * torch.ones_like(self.embedding_mean[name][index].weight),
                                           torch.log(1 + torch.exp(self.embedding_std[name][index].weight)),
                                           0.1 * torch.ones_like(self.embedding_std[name][index].weight))
(weighted_loss + kl/features["label"].shape[0]).backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM", 0
class MAX_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(MAX_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0, 0, 1, 0, 0])
self.weights = Variable(torch.Tensor([0.0, 0.0, 1.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "MAX", 0
class PLUS_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(PLUS_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([1, 0, 0, 0, 0.0])
self.weights = Variable(torch.Tensor([1.0, 0.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "PLUS", 0
class MIN_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(MIN_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0, 0, 0, 1, 0.0])
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 1.0, 0.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "MIN", 0
class CONCAT_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(CONCAT_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0, 0, 0, 0, 1.0])
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 0.0, 1.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "CONCAT", 0
| 50,650 | 49.905528 | 176 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/train.py | import numpy as np
import time
def train(train_queue, model, optimizer, arch_optimizer, logging):
rewards_all = []
losses_all = []
for step, features in enumerate(train_queue):
rewards = model.recommend(features)
rewards_all.append(rewards)
losses = model.step(optimizer, arch_optimizer, step)
losses_all.append(losses)
#print("losses: ", losses, "rewards: ", rewards)
print("model's log_alpha", model.log_alpha)
logging.info("step: %s, rewards: %s"%(step, rewards))
print("total_reward: ", sum(rewards_all))
g, gp = model.genotype()
return g, gp, np.mean(losses_all), sum(rewards_all)
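
# Hedged usage sketch (illustrative only, not part of the original script): how a
# model from models.py could be wired into this loop. The NASP import, the
# hyper-parameter values, and the Adam optimizers below are assumptions; the real
# entry point builds these from command-line args and utils.get_data_queue_*.
#
#   import torch
#   from models import NASP
#
#   model = NASP(embedding_dim=8, reg=1e-5, args=args)                  # `args` assumed
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   arch_optimizer = torch.optim.Adam(model.arch_parameters(), lr=1e-3)
#   genotype, genotype_p, mean_loss, total_reward = train(
#       train_queue, model, optimizer, arch_optimizer, logging)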
| 608 | 32.833333 | 66 | py |