text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Fits trapezoid model to provided ts,fs
<END_TASK>
<USER_TASK:>
Description:
def fit_traptransit(ts,fs,p0):
    """Fit the trapezoid transit model to the given time/flux series.

    :param ts: observation times.
    :param fs: observed fluxes.
    :param p0: initial guess for the trapezoid model parameters.
    :return: best-fit parameter vector.
    :raises NoFitError: if the least-squares fit does not converge.
    """
    best, flag = leastsq(traptransit_resid, p0, args=(ts, fs))
    # leastsq signals convergence with an integer flag in {1,2,3,4}
    if flag not in (1, 2, 3, 4):
        raise NoFitError
    #logging.debug('success = {}'.format(flag))
    return best
<SYSTEM_TASK:>
Replaces the contents of a file with its decrypted counterpart, storing the
<END_TASK>
<USER_TASK:>
Description:
def backup(path, password_file=None):
    """
    Replaces the contents of a file with its decrypted counterpart, storing the
    original encrypted version and a hash of the file contents for later
    retrieval.

    :param path: path to the encrypted file.
    :param password_file: (optional) path to the vault password file.
    """
    vault = VaultLib(get_vault_password(password_file))
    # Read as bytes: the payload is written back in binary mode below,
    # and text mode would break on Python 3.
    with open(path, 'rb') as f:
        encrypted_data = f.read()

    # Normally we'd just try and catch the exception, but the
    # exception raised here is not very specific (just
    # `AnsibleError`), so this feels safer to avoid suppressing
    # other things that might go wrong.
    if vault.is_encrypted(encrypted_data):
        decrypted_data = vault.decrypt(encrypted_data)

        # Create atk vault files
        atk_path = os.path.join(ATK_VAULT, path)
        mkdir_p(atk_path)
        # ... encrypted
        with open(os.path.join(atk_path, 'encrypted'), 'wb') as f:
            f.write(encrypted_data)
        # ... hash -- hexdigest() is str, so encode for the binary handle
        # (the original wrote str to a 'wb' file: TypeError on Python 3).
        with open(os.path.join(atk_path, 'hash'), 'wb') as f:
            f.write(hashlib.sha1(decrypted_data).hexdigest().encode('utf-8'))

        # Replace encrypted file with decrypted one
        with open(path, 'wb') as f:
            f.write(decrypted_data)
<SYSTEM_TASK:>
Retrieves a file from the atk vault and restores it to its original
<END_TASK>
<USER_TASK:>
Description:
def restore(path, password_file=None):
    """
    Retrieves a file from the atk vault and restores it to its original
    location, re-encrypting it if it has changed.

    :param path: path to original file
    :param password_file: (optional) path to the vault password file.
    """
    vault = VaultLib(get_vault_password(password_file))
    atk_path = os.path.join(ATK_VAULT, path)

    # Load stored data
    with open(os.path.join(atk_path, 'encrypted'), 'rb') as f:
        old_data = f.read()
    with open(os.path.join(atk_path, 'hash'), 'rb') as f:
        old_hash = f.read()

    # Load new data
    with open(path, 'rb') as f:
        new_data = f.read()
    # Encode so the comparison is bytes-vs-bytes: old_hash was read in
    # binary mode, and on Python 3 the original compared bytes against a
    # str hexdigest, so the file was always re-encrypted.
    new_hash = hashlib.sha1(new_data).hexdigest().encode('utf-8')

    # Determine whether to re-encrypt
    if old_hash != new_hash:
        new_data = vault.encrypt(new_data)
    else:
        new_data = old_data

    # Update file
    with open(path, 'wb') as f:
        f.write(new_data)

    # Clean atk vault
    os.remove(os.path.join(atk_path, 'encrypted'))
    os.remove(os.path.join(atk_path, 'hash'))
<SYSTEM_TASK:>
Appends stars from another StarPopulations, in place.
<END_TASK>
<USER_TASK:>
Description:
def append(self, other):
    """Appends stars from another StarPopulation, in place.

    :param other:
        Another :class:`StarPopulation`; must have same columns as ``self``.
    :raises TypeError: if ``other`` is not a :class:`StarPopulation`.
    :raises ValueError: if the two column sets differ.
    """
    if not isinstance(other, StarPopulation):
        raise TypeError('Only StarPopulation objects can be appended to a StarPopulation.')
    if not np.all(self.stars.columns == other.stars.columns):
        raise ValueError('Two populations must have same columns to combine them.')

    # Existing constraints refer to the old row set and no longer apply.
    if len(self.constraints) > 0:
        logging.warning('All constraints are cleared when appending another population.')

    self.stars = pd.concat((self.stars, other.stars))

    # Orbit populations are combined only when both sides define one.
    if self.orbpop is not None and other.orbpop is not None:
        self.orbpop = self.orbpop + other.orbpop
<SYSTEM_TASK:>
Bandpasses for which StarPopulation has magnitude data
<END_TASK>
<USER_TASK:>
Description:
def bands(self):
    """
    Bandpasses for which StarPopulation has magnitude data.

    :return: list of ``self.stars`` column names containing ``'_mag'``.
    """
    return [col for col in self.stars.columns if re.search('_mag', col)]
<SYSTEM_TASK:>
Boolean array showing which stars pass all distribution constraints.
<END_TASK>
<USER_TASK:>
Description:
def distok(self):
    """
    Boolean array showing which stars pass all distribution constraints.

    A "distribution constraint" affects the distribution of stars rather
    than just their number; names in ``self.distribution_skip`` are
    ignored here.
    """
    mask = np.ones(len(self.stars), dtype=bool)
    for constraint in self.constraints.values():
        if constraint.name not in self.distribution_skip:
            mask &= constraint.ok
    return mask
<SYSTEM_TASK:>
Boolean array showing which stars pass all count constraints.
<END_TASK>
<USER_TASK:>
Description:
def countok(self):
    """
    Boolean array showing which stars pass all count constraints.

    A "count constraint" affects the number of stars; names in
    ``self.selectfrac_skip`` are ignored here.
    """
    mask = np.ones(len(self.stars), dtype=bool)
    for constraint in self.constraints.values():
        if constraint.name not in self.selectfrac_skip:
            mask &= constraint.ok
    return mask
<SYSTEM_TASK:>
Makes a 2d density histogram of two given properties
<END_TASK>
<USER_TASK:>
Description:
def prophist2d(self,propx,propy, mask=None,
               logx=False,logy=False,
               fig=None,selected=False,**kwargs):
    """Makes a 2d density histogram of two given properties

    :param propx,propy:
        Names of properties to histogram.  Must be names of columns
        in ``self.stars`` table.
    :param mask: (optional)
        Boolean mask (``True`` is good) to say which indices to plot.
        Must be same length as ``self.stars``.
    :param logx,logy: (optional)
        Whether to plot the log10 of x and/or y properties.
    :param fig: (optional)
        Argument passed to :func:`plotutils.setfig`.
    :param selected: (optional)
        If ``True``, then only the "selected" stars (that is, stars
        obeying all distribution constraints attached to this object)
        will be plotted.  In this case, ``mask`` will be ignored.
    :param kwargs:
        Additional keyword arguments passed to :func:`plotutils.plot2dhist`.
    """
    # Choose indices: an explicit mask wins; otherwise either the
    # selected subset's index or the full table's index.
    if mask is not None:
        inds = np.where(mask)[0]
    else:
        if selected:
            inds = self.selected.index
        else:
            inds = self.stars.index
    if selected:
        # NOTE(review): positional .iloc is used with ``inds``, which may
        # hold index *labels* here (self.selected.index) — they agree only
        # for a default RangeIndex.  Confirm before changing.
        xvals = self.selected[propx].iloc[inds].values
        yvals = self.selected[propy].iloc[inds].values
    else:
        if mask is None:
            mask = np.ones_like(self.stars.index)
        xvals = self.stars[mask][propx].values
        yvals = self.stars[mask][propy].values
    #forward-hack for EclipsePopulations...
    #TODO: reorganize.
    if propx=='depth' and hasattr(self,'depth'):
        xvals = self.depth.iloc[inds].values
    if propy=='depth' and hasattr(self,'depth'):
        yvals = self.depth.iloc[inds].values
    if logx:
        xvals = np.log10(xvals)
    if logy:
        yvals = np.log10(yvals)
    plot2dhist(xvals,yvals,fig=fig,**kwargs)
    plt.xlabel(propx)
    plt.ylabel(propy)
<SYSTEM_TASK:>
Plots a 1-d histogram of desired property.
<END_TASK>
<USER_TASK:>
Description:
def prophist(self,prop,fig=None,log=False, mask=None,
             selected=False,**kwargs):
    """Plots a 1-d histogram of desired property.

    :param prop:
        Name of property to plot.  Must be column of ``self.stars``.
    :param fig: (optional)
        Argument for :func:`plotutils.setfig`
    :param log: (optional)
        Whether to plot the histogram of log10 of the property.
    :param mask: (optional)
        Boolean array (length of ``self.stars``) to say
        which indices to plot (``True`` is good).
    :param selected: (optional)
        If ``True``, then only the "selected" stars (that is, stars
        obeying all distribution constraints attached to this object)
        will be plotted.  In this case, ``mask`` will be ignored.
    :param **kwargs:
        Additional keyword arguments passed to :func:`plt.hist`.
    """
    setfig(fig)
    # Choose indices: an explicit mask wins; otherwise either the
    # selected subset's index or the full table's index.
    inds = None
    if mask is not None:
        inds = np.where(mask)[0]
    elif inds is None:
        if selected:
            #inds = np.arange(len(self.selected))
            inds = self.selected.index
        else:
            #inds = np.arange(len(self.stars))
            inds = self.stars.index
    if selected:
        vals = self.selected[prop].values#.iloc[inds] #invalidates mask?
    else:
        # NOTE(review): positional .iloc is used with ``inds``, which may
        # hold index labels here — agrees only for a default RangeIndex.
        vals = self.stars[prop].iloc[inds].values

    # Forward-hack for EclipsePopulation: scale depth by dilution_factor
    # (presumably a blending correction — confirm).
    if prop=='depth' and hasattr(self,'depth'):
        vals *= self.dilution_factor[inds]

    if log:
        h = plt.hist(np.log10(vals),**kwargs)
    else:
        h = plt.hist(vals,**kwargs)
    plt.xlabel(prop)
<SYSTEM_TASK:>
Returns information about effect of constraints on population.
<END_TASK>
<USER_TASK:>
Description:
def constraint_stats(self,primarylist=None):
    """Returns information about effect of constraints on population.

    :param primarylist:
        List of constraint names that you want specific information on
        (i.e., not blended within "multiple constraints".)
    :return:
        ``dict`` of what fraction of population is ruled out by
        each constraint, including a "multiple constraints" entry.
    """
    if primarylist is None:
        primarylist = []
    n = len(self.stars)
    # primaryOK: passes every "primary" constraint.
    # tot_reject: per-star count of violated (non-skipped) constraints.
    primaryOK = np.ones(n).astype(bool)
    tot_reject = np.zeros(n)
    for name in self.constraints:
        if name in self.selectfrac_skip:
            continue
        c = self.constraints[name]
        if name in primarylist:
            primaryOK &= c.ok
        tot_reject += ~c.ok
    primary_rejected = ~primaryOK
    # Violation count not attributable to a primary constraint.
    secondary_rejected = tot_reject - primary_rejected

    # Fraction rejected *solely* by each individual secondary constraint.
    lone_reject = {}
    for name in self.constraints:
        if name in primarylist or name in self.selectfrac_skip:
            continue
        c = self.constraints[name]
        lone_reject[name] = ((secondary_rejected==1) & (~primary_rejected) & (~c.ok)).sum()/float(n)

    mult_rejected = (secondary_rejected > 1) & (~primary_rejected)
    not_rejected = ~(tot_reject.astype(bool))
    primary_reject_pct = primary_rejected.sum()/float(n)
    mult_reject_pct = mult_rejected.sum()/float(n)
    not_reject_pct = not_rejected.sum()/float(n)

    tot = 0
    results = {}
    results['pri'] = primary_reject_pct
    tot += primary_reject_pct
    for name in lone_reject:
        results[name] = lone_reject[name]
        tot += lone_reject[name]
    results['multiple constraints'] = mult_reject_pct
    tot += mult_reject_pct
    results['remaining'] = not_reject_pct
    tot += not_reject_pct

    # Sanity check: the categories should partition the population.
    # NOTE(review): exact float equality — may warn spuriously from
    # rounding; math.isclose would be safer.
    if tot != 1:
        logging.warning('total adds up to: %.2f (%s)' % (tot,self.model))
    return results
<SYSTEM_TASK:>
Makes piechart illustrating constraints on population
<END_TASK>
<USER_TASK:>
Description:
def constraint_piechart(self,primarylist=None,
                        fig=None,title='',colordict=None,
                        legend=True,nolabels=False):
    """Makes piechart illustrating constraints on population

    :param primarylist: (optional)
        List of most important constraints to show separately (see
        :func:`StarPopulation.constraint_stats`)
    :param fig: (optional)
        Passed to :func:`plotutils.setfig`.
    :param title: (optional)
        Title for pie chart
    :param colordict: (optional)
        Dictionary describing colors (keys are constraint names).
    :param legend: (optional)
        ``bool`` indicating whether to display a legend.
    :param nolabels: (optional)
        If ``True``, then leave out wedge labels.
    """
    setfig(fig,figsize=(6,6))
    stats = self.constraint_stats(primarylist=primarylist)
    if primarylist is None:
        primarylist = []
    # Label for the "primary" wedge: single name, or comma-joined names.
    if len(primarylist)==1:
        primaryname = primarylist[0]
    else:
        primaryname = ','.join(primarylist)
    fracs = []
    labels = []
    explode = []
    colors = []
    fracs.append(stats['remaining']*100)
    labels.append('remaining')
    explode.append(0.05)
    colors.append('b')
    # Only draw wedges that would be at least 0.5% of the pie.
    if 'pri' in stats and stats['pri']>=0.005:
        fracs.append(stats['pri']*100)
        labels.append(primaryname)
        explode.append(0)
        if colordict is not None:
            colors.append(colordict[primaryname])
    for name in stats:
        if name in ('pri', 'multiple constraints', 'remaining'):
            continue
        fracs.append(stats[name]*100)
        labels.append(name)
        explode.append(0)
        if colordict is not None:
            colors.append(colordict[name])
    if stats['multiple constraints'] >= 0.005:
        fracs.append(stats['multiple constraints']*100)
        labels.append('multiple constraints')
        explode.append(0)
        colors.append('w')
    autopct = '%1.1f%%'
    if legend:
        # Build legend labels *before* clearing ``labels`` -- the original
        # crashed here when nolabels=True was combined with legend=True
        # (it set labels=None and then tried to enumerate it).
        legendlabels = ['%s (%.1f%%)' % (l,f) for l,f in zip(labels,fracs)]
        labels = None
        autopct = ''
    elif nolabels:
        labels = None
    if colordict is None:
        plt.pie(fracs,labels=labels,autopct=autopct,explode=explode)
    else:
        plt.pie(fracs,labels=labels,autopct=autopct,explode=explode,
                colors=colors)
    if legend:
        plt.legend(legendlabels,bbox_to_anchor=(-0.05,0),
                   loc='lower left',prop={'size':10})
    plt.title(title)
<SYSTEM_TASK:>
Constraints applied to the population.
<END_TASK>
<USER_TASK:>
Description:
def constraints(self):
    """
    Constraints applied to the population (lazily created).
    """
    if not hasattr(self, '_constraints'):
        self._constraints = ConstraintDict()
    return self._constraints
<SYSTEM_TASK:>
Constraints applied to the population, but temporarily removed.
<END_TASK>
<USER_TASK:>
Description:
def hidden_constraints(self):
    """
    Constraints applied to the population, but temporarily removed
    (lazily created).
    """
    if not hasattr(self, '_hidden_constraints'):
        self._hidden_constraints = ConstraintDict()
    return self._hidden_constraints
<SYSTEM_TASK:>
Apply a constraint to the population
<END_TASK>
<USER_TASK:>
Description:
def apply_constraint(self,constraint,selectfrac_skip=False,
                     distribution_skip=False,overwrite=False):
    """Apply a constraint to the population.

    :param constraint:
        :class:`Constraint` to apply.
    :param selectfrac_skip: (optional)
        If ``True``, this constraint will not count toward diminishing
        the selected fraction of the population.
    :param distribution_skip: (optional)
        If ``True``, this constraint will not count toward shaping the
        distribution of the population.
    :param overwrite: (optional)
        If ``True``, re-apply even if a same-named constraint exists.
    """
    # Grab property values up front; reassigned at the end.
    all_constraints = self.constraints
    skip_selectfrac = self.selectfrac_skip
    skip_distribution = self.distribution_skip

    if constraint.name in all_constraints and not overwrite:
        logging.warning('constraint already applied: {}'.format(constraint.name))
        return

    all_constraints[constraint.name] = constraint
    if selectfrac_skip:
        skip_selectfrac.append(constraint.name)
    if distribution_skip:
        skip_distribution.append(constraint.name)

    # Forward-looking for EclipsePopulation: its cached KDE must be
    # rebuilt whenever a constraint changes.
    if hasattr(self, '_make_kde'):
        self._make_kde()

    self.constraints = all_constraints
    self.selectfrac_skip = skip_selectfrac
    self.distribution_skip = skip_distribution
<SYSTEM_TASK:>
Re-apply constraint that had been removed
<END_TASK>
<USER_TASK:>
Description:
def replace_constraint(self,name,selectfrac_skip=False,distribution_skip=False):
    """
    Re-apply constraint that had been removed.

    :param name:
        Name of hidden constraint to restore.
    :param selectfrac_skip,distribution_skip: (optional)
        Same as :func:`StarPopulation.apply_constraint`
    """
    hidden = self.hidden_constraints
    try:
        constraint = hidden[name]
    except KeyError:
        logging.warning('Constraint {} not available for replacement.'.format(name))
    else:
        self.apply_constraint(constraint, selectfrac_skip=selectfrac_skip,
                              distribution_skip=distribution_skip)
        del hidden[name]
    self.hidden_constraints = hidden
<SYSTEM_TASK:>
Apply constraint that constrains property.
<END_TASK>
<USER_TASK:>
Description:
def constrain_property(self,prop,lo=-np.inf,hi=np.inf,
                       measurement=None,thresh=3,
                       selectfrac_skip=False,distribution_skip=False):
    """Apply constraint that constrains property.

    :param prop:
        Name of property.  Must be column in ``self.stars``.
    :type prop:
        ``str``
    :param lo,hi: (optional)
        Low and high allowed values for ``prop``; defaults leave either
        side unbounded.
    :param measurement: (optional)
        Value and error of measurement in form ``(value, error)``.
    :param thresh: (optional)
        Number of "sigma" to allow for measurement constraint.
    :param selectfrac_skip,distribution_skip:
        Passed to :func:`StarPopulation.apply_constraint`.
    """
    # Constraining the same property again replaces the old constraint.
    if prop in self.constraints:
        logging.info('re-doing {} constraint'.format(prop))
        self.remove_constraint(prop)

    vals = getattr(self.stars, prop)
    if measurement is None:
        new_constraint = RangeConstraint(vals, lo=lo, hi=hi, name=prop)
    else:
        val, dval = measurement
        new_constraint = MeasurementConstraint(vals, val, dval, name=prop,
                                               thresh=thresh)
    self.apply_constraint(new_constraint,
                          selectfrac_skip=selectfrac_skip,
                          distribution_skip=distribution_skip)
<SYSTEM_TASK:>
Constrains change in RV to be less than limit over time dt.
<END_TASK>
<USER_TASK:>
Description:
def apply_trend_constraint(self, limit, dt, distribution_skip=False,
                           **kwargs):
    """
    Constrains change in RV to be less than limit over time dt.

    Only works if ``dRV`` and ``Plong`` attributes are defined
    for population.

    :param limit:
        Radial velocity limit on trend.  A bare number is interpreted
        as m/s; otherwise pass an :class:`astropy.units.Quantity`.
    :param dt:
        Time baseline of RV observations.  A bare number is interpreted
        as days; otherwise pass an :class:`astropy.units.Quantity`.
    :param distribution_skip:
        Passed to :func:`StarPopulation.apply_constraint`.
    :param **kwargs:
        Additional keyword arguments passed to
        :func:`StarPopulation.apply_constraint`.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # Quantity subclasses, which the original check would wrongly
    # re-multiply by units.
    if not isinstance(limit, Quantity):
        limit = limit * u.m/u.s
    if not isinstance(dt, Quantity):
        dt = dt * u.day

    dRVs = np.absolute(self.dRV(dt))
    # Pass if the RV trend is below the limit, OR if Plong > 4*dt
    # (presumably too long a period for the trend to show — confirm).
    c1 = UpperLimit(dRVs, limit)
    c2 = LowerLimit(self.Plong, dt*4)

    self.apply_constraint(JointConstraintOr(c1,c2,name='RV monitoring',
                                            Ps=self.Plong,dRVs=dRVs),
                          distribution_skip=distribution_skip, **kwargs)
<SYSTEM_TASK:>
Apply contrast-curve constraint to population.
<END_TASK>
<USER_TASK:>
Description:
def apply_cc(self, cc, distribution_skip=False,
             **kwargs):
    """
    Apply contrast-curve constraint to population.

    Only works if object has ``Rsky``, ``dmag`` attributes.

    :param cc:
        Contrast curve.
    :type cc:
        :class:`ContrastCurveConstraint`
    :param distribution_skip:
        Passed to :func:`StarPopulation.apply_constraint`.
    :param **kwargs:
        Additional keyword arguments passed to
        :func:`StarPopulation.apply_constraint`.
    """
    seps = self.Rsky.to('arcsec').value
    mag_diffs = self.dmag(cc.band)
    constraint = ContrastCurveConstraint(seps, mag_diffs, cc, name=cc.name)
    self.apply_constraint(constraint,
                          distribution_skip=distribution_skip, **kwargs)
<SYSTEM_TASK:>
Applies "velocity contrast curve" to population.
<END_TASK>
<USER_TASK:>
Description:
def apply_vcc(self, vcc, distribution_skip=False,
              **kwargs):
    """
    Applies "velocity contrast curve" to population.

    That is, the constraint that comes from not seeing two sets
    of spectral lines in a high resolution spectrum.

    Only works if population has ``dmag`` and ``RV`` attributes.

    :param vcc:
        Velocity contrast curve; dmag vs. delta-RV.
    :type vcc:
        :class:`VelocityContrastCurveConstraint`
    :param distribution_skip:
        Passed to :func:`StarPopulation.apply_constraint`.
    :param **kwargs:
        Additional keyword arguments passed to
        :func:`StarPopulation.apply_constraint`.
    """
    rv_vals = self.RV.value
    mag_diffs = self.dmag(vcc.band)
    constraint = VelocityContrastCurveConstraint(rv_vals, mag_diffs, vcc,
                                                 name='secondary spectrum')
    self.apply_constraint(constraint,
                          distribution_skip=distribution_skip, **kwargs)
<SYSTEM_TASK:>
Adds a constraint that rejects everything with Rsky > maxrad
<END_TASK>
<USER_TASK:>
Description:
def set_maxrad(self,maxrad, distribution_skip=True):
    """
    Adds a constraint that rejects everything with Rsky > maxrad.

    Requires ``Rsky`` attribute, which should always have units.

    :param maxrad:
        The maximum angular value of Rsky.
    :type maxrad:
        :class:`astropy.units.Quantity`
    :param distribution_skip:
        Passed to :func:`StarPopulation.apply_constraint`;
        defaults to ``True`` here.
    """
    self.maxrad = maxrad
    constraint = UpperLimit(self.Rsky, maxrad, name='Max Rsky')
    self.apply_constraint(constraint,
                          overwrite=True,
                          distribution_skip=distribution_skip)
<SYSTEM_TASK:>
A DataFrame representing all constraints, hidden or not
<END_TASK>
<USER_TASK:>
Description:
def constraint_df(self):
    """
    A DataFrame of the boolean masks of all constraints, hidden or not.
    """
    result = pd.DataFrame()
    for group in (self.constraints, self.hidden_constraints):
        for name, constraint in group.items():
            result[name] = constraint.ok
    return result
<SYSTEM_TASK:>
Saves to HDF5 file.
<END_TASK>
<USER_TASK:>
Description:
def save_hdf(self,filename,path='',properties=None,
             overwrite=False, append=False):
    """Saves to HDF5 file.

    Subclasses should be sure to define ``_properties`` attribute to
    ensure that all correct attributes get saved.  Load a saved
    population with :func:`StarPopulation.load_hdf`.

    Example usage::

        >>> from vespa.stars import Raghavan_BinaryPopulation, StarPopulation
        >>> pop = Raghavan_BinaryPopulation(1., n=1000)
        >>> pop.save_hdf('test.h5')
        >>> pop2 = StarPopulation.load_hdf('test.h5')
        >>> pop == pop2
        True
        >>> pop3 = Raghavan_BinaryPopulation.load_hdf('test.h5')
        >>> pop3 == pop2
        True

    :param filename:
        Name of HDF file.
    :param path: (optional)
        Path within HDF file to save object.
    :param properties: (optional)
        Names of any properties (in addition to those defined in
        ``_properties`` attribute) that you wish to save.  (This is an
        old keyword, and should probably be removed.  Feel free to
        ignore it.)
    :param overwrite: (optional)
        Whether to overwrite file if it already exists.  If ``True``,
        then any existing file will be deleted before object is saved.
        Use ``append`` if you don't wish this to happen.
    :param append: (optional)
        If ``True``, then if the file exists, then only the particular
        path in the file will get written/overwritten.  If ``False``
        and both file and path exist, then an ``IOError`` will be
        raised.  If ``False`` and file exists but not path, then no
        error will be raised.
    """
    # If the target path already exists in the file, either wipe the
    # whole file (overwrite) or require append; otherwise it's an error.
    if os.path.exists(filename):
        with pd.HDFStore(filename) as store:
            if path in store:
                if overwrite:
                    os.remove(filename)
                elif not append:
                    raise IOError('{} in {} exists. '.format(path,filename) +
                                  'Set either overwrite or append option.')

    # Collect instance attributes named in _properties (plus any extras
    # passed via the ``properties`` keyword).
    if properties is None:
        properties = {}
    for prop in self._properties:
        properties[prop] = getattr(self, prop)

    self.stars.to_hdf(filename,'{}/stars'.format(path))
    self.constraint_df.to_hdf(filename,'{}/constraints'.format(path))
    if self.orbpop is not None:
        self.orbpop.save_hdf(filename, path=path+'/orbpop')

    # Stash metadata on the 'stars' node so load_hdf can rebuild the
    # object, including its concrete subclass (via ``poptype``).
    with pd.HDFStore(filename) as store:
        attrs = store.get_storer('{}/stars'.format(path)).attrs
        attrs.selectfrac_skip = self.selectfrac_skip
        attrs.distribution_skip = self.distribution_skip
        attrs.name = self.name
        attrs.poptype = type(self)
        attrs.properties = properties
<SYSTEM_TASK:>
Loads StarPopulation from .h5 file
<END_TASK>
<USER_TASK:>
Description:
def load_hdf(cls, filename, path=''):
    """Loads StarPopulation from .h5 file.

    Correct properties should be restored to object, and object
    will be original type that was saved.  Complement to
    :func:`StarPopulation.save_hdf`.

    Example usage::

        >>> from vespa.stars import Raghavan_BinaryPopulation, StarPopulation
        >>> pop = Raghavan_BinaryPopulation(1., n=1000)
        >>> pop.save_hdf('test.h5')
        >>> pop2 = StarPopulation.load_hdf('test.h5')
        >>> pop == pop2
        True
        >>> pop3 = Raghavan_BinaryPopulation.load_hdf('test.h5')
        >>> pop3 == pop2
        True

    :param filename:
        HDF file with saved :class:`StarPopulation`.
    :param path:
        Path within HDF file.
    :return:
        :class:`StarPopulation` or appropriate subclass; whatever
        was saved with :func:`StarPopulation.save_hdf`.
    """
    stars = pd.read_hdf(filename,path+'/stars')
    constraint_df = pd.read_hdf(filename,path+'/constraints')
    with pd.HDFStore(filename) as store:
        # Detect which kind of orbit population (if any) was saved.
        has_orbpop = '{}/orbpop/df'.format(path) in store
        has_triple_orbpop = '{}/orbpop/long/df'.format(path) in store
        attrs = store.get_storer('{}/stars'.format(path)).attrs
        # Instantiate the concrete subclass that was saved.
        poptype = attrs.poptype
        new = poptype()
        #if poptype != type(self):
        #    raise TypeError('Saved population is {}. Please instantiate proper class before loading.'.format(poptype))
        distribution_skip = attrs.distribution_skip
        selectfrac_skip = attrs.selectfrac_skip
        # NOTE(review): ``name`` is read but never assigned to ``new``
        # below — confirm whether that is intentional.
        name = attrs.name
        for kw,val in attrs.properties.items():
            setattr(new, kw, val)
    #load orbpop if there
    orbpop = None
    if has_orbpop:
        orbpop = OrbitPopulation.load_hdf(filename, path=path+'/orbpop')
    elif has_triple_orbpop:
        orbpop = TripleOrbitPopulation.load_hdf(filename, path=path+'/orbpop')
    new.stars = stars
    new.orbpop = orbpop
    # Re-apply saved constraints from their stored boolean masks.
    for n in constraint_df.columns:
        mask = np.array(constraint_df[n])
        c = Constraint(mask,name=n)
        sel_skip = n in selectfrac_skip
        dist_skip = n in distribution_skip
        new.apply_constraint(c,selectfrac_skip=sel_skip,
                             distribution_skip=dist_skip)
    return new
<SYSTEM_TASK:>
Difference in magnitude between primary and secondary stars
<END_TASK>
<USER_TASK:>
Description:
def dmag(self,band):
    """
    Difference in magnitude between primary and secondary stars
    (secondary minus primary).

    :param band:
        Photometric bandpass.
    """
    primary = self.stars['{}_mag_A'.format(band)]
    secondary = self.stars['{}_mag_B'.format(band)]
    return secondary - primary
<SYSTEM_TASK:>
Distribution of projected separations
<END_TASK>
<USER_TASK:>
Description:
def rsky_distribution(self,rmax=None,smooth=0.1,nbins=100):
    """
    Distribution of projected separations.

    Returns a :class:`simpledists.Hist_Distribution` object.

    :param rmax: (optional)
        Maximum radius for the distribution; defaults to ``self.maxrad``
        if set, otherwise the 99th percentile of ``Rsky``.
    :param smooth: (optional)
        Smoothing parameter for :class:`simpledists.Hist_Distribution`.
    :param nbins: (optional)
        Number of bins for histogram.
    :return:
        :class:`simpledists.Hist_Distribution` describing Rsky distribution.
    """
    if rmax is None:
        if hasattr(self, 'maxrad'):
            rmax = self.maxrad
        else:
            rmax = np.percentile(self.Rsky, 99)
    return dists.Hist_Distribution(self.Rsky.value, bins=nbins,
                                   maxval=rmax, smooth=smooth)
<SYSTEM_TASK:>
Function that generates population.
<END_TASK>
<USER_TASK:>
Description:
def generate(self, M, age=9.6, feh=0.0,
             ichrone='mist', n=1e4, bands=None, **kwargs):
    """
    Function that generates population.

    Called by ``__init__`` if ``M`` is passed.

    :param M: primary mass, scalar or array.
    :param age,feh: (optional) age (presumably log10(age/yr), given the
        default of 9.6 — confirm) and metallicity for the isochrone.
    :param ichrone: (optional) isochrone grid name or object.
    :param n: (optional) number of instances to simulate; ignored if
        ``M`` is an array (its size is used instead).
    :param bands: (optional) photometric bands to populate.
    """
    ichrone = get_ichrone(ichrone, bands=bands)

    # An array of masses sets the sample size directly.
    if np.size(M) > 1:
        n = np.size(M)
    else:
        n = int(n)
    # Secondary masses from the mass-ratio distribution; qmin is floored
    # so the companion mass is at least self.minmass.
    M2 = M * self.q_fn(n, qmin=np.maximum(self.qmin,self.minmass/M))
    P = self.P_fn(n)
    ecc = self.ecc_fn(n,P)

    # Contiguous arrays (presumably required by the isochrone
    # interpolation backend — confirm).
    mass = np.ascontiguousarray(np.ones(n)*M)
    mass2 = np.ascontiguousarray(M2)
    age = np.ascontiguousarray(age)
    feh = np.ascontiguousarray(feh)
    pri = ichrone(mass, age, feh, return_df=True, bands=bands)
    sec = ichrone(mass2, age, feh, return_df=True, bands=bands)

    BinaryPopulation.__init__(self, primary=pri, secondary=sec,
                              period=P, ecc=ecc, **kwargs)
    return self
<SYSTEM_TASK:>
Difference in magnitudes between fainter and brighter components in band.
<END_TASK>
<USER_TASK:>
Description:
def dmag(self, band):
    """
    Difference in magnitudes between fainter and brighter components in band.

    The B and C components are combined into a single magnitude (via
    ``addmags``) and compared with A.

    :param band:
        Photometric bandpass.
    """
    mag_A = self.stars['{}_mag_A'.format(band)]
    mag_BC = addmags(self.stars['{}_mag_B'.format(band)],
                     self.stars['{}_mag_C'.format(band)])
    return np.abs(mag_BC - mag_A)
<SYSTEM_TASK:>
Returns dRV of star A, if A is brighter than B+C, or of star B if B+C is brighter
<END_TASK>
<USER_TASK:>
Description:
def dRV(self, dt, band='g'):
    """Returns dRV of star A, if A is brighter than B+C, or of star B if B+C is brighter.
    """
    use_A = self.A_brighter(band)
    use_BC = self.BC_brighter(band)
    return self.orbpop.dRV_1(dt)*use_A + self.orbpop.dRV_2(dt)*use_BC
<SYSTEM_TASK:>
Triple fraction of stars following given query
<END_TASK>
<USER_TASK:>
Description:
def triple_fraction(self,query='mass_A > 0', unc=False):
    """
    Triple fraction of stars following given query.

    :param query: pandas query string selecting the subsample.
    :param unc: if ``True``, also return the Poisson uncertainty.
    """
    sample = self.stars.query(query)
    n_triple = ((sample['mass_B'] > 0) & (sample['mass_C'] > 0)).sum()
    fraction = n_triple/len(sample)
    if unc:
        return fraction, fraction/np.sqrt(n_triple)
    return fraction
<SYSTEM_TASK:>
Magnitude difference between primary star and BG stars
<END_TASK>
<USER_TASK:>
Description:
def dmag(self,band):
    """
    Magnitude difference between primary star and BG stars.

    :raises ValueError: if primary magnitudes are undefined.
    """
    if self.mags is None:
        raise ValueError('dmag is not defined because primary mags are not defined for this population.')
    bg_mags = self.stars['{}_mag'.format(band)]
    return bg_mags - self.mags[band]
<SYSTEM_TASK:>
I receive data+hash, check for a match, confirm or not
<END_TASK>
<USER_TASK:>
Description:
def receive(self):
    """I receive data+hash, check for a match, confirm or not
    confirm to the sender, and return the data payload.
    """
    def _receive(input_message):
        # Message layout: payload followed by a 64-character hex sha256
        # digest of the payload.
        self.data = input_message[:-64]
        _hash = input_message[-64:]
        if h.sha256(self.data).hexdigest() == _hash:
            self._w.send_message('Confirmed!')
        else:
            self._w.send_message('Not Confirmed!')
    # NOTE(review): generator style suggests twisted inlineCallbacks
    # semantics (each yield waits on a deferred) — confirm decorator
    # at the definition site.
    yield self.start_tor()
    self._w = wormhole.create(u'axotor', RENDEZVOUS_RELAY, self._reactor,
                              tor=self._tor, timing=self._timing)
    self._w.set_code(self._code)
    yield self._w.get_message().addCallback(_receive)
    yield self._w.close()
    self._reactor.stop()
    return
<SYSTEM_TASK:>
Update screen if necessary and release the lock so receiveThread can run
<END_TASK>
<USER_TASK:>
Description:
def validator(ch):
    """
    Update screen if necessary and release the lock so receiveThread can run.

    Used as an input-validator callback; returns ``ch`` unchanged.
    """
    global screen_needs_update
    try:
        if screen_needs_update:
            curses.doupdate()
            screen_needs_update = False
        return ch
    finally:
        # Runs after the return value is computed: briefly release the
        # window lock so the receive thread can draw, then re-acquire
        # before control goes back to the caller.
        winlock.release()
        sleep(0.01) # let receiveThread in if necessary
        winlock.acquire()
<SYSTEM_TASK:>
Returns copy of constraint, with mask rearranged according to indices
<END_TASK>
<USER_TASK:>
Description:
def resample(self, inds):
    """Returns copy of constraint, with mask rearranged according to indices.
    """
    clone = copy.deepcopy(self)
    for attr_name in self.arrays:
        values = getattr(clone, attr_name)
        setattr(clone, attr_name, values[inds])
    return clone
<SYSTEM_TASK:>
Saves PopulationSet and TransitSignal.
<END_TASK>
<USER_TASK:>
Description:
def save(self, overwrite=True):
    """
    Saves PopulationSet and TransitSignal.

    Shouldn't need to use this if you're using
    :func:`FPPCalculation.from_ini`.

    Saves :class:`PopulationSet` to ``[folder]/popset.h5``
    and :class:`TransitSignal` to ``[folder]/trsig.pkl``.

    :param overwrite: (optional)
        Whether to overwrite existing files.
    """
    self.save_popset(overwrite=overwrite)
    self.save_signal()
<SYSTEM_TASK:>
Loads PopulationSet from folder
<END_TASK>
<USER_TASK:>
Description:
def load(cls, folder):
    """
    Loads PopulationSet from folder.

    ``popset.h5`` and ``trsig.pkl`` must exist in folder.

    :param folder:
        Folder from which to load.
    """
    popset = PopulationSet.load_hdf(os.path.join(folder, 'popset.h5'))
    signal_file = os.path.join(folder, 'trsig.pkl')
    with open(signal_file, 'rb') as f:
        trsig = pickle.load(f)
    return cls(trsig, popset, folder=folder)
<SYSTEM_TASK:>
Make FPP diagnostic plots
<END_TASK>
<USER_TASK:>
Description:
def FPPplots(self, folder=None, format='png', tag=None, **kwargs):
    """
    Make FPP diagnostic plots.

    Makes likelihood "fuzz plot" for each model, a FPP summary figure,
    a plot of the :class:`TransitSignal`, and writes a ``results.txt``
    file.

    :param folder: (optional)
        Destination folder for plots/``results.txt``.  Default
        is ``self.folder``.
    :param format: (optional)
        Desired format of figures, e.g. ``png``, ``pdf``...
    :param tag: (optional)
        If provided (string), filenames will have ``_[tag]`` appended
        before the extension.
    :param **kwargs:
        Additional keyword arguments passed to :func:`PopulationSet.lhoodplots`.
    """
    target = self.folder if folder is None else folder
    self.write_results(folder=target)
    self.lhoodplots(folder=target, figformat=format, tag=tag, **kwargs)
    self.FPPsummary(folder=target, saveplot=True, figformat=format, tag=tag)
    self.plotsignal(folder=target, saveplot=True, figformat=format)
<SYSTEM_TASK:>
Writes text file of calculation summary.
<END_TASK>
<USER_TASK:>
Description:
def write_results(self,folder=None, filename='results.txt', to_file=True):
    """
    Writes text file of calculation summary.

    :param folder: (optional)
        Folder to which to write ``results.txt``; defaults to ``self.folder``.
    :param filename:
        Filename to write.  Default=``results.txt``.
    :param to_file:
        If True, then writes file.  Otherwise just return header, line.
    :returns:
        Header string, line.
    """
    if folder is None:
        folder = self.folder
    header = ''
    for m in self.popset.shortmodelnames:
        header += 'lhood_{0} L_{0} pr_{0} '.format(m)
    header += 'fpV fp FPP'

    # Posterior weight of each model: prior * likelihood, normalized
    # by the total below.
    Ls = {}
    Ltot = 0
    lhoods = {}
    for model in self.popset.modelnames:
        lhoods[model] = self.lhood(model)
        Ls[model] = self.prior(model)*lhoods[model]
        Ltot += Ls[model]

    line = ''
    for model in self.popset.modelnames:
        line += '%.2e %.2e %.2e ' % (lhoods[model], Ls[model], Ls[model]/Ltot)
    line += '%.3g %.3f %.2e' % (self.fpV(),self.priorfactors['fp_specific'],self.FPP())

    if to_file:
        # Context manager guarantees the handle is closed even if a
        # write fails (the original opened without `with` and leaked
        # the handle on any exception between open and close).
        with open(os.path.join(folder,filename), 'w') as fout:
            fout.write('{} \n'.format(header))
            fout.write(line+'\n')

    return header, line
<SYSTEM_TASK:>
Max secondary depth based on model-shift secondary test from Jeff Coughlin
<END_TASK>
<USER_TASK:>
Description:
def modelshift_weaksec(koi):
    """
    Max secondary depth based on model-shift secondary test from Jeff Coughlin.

    secondary metric: mod_depth_sec_dv * (1 + 3*mod_fred_dv / mod_sig_sec_dv)

    :param koi: KOI identifier (anything ``ku.koiname`` accepts).
    :raises NoWeakSecondaryError: if no robovetter row exists for the
        TCE, or both detrendings yield NaN metrics.
    """
    num = KOIDATA.ix[ku.koiname(koi), 'koi_tce_plnt_num']
    # Default to planet number 1 when the table has no entry.
    if np.isnan(num):
        num = 1
    kid = KOIDATA.ix[ku.koiname(koi), 'kepid']
    # TCE id: zero-padded kepid, dash, zero-padded planet number.
    tce = '{:09.0f}-{:02.0f}'.format(kid,num)

    #return largest depth between DV detrending and alternate detrending
    try:
        r = ROBOVETDATA.ix[tce]
    except KeyError:
        raise NoWeakSecondaryError(koi)
    depth_dv = r['mod_depth_sec_dv'] * (1 + 3*r['mod_fred_dv'] / r['mod_sig_sec_dv'])
    depth_alt = r['mod_depth_sec_alt'] * (1 + 3*r['mod_fred_alt'] / r['mod_sig_sec_alt'])
    logging.debug(r[['mod_depth_sec_dv','mod_fred_dv','mod_sig_sec_dv']])
    logging.debug(r[['mod_depth_sec_alt','mod_fred_alt','mod_sig_sec_alt']])

    # Prefer whichever detrending gives a finite value; if both are
    # finite, take the larger (more conservative) depth.
    if np.isnan(depth_dv) and np.isnan(depth_alt):
        #return weaksec_vv2(koi)
        raise NoWeakSecondaryError(koi)
    elif np.isnan(depth_dv):
        return depth_alt
    elif np.isnan(depth_alt):
        return depth_dv
    else:
        return max(depth_dv, depth_alt)
<SYSTEM_TASK:>
Returns true if provenance of property is SPE or AST
<END_TASK>
<USER_TASK:>
Description:
def use_property(kepid, prop):
    """Returns true if provenance of property is SPE or AST
    """
    try:
        prov = kicu.DATA.ix[kepid, '{}_prov'.format(prop)]
    except KeyError:
        raise MissingStellarError('{} not in stellar table?'.format(kepid))
    # str.startswith accepts a tuple of candidate prefixes.
    return prov.startswith(('SPE', 'AST'))
<SYSTEM_TASK:>
returns config object for given KOI
<END_TASK>
<USER_TASK:>
Description:
def fpp_config(koi, **kwargs):
    """returns config object for given KOI
    """
    # Normalize the KOI name once, up front.
    koi = ku.koiname(koi)
    folder = os.path.join(KOI_FPPDIR, koi)
    if not os.path.exists(folder):
        os.makedirs(folder)
    config = ConfigObj(os.path.join(folder, 'fpp.ini'))

    rowefit = jrowe_fit(koi)
    ra, dec = ku.radec(koi)

    config['name'] = koi
    config['ra'] = ra
    config['dec'] = dec
    config['rprs'] = rowefit.ix['RD1', 'val']
    config['period'] = rowefit.ix['PE1', 'val']
    config['starfield'] = kepler_starfield_file(koi)

    # Caller-supplied keywords override/extend the defaults above.
    for key, value in kwargs.items():
        config[key] = value

    config['constraints'] = {}
    config['constraints']['maxrad'] = default_r_exclusion(koi)
    try:
        config['constraints']['secthresh'] = pipeline_weaksec(koi)
    except NoWeakSecondaryError:
        # No reliable weak-secondary threshold; omit the constraint.
        pass
    return config
<SYSTEM_TASK:>
Applies default secthresh & exclusion radius constraints
<END_TASK>
<USER_TASK:>
Description:
def apply_default_constraints(self):
    """Applies default secthresh & exclusion radius constraints
    """
    try:
        secthresh = pipeline_weaksec(self.koi)
    except NoWeakSecondaryError:
        logging.warning('No secondary eclipse threshold set for {}'.format(self.koi))
    else:
        self.apply_secthresh(secthresh)
    self.set_maxrad(default_r_exclusion(self.koi))
<SYSTEM_TASK:>
Returns the SHA for the original file that was changed in a diff part.
<END_TASK>
<USER_TASK:>
Description:
def get_old_sha(diff_part):
    """
    Returns the SHA for the original file that was changed in a diff part.

    :param diff_part: text of a single-file section of a git diff,
        containing an ``index <old-sha>..<new-sha>`` line.
    :raises MalformedGitDiff: if no index line is present — consistent
        with :func:`get_old_filename` / :func:`get_new_filename` instead
        of the original's opaque ``AttributeError`` on ``None``.
    """
    match = re.search(r'index ([a-fA-F\d]*)', diff_part)
    if match is None:
        raise MalformedGitDiff("No index line in diff part found. "
                               "Examined diff part: {}".format(diff_part))
    return match.group(1)
<SYSTEM_TASK:>
Returns the filename for the original file that was changed in a diff part.
<END_TASK>
<USER_TASK:>
Description:
def get_old_filename(diff_part):
    """
    Returns the filename for the original file that was changed in a diff part.
    """
    # e.g. "--- a/foo/bar" or "--- /dev/null"
    patterns = (r'^--- a/(.*)', r'^\-\-\- (.*)')
    for pattern in patterns:
        match = re.search(pattern, diff_part, re.MULTILINE)
        if match:
            return match.group(1)
    raise MalformedGitDiff("No old filename in diff part found. "
                          "Examined diff part: {}".format(diff_part))
<SYSTEM_TASK:>
Returns the filename for the updated file in a diff part.
<END_TASK>
<USER_TASK:>
Description:
def get_new_filename(diff_part):
    """
    Returns the filename for the updated file in a diff part.
    """
    # e.g. "+++ b/foo/bar" or "+++ /dev/null"
    patterns = (r'^\+\+\+ b/(.*)', r'^\+\+\+ (.*)')
    for pattern in patterns:
        match = re.search(pattern, diff_part, re.MULTILINE)
        if match:
            return match.group(1)
    raise MalformedGitDiff("No new filename in diff part found. "
                          "Examined diff part: {}".format(diff_part))
<SYSTEM_TASK:>
Returns a tuple of old content and new content.
<END_TASK>
<USER_TASK:>
Description:
def get_contents(diff_part):
    """
    Returns a tuple of old content and new content.
    """
    old_contents = get_old_contents(get_old_sha(diff_part),
                                    get_old_filename(diff_part))
    new_contents = get_new_contents(get_new_filename(diff_part))
    return old_contents, new_contents
<SYSTEM_TASK:>
Returns a dictionary resulting from reading a likelihood cachefile
<END_TASK>
<USER_TASK:>
Description:
def _loadcache(cachefile):
""" Returns a dictionary resulting from reading a likelihood cachefile
""" |
cache = {}
if os.path.exists(cachefile):
with open(cachefile) as f:
for line in f:
line = line.split()
if len(line) == 2:
try:
cache[int(line[0])] = float(line[1])
except:
pass
return cache |
<SYSTEM_TASK:>
Fit trapezoid shape to each eclipse in population
<END_TASK>
<USER_TASK:>
Description:
def fit_trapezoids(self, MAfn=None, msg=None, use_pbar=True, **kwargs):
    """
    Fit trapezoid shape to each eclipse in population

    For each instance in the population, first the correct,
    physical Mandel-Agol transit shape is simulated,
    and then this curve is fit with a trapezoid model

    :param MAfn:
        :class:`transit_basic.MAInterpolationFunction` object.
        If not passed, then one with default parameters will
        be created.
        (NOTE(review): not referenced in this body; presumably consumed
        downstream via ``eclipse_trapfit``/``**kwargs`` -- confirm.)

    :param msg:
        Message to be displayed for progressbar output.

    :param **kwargs:
        Additional keyword arguments passed to :func:`fitebs.fitebs`.
    """
    logging.info('Fitting trapezoid models for {}...'.format(self.model))
    if msg is None:
        msg = '{}: '.format(self.model)
    n = len(self.stars)
    # Per-star trapezoid parameters (depth, duration, slope) plus
    # bookkeeping for which eclipse (primary/secondary) was fit.
    deps, durs, slopes = (np.zeros(n), np.zeros(n), np.zeros(n))
    secs = np.zeros(n, dtype=bool)
    dsec = np.zeros(n)
    if use_pbar and pbar_ok:
        widgets = [msg+'fitting shape parameters for %i systems: ' % n,Percentage(),
                   ' ',Bar(marker=RotatingMarker()),' ',ETA()]
        pbar = ProgressBar(widgets=widgets,maxval=n)
        pbar.start()
    for i in range(n):
        logging.debug('Fitting star {}'.format(i))
        # The deeper eclipse is treated as "primary"; when the nominal
        # secondary is deeper the roles swap, so the recorded
        # "secondary depth" is then the nominal primary's depth.
        pri = (self.stars['dpri'][i] > self.stars['dsec'][i] or
               np.isnan(self.stars['dsec'][i]))
        sec = not pri
        secs[i] = sec
        if sec:
            dsec[i] = self.stars['dpri'][i]
        else:
            dsec[i] = self.stars['dsec'][i]
        try:
            trap_pars = self.eclipse_trapfit(i, secondary=sec, **kwargs)
        except NoEclipseError:
            logging.error('No eclipse registered for star {}'.format(i))
            trap_pars = (np.nan, np.nan, np.nan)
        except NoFitError:
            logging.error('Fit did not converge for star {}'.format(i))
            trap_pars = (np.nan, np.nan, np.nan)
        except KeyboardInterrupt:
            raise
        except:
            # Catch-all keeps one bad star from killing the whole batch;
            # the failed star simply gets NaN shape parameters.
            logging.error('Unknown error for star {}'.format(i))
            trap_pars = (np.nan, np.nan, np.nan)
        if use_pbar and pbar_ok:
            pbar.update(i)
        durs[i], deps[i], slopes[i] = trap_pars
    logging.info('Done.')
    self.stars['depth'] = deps
    self.stars['duration'] = durs
    self.stars['slope'] = slopes
    self.stars['secdepth'] = dsec
    self.stars['secondary'] = secs
    # Refresh the KDE over the new (depth, duration, slope) samples.
    self._make_kde()
<SYSTEM_TASK:>
Array of eclipse probabilities.
<END_TASK>
<USER_TASK:>
Description:
def eclipseprob(self):
    """
    Array of eclipse probabilities.
    """
    #TODO: incorporate eccentricity/omega for exact calculation?
    stars = self.stars
    a = semimajor(stars['P'], stars['mass_1'] + stars['mass_2'])
    # Geometric eclipse probability: (R1 + R2) / a, in consistent units.
    return (stars['radius_1'] + stars['radius_2']) * RSUN / (a * AU)
<SYSTEM_TASK:>
Constrain the observed secondary depth to be less than a given value
<END_TASK>
<USER_TASK:>
Description:
def constrain_secdepth(self, thresh):
    """
    Constrain the observed secondary depth to be less than a given value

    :param thresh:
        Maximum allowed fractional depth for diluted secondary
        eclipse depth
    """
    constraint = UpperLimit(self.secondary_depth, thresh, name='secondary depth')
    self.apply_constraint(constraint)
<SYSTEM_TASK:>
Model prior for particular model.
<END_TASK>
<USER_TASK:>
Description:
def prior(self):
    """
    Model prior for particular model.

    Product of eclipse probability (``self.prob``),
    the fraction of scenario that is allowed by the various
    constraints (``self.selectfrac``), and all additional
    factors in ``self.priorfactors``.
    """
    result = self.prob * self.selectfrac
    for factor in self.priorfactors.values():
        result *= factor
    return result
<SYSTEM_TASK:>
Adds given values to priorfactors
<END_TASK>
<USER_TASK:>
Description:
def add_priorfactor(self, **kwargs):
    """Adds given values to priorfactors

    If given keyword exists already, error will be raised
    to use :func:`EclipsePopulation.change_prior` instead.
    """
    for name, value in kwargs.items():
        if name in self.priorfactors:
            # Existing factors must be modified via change_prior.
            logging.error('%s already in prior factors for %s. use change_prior function instead.' % (name,self.model))
            continue
        self.priorfactors[name] = value
        logging.info('%s added to prior factors for %s' % (name,self.model))
<SYSTEM_TASK:>
Changes existing priorfactors.
<END_TASK>
<USER_TASK:>
Description:
def change_prior(self, **kwargs):
    """
    Changes existing priorfactors.

    If given keyword isn't already in priorfactors,
    then will be ignored.
    """
    for name, value in kwargs.items():
        if name not in self.priorfactors:
            continue
        self.priorfactors[name] = value
        logging.info('{0} changed to {1} for {2} model'.format(name, value,
                                                               self.model))
<SYSTEM_TASK:>
Evaluate KDE at given points.
<END_TASK>
<USER_TASK:>
Description:
def _density(self, logd, dur, slope):
"""
Evaluate KDE at given points.
Prepares data according to whether sklearn or scipy
KDE in use.
:param log, dur, slope:
Trapezoidal shape parameters.
""" |
if self.sklearn_kde:
#TODO: fix preprocessing
pts = np.array([(logd - self.mean_logdepth)/self.std_logdepth,
(dur - self.mean_dur)/self.std_dur,
(slope - self.mean_slope)/self.std_slope])
return self.kde.score_samples(pts)
else:
return self.kde(np.array([logd, dur, slope])) |
<SYSTEM_TASK:>
Returns likelihood of transit signal
<END_TASK>
<USER_TASK:>
Description:
def lhood(self, trsig, recalc=False, cachefile=None):
    """Returns likelihood of transit signal

    Returns sum of ``trsig`` MCMC samples evaluated
    at ``self.kde``.

    :param trsig:
        :class:`vespa.TransitSignal` object.

    :param recalc: (optional)
        Whether to recalculate likelihood (if calculation
        is cached).

    :param cachefile: (optional)
        File that holds likelihood calculation cache.
    """
    if not hasattr(self, 'kde'):
        self._make_kde()

    # Resolve the cache file: explicit argument, instance default,
    # then hard-coded fallback.
    if cachefile is None:
        cachefile = self.lhoodcachefile
        if cachefile is None:
            cachefile = 'lhoodcache.dat'

    lhoodcache = _loadcache(cachefile)
    key = hashcombine(self, trsig)
    if not recalc and key in lhoodcache:
        return lhoodcache[key]

    if self.is_ruled_out:
        return 0

    # Average the population KDE over all MCMC samples of the signal.
    nsamples = trsig.kde.dataset.shape[1]
    lh = self.kde(trsig.kde.dataset).sum() / nsamples

    # Append to the cache file for reuse in later calls.
    with open(cachefile, 'a') as fout:
        fout.write('%i %g\n' % (key, lh))

    return lh
<SYSTEM_TASK:>
Loads EclipsePopulation from HDF file
<END_TASK>
<USER_TASK:>
Description:
def load_hdf(cls, filename, path=''): #perhaps this doesn't need to be written?
    """
    Loads EclipsePopulation from HDF file

    Also runs :func:`EclipsePopulation._make_kde` if it can.

    :param filename:
        HDF file

    :param path: (optional)
        Path within HDF file
    """
    new = StarPopulation.load_hdf(filename, path=path)

    # Set up lazy loading of the star model, if one is stored in the file.
    try:
        with pd.HDFStore(filename) as store:
            if '{}/starmodel'.format(path) in store:
                new._starmodel = None
                new._starmodel_file = filename
                new._starmodel_path = '{}/starmodel'.format(path)
    except Exception:
        # Best-effort: an unreadable store just means no starmodel.
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        pass

    try:
        new._make_kde()
    except NoTrapfitError:
        logging.warning('Trapezoid fit not done.')
    return new
<SYSTEM_TASK:>
Unique list of constraints among all populations in set.
<END_TASK>
<USER_TASK:>
Description:
def constraints(self):
    """
    Unique list of constraints among all populations in set.
    """
    unique = set()
    for pop in self.poplist:
        unique.update(pop.constraints)
    return list(unique)
<SYSTEM_TASK:>
Removes population from PopulationSet
<END_TASK>
<USER_TASK:>
Description:
def remove_population(self, pop):
    """Removes population from PopulationSet

    :param pop:
        Either a model name, or a population object with a ``model``
        attribute, identifying the population to remove. If no match
        is found, nothing happens.
    """
    # Accept either a population object or its model name.
    target = getattr(pop, 'model', pop)

    # BUGFIX: the original compared self.modelnames[i] to
    # self.poplist[i].model (true by construction, ignoring ``pop``)
    # and then popped the leftover loop index ``i`` instead of
    # ``iremove`` -- always removing the *last* population.
    iremove = None
    for i in range(len(self.poplist)):
        if self.poplist[i].model == target:
            iremove = i

    if iremove is not None:
        self.modelnames.pop(iremove)
        self.shortmodelnames.pop(iremove)
        self.poplist.pop(iremove)
<SYSTEM_TASK:>
Dictionary holding colors that correspond to constraints.
<END_TASK>
<USER_TASK:>
Description:
def colordict(self):
    """
    Dictionary holding colors that correspond to constraints.
    """
    n = len(self.constraints)
    # Spread constraint colors evenly across the jet colormap.
    return {c: cm.jet(1.*i/n) for i, c in enumerate(self.constraints)}
<SYSTEM_TASK:>
Combination of priorfactors from all populations
<END_TASK>
<USER_TASK:>
Description:
def priorfactors(self):
    """Combination of priorfactors from all populations
    """
    combined = {}
    for pop in self.poplist:
        for name, value in pop.priorfactors.items():
            if name in combined:
                # The same factor must agree across populations.
                if combined[name] != value:
                    raise ValueError('prior factor %s is inconsistent!' % name)
            else:
                combined[name] = value
    return combined
<SYSTEM_TASK:>
Applies constraint corresponding to measuring transit in different band
<END_TASK>
<USER_TASK:>
Description:
def apply_multicolor_transit(self, band, depth):
    """
    Applies constraint corresponding to measuring transit in different band

    This is not implemented yet.
    """
    cname = '{} band transit'.format(band)
    if cname not in self.constraints:
        self.constraints.append(cname)
    for pop in self.poplist:
        pop.apply_multicolor_transit(band, depth)
<SYSTEM_TASK:>
Sets max allowed radius in populations.
<END_TASK>
<USER_TASK:>
Description:
def set_maxrad(self, newrad):
    """
    Sets max allowed radius in populations.

    Doesn't operate via the :class:`stars.Constraint`
    protocol; rather just rescales the sky positions
    for the background objects and recalculates
    sky area, etc.
    """
    # Bare numbers are interpreted as arcseconds.
    if not isinstance(newrad, Quantity):
        newrad = newrad * u.arcsec
    for pop in self.poplist:
        if pop.is_specific:
            continue
        try:
            pop.maxrad = newrad
        except AttributeError:
            # Population without a settable maxrad; skip it.
            pass
<SYSTEM_TASK:>
Applies a constraint that sets the maximum brightness for non-target star
<END_TASK>
<USER_TASK:>
Description:
def apply_dmaglim(self,dmaglim=None):
    """
    Applies a constraint that sets the maximum brightness for non-target star

    :func:`stars.StarPopulation.set_dmaglim` not yet implemented.
    """
    raise NotImplementedError
    # NOTE: everything below is unreachable until the raise above is
    # removed; kept as a sketch of the intended implementation.
    if 'bright blend limit' not in self.constraints:
        self.constraints.append('bright blend limit')
    for pop in self.poplist:
        # Skip populations without a dmaglim, and target-specific ones.
        if not hasattr(pop,'dmaglim') or pop.is_specific:
            continue
        if dmaglim is None:
            dmag = pop.dmaglim
        else:
            dmag = dmaglim
        pop.set_dmaglim(dmag)
    self.dmaglim = dmaglim
<SYSTEM_TASK:>
Applies constraint corresponding to RV trend non-detection to each population
<END_TASK>
<USER_TASK:>
Description:
def apply_trend_constraint(self, limit, dt, **kwargs):
    """
    Applies constraint corresponding to RV trend non-detection to each population

    See :func:`stars.StarPopulation.apply_trend_constraint`;
    all arguments passed to that function for each population.
    """
    if 'RV monitoring' not in self.constraints:
        self.constraints.append('RV monitoring')
    for pop in self.poplist:
        # Only populations that model an RV trend (have ``dRV``).
        if hasattr(pop, 'dRV'):
            pop.apply_trend_constraint(limit, dt, **kwargs)
    self.trend_limit = limit
    self.trend_dt = dt
<SYSTEM_TASK:>
Applies secondary depth constraint to each population
<END_TASK>
<USER_TASK:>
Description:
def apply_secthresh(self, secthresh, **kwargs):
    """Applies secondary depth constraint to each population

    See :func:`EclipsePopulation.apply_secthresh`;
    all arguments passed to that function for each population.
    """
    if 'secondary depth' not in self.constraints:
        self.constraints.append('secondary depth')
    for pop in self.poplist:
        # Px2 populations are skipped here -- presumably constrained
        # elsewhere; confirm against EclipsePopulation_Px2 handling.
        if isinstance(pop, EclipsePopulation_Px2):
            continue
        pop.apply_secthresh(secthresh, **kwargs)
    self.secthresh = secthresh
<SYSTEM_TASK:>
Constrains property for each population
<END_TASK>
<USER_TASK:>
Description:
def constrain_property(self, prop, **kwargs):
    """
    Constrains property for each population

    See :func:`vespa.stars.StarPopulation.constrain_property`;
    all arguments passed to that function for each population.
    """
    if prop not in self.constraints:
        self.constraints.append(prop)
    for population in self.poplist:
        try:
            population.constrain_property(prop, **kwargs)
        except AttributeError:
            # Population lacks this property; note it and move on.
            logging.info('%s model does not have property stars.%s (constraint not applied)'
                         % (population.model, prop))
<SYSTEM_TASK:>
Replaces removed constraint in each population.
<END_TASK>
<USER_TASK:>
Description:
def replace_constraint(self, name, **kwargs):
    """
    Replaces removed constraint in each population.

    See :func:`vespa.stars.StarPopulation.replace_constraint`
    """
    for population in self.poplist:
        population.replace_constraint(name, **kwargs)
    if name not in self.constraints:
        self.constraints.append(name)
<SYSTEM_TASK:>
Removes constraint from each population
<END_TASK>
<USER_TASK:>
Description:
def remove_constraint(self, *names):
    """
    Removes constraint from each population

    See :func:`vespa.stars.StarPopulation.remove_constraint`
    """
    for name in names:
        for population in self.poplist:
            if name not in population.constraints:
                logging.info('%s model does not have %s constraint' % (population.model, name))
            else:
                population.remove_constraint(name)
        if name in self.constraints:
            self.constraints.remove(name)
<SYSTEM_TASK:>
Applies contrast curve constraint to each population
<END_TASK>
<USER_TASK:>
Description:
def apply_cc(self, cc, **kwargs):
    """
    Applies contrast curve constraint to each population

    See :func:`vespa.stars.StarPopulation.apply_cc`;
    all arguments passed to that function for each population.
    """
    # (The original contained a no-op placeholder branch for
    # string-valued ``cc``; behavior is unchanged without it.)
    if cc.name not in self.constraints:
        self.constraints.append(cc.name)
    for population in self.poplist:
        if population.is_specific:
            continue
        try:
            population.apply_cc(cc, **kwargs)
        except AttributeError:
            logging.info('%s cc not applied to %s model' % (cc.name, population.model))
<SYSTEM_TASK:>
Applies velocity contrast curve constraint to each population
<END_TASK>
<USER_TASK:>
Description:
def apply_vcc(self, vcc):
    """
    Applies velocity contrast curve constraint to each population

    See :func:`vespa.stars.StarPopulation.apply_vcc`;
    all arguments passed to that function for each population.

    :param vcc:
        Velocity contrast curve object passed through to each
        non-specific population.
    """
    if 'secondary spectrum' not in self.constraints:
        self.constraints.append('secondary spectrum')
    for pop in self.poplist:
        if not pop.is_specific:
            try:
                pop.apply_vcc(vcc)
            except Exception:
                # Narrowed from a bare ``except`` so KeyboardInterrupt /
                # SystemExit propagate; still best-effort per population.
                logging.info('VCC constraint not applied to %s model' % (pop.model))
<SYSTEM_TASK:>
Runs get_trilegal perl script; optionally saves output into .h5 file
<END_TASK>
<USER_TASK:>
Description:
def get_trilegal(filename,ra,dec,folder='.', galactic=False,
                 filterset='kepler_2mass',area=1,maglim=27,binaries=False,
                 trilegal_version='1.6',sigma_AV=0.1,convert_h5=True):
    """Runs get_trilegal perl script; optionally saves output into .h5 file

    Depends on a perl script provided by L. Girardi; calls the
    web form simulation, downloads the file, and (optionally) converts
    to HDF format.

    Uses A_V at infinity from :func:`utils.get_AV_infinity`.

    .. note::

        Would be desirable to re-write the get_trilegal script
        all in python.

    :param filename:
        Desired output filename. If extension not provided, it will
        be added.

    :param ra,dec:
        Coordinates (ecliptic) for line-of-sight simulation.

    :param folder: (optional)
        Folder to which to save file. *Acknowledged, file control
        in this function is a bit wonky.*

    :param filterset: (optional)
        Filter set for which to call TRILEGAL.

    :param area: (optional)
        Area of TRILEGAL simulation [sq. deg]

    :param maglim: (optional)
        Limiting magnitude in first mag (by default will be Kepler band)
        If want to limit in different band, then you have to
        got directly to the ``get_trilegal`` perl script.

    :param binaries: (optional)
        Whether to have TRILEGAL include binary stars. Default ``False``.

    :param trilegal_version: (optional)
        Default ``'1.6'``.

    :param sigma_AV: (optional)
        Fractional spread in A_V along the line of sight.

    :param convert_h5: (optional)
        If true, text file downloaded from TRILEGAL will be converted
        into a ``pandas.DataFrame`` stored in an HDF file, with ``'df'``
        path.
    """
    if galactic:
        l, b = ra, dec
    else:
        # Convert equatorial to galactic coordinates; retry with explicit
        # degree units if the bare values are ambiguous to SkyCoord.
        try:
            c = SkyCoord(ra,dec)
        except UnitsError:
            c = SkyCoord(ra,dec,unit='deg')
        l,b = (c.galactic.l.value,c.galactic.b.value)
    if os.path.isabs(filename):
        folder = ''
    # Ensure the raw output file carries a .dat extension.
    if not re.search('\.dat$',filename):
        outfile = '{}/{}.dat'.format(folder,filename)
    else:
        outfile = '{}/{}'.format(folder,filename)
    AV = get_AV_infinity(l,b,frame='galactic')
    #cmd = 'get_trilegal %s %f %f %f %i %.3f %.2f %s 1 %.1f %s' % (trilegal_version,l,b,
    #                                                            area,binaries,AV,sigma_AV,
    #                                                            filterset,maglim,outfile)
    #sp.Popen(cmd,shell=True).wait()
    trilegal_webcall(trilegal_version,l,b,area,binaries,AV,sigma_AV,filterset,maglim,outfile)
    if convert_h5:
        df = pd.read_table(outfile, sep='\s+', skipfooter=1, engine='python')
        df = df.rename(columns={'#Gc':'Gc'})
        # Tag magnitude columns with a "_mag" suffix so they are
        # distinguishable from the physical (non-magnitude) columns.
        for col in df.columns:
            if col not in NONMAG_COLS:
                df.rename(columns={col:'{}_mag'.format(col)},inplace=True)
        if not re.search('\.h5$', filename):
            h5file = '{}/{}.h5'.format(folder,filename)
        else:
            h5file = '{}/{}'.format(folder,filename)
        df.to_hdf(h5file,'df')
        # Record the simulation parameters as HDF attributes for provenance.
        with pd.HDFStore(h5file) as store:
            attrs = store.get_storer('df').attrs
            attrs.trilegal_args = {'version':trilegal_version,
                                   'ra':ra, 'dec':dec,
                                   'l':l,'b':b,'area':area,
                                   'AV':AV, 'sigma_AV':sigma_AV,
                                   'filterset':filterset,
                                   'maglim':maglim,
                                   'binaries':binaries}
        # Remove the raw text download once converted.
        os.remove(outfile)
<SYSTEM_TASK:>
Wrapper to set logging parameters for output
<END_TASK>
<USER_TASK:>
Description:
def log_wrapper(self):
    """
    Wrapper to set logging parameters for output

    :returns: the ``client.py`` logger, at DEBUG level if a ``debug``
        key is present in ``self.params`` (regardless of its value),
        else INFO.
    """
    log = logging.getLogger('client.py')

    # Set the log level: DEBUG iff the caller supplied a "debug" param.
    # (Membership test replaces the original's unused-variable /
    # KeyError-probe idiom; presence, not truthiness, is what counts.)
    if "debug" in self.params:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    # Set the log format.
    # NOTE(review): a new StreamHandler is attached on every call, so
    # repeated calls duplicate log lines -- preserved behavior.
    stream = logging.StreamHandler()
    logformat = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%b %d %H:%M:%S')
    stream.setFormatter(logformat)
    log.addHandler(stream)
    return log
<SYSTEM_TASK:>
Encode uint128 setid as stripped b32encoded string
<END_TASK>
<USER_TASK:>
Description:
def encode_setid(uint128):
    """Encode uint128 setid as stripped b32encoded string"""
    lo = uint128 & 0xffffffffffffffff
    hi = uint128 >> 64
    encoded = b32encode(struct.pack('<QQ', lo, hi))
    # 16 bytes -> 26 base32 chars + 6 '=' padding chars; strip the padding.
    return encoded[:-6].lower()
<SYSTEM_TASK:>
Returns an image with reduced opacity. Converts image to RGBA if needs.
<END_TASK>
<USER_TASK:>
Description:
def _reduce_opacity(self, watermark, opacity):
    """
    Returns an image with reduced opacity. Converts image to RGBA if needs.

    Simple watermark.opacity(65535 - int(65535 * opacity) would not work for
    images with the Opacity channel (RGBA images). So we have to convert RGB or any
    other type to RGBA in this case
    """
    # Ensure the image has an opacity channel before manipulating it.
    if watermark.type() != ImageType.TrueColorMatteType:
        watermark.type(ImageType.TrueColorMatteType)
    transparency = 255 - int(255 * opacity)
    watermark.quantumOperator(ChannelType.OpacityChannel, QuOp.MaxQuantumOp, transparency)
<SYSTEM_TASK:>
Cleanup listing relations
<END_TASK>
<USER_TASK:>
Description:
def cleanup_relations(self):
    """Cleanup listing relations"""
    # Gather every relation model across all collections.
    relations = [rel
                 for col in self.collections.values()
                 for rel in col.model.relations.values()]
    for relation in relations:
        # Delete relation rows no longer referenced by any listing.
        db.session.query(relation)\
                  .filter(~relation.listing.any())\
                  .delete(synchronize_session=False)
    db.session.commit()
<SYSTEM_TASK:>
Cleanup unused tags and discarded datasets.
<END_TASK>
<USER_TASK:>
Description:
def marvcli_cleanup(ctx, discarded, unused_tags):
    """Cleanup unused tags and discarded datasets."""
    # Without any selection flags there is nothing to do: show help.
    if not (discarded or unused_tags):
        click.echo(ctx.get_help())
        ctx.exit(1)

    site = create_app().site
    if discarded:
        site.cleanup_discarded()
    if unused_tags:
        site.cleanup_tags()
        site.cleanup_relations()
<SYSTEM_TASK:>
Run development webserver.
<END_TASK>
<USER_TASK:>
Description:
def marvcli_develop_server(port, public):
    """Run development webserver.

    ATTENTION: By default it is only served on localhost. To run it
    within a container and access it from the outside, you need to
    forward the port and tell it to listen on all IPs instead of only
    localhost.
    """
    from flask_cors import CORS
    app = create_app(push=False)
    app.site.load_for_web()
    # Allow cross-origin requests during development.
    CORS(app)

    class IPDBMiddleware(object):
        # WSGI middleware: drops into ipdb on any unhandled exception.
        def __init__(self, app):
            self.app = app

        def __call__(self, environ, start_response):
            from ipdb import launch_ipdb_on_exception
            with launch_ipdb_on_exception():
                appiter = self.app(environ, start_response)
                for item in appiter:
                    yield item

    app.debug = True
    if IPDB:
        # With ipdb available: wrap the app and disable werkzeug's
        # debugger/reloader so they don't interfere with the ipdb session.
        app.wsgi_app = IPDBMiddleware(app.wsgi_app)
        app.run(use_debugger=False,
                use_reloader=False,
                host=('0.0.0.0' if public else '127.0.0.1'),
                port=port,
                threaded=False)
    else:
        app.run(host=('0.0.0.0' if public else '127.0.0.1'),
                port=port,
                reloader_type='watchdog',
                threaded=False)
<SYSTEM_TASK:>
Mark DATASETS to be discarded or discard associated data.
<END_TASK>
<USER_TASK:>
Description:
def marvcli_discard(datasets, all_nodes, nodes, tags, comments, confirm):
    """Mark DATASETS to be discarded or discard associated data.

    Without any options the specified datasets are marked to be
    discarded via `marv cleanup --discarded`. Use `marv undiscard` to
    undo this operation.

    Otherwise, selected data associated with the specified datasets is
    discarded right away.
    """
    # Plain invocation (no data-selection flags): only mark for cleanup.
    mark_discarded = not any([all_nodes, nodes, tags, comments])
    site = create_app().site
    setids = parse_setids(datasets)
    if tags or comments:
        if confirm:
            msg = ' and '.join(filter(None, ['tags' if tags else None,
                                             'comments' if comments else None]))
            click.echo('About to delete {}'.format(msg))
            click.confirm('This cannot be undone. Do you want to continue?', abort=True)
        # Resolve setids to primary-key ids for the association tables.
        ids = [x[0] for x in db.session.query(Dataset.id).filter(Dataset.setid.in_(setids))]
        if tags:
            where = dataset_tag.c.dataset_id.in_(ids)
            stmt = dataset_tag.delete().where(where)
            db.session.execute(stmt)
        if comments:
            comment_table = Comment.__table__
            where = comment_table.c.dataset_id.in_(ids)
            stmt = comment_table.delete().where(where)
            db.session.execute(stmt)
    if nodes or all_nodes:
        storedir = site.config.marv.storedir
        for setid in setids:
            setdir = os.path.join(storedir, setid)
            # TODO: see where we are getting with dep tree tables
            # NOTE(review): node discarding appears unimplemented --
            # ``setdir`` is computed but never used.
    if mark_discarded:
        dataset = Dataset.__table__
        stmt = dataset.update()\
                      .where(dataset.c.setid.in_(setids))\
                      .values(discarded=True)
        db.session.execute(stmt)
    db.session.commit()
<SYSTEM_TASK:>
Restore previously dumped database
<END_TASK>
<USER_TASK:>
Description:
def marvcli_restore(file):
    """Restore previously dumped database"""
    data = json.load(file)
    app = create_app()
    app.site.restore_database(**data)
<SYSTEM_TASK:>
Query datasets.
<END_TASK>
<USER_TASK:>
Description:
def marvcli_query(ctx, list_tags, collections, discarded, outdated, path, tags, null):
    """Query datasets.

    Use --collection=* to list all datasets across all collections.
    """
    if not any([collections, discarded, list_tags, outdated, path, tags]):
        click.echo(ctx.get_help())
        ctx.exit(1)

    # NUL-separated output for machine consumption, newline otherwise.
    sep = '\x00' if null else '\n'
    site = create_app().site

    if '*' in collections:
        collections = None
    else:
        unknown = [col for col in collections if col not in site.collections]
        if unknown:
            ctx.fail('Unknown collection: {}'.format(unknown[0]))

    if list_tags:
        tags = site.listtags(collections)
        if tags:
            click.echo(sep.join(tags), nl=not null)
        else:
            click.echo('no tags', err=True)
        return

    setids = site.query(collections, discarded, outdated, path, tags)
    if setids:
        click.echo(sep.join(setids), nl=not null)
<SYSTEM_TASK:>
Add or remove tags to datasets
<END_TASK>
<USER_TASK:>
Description:
def marvcli_tag(ctx, add, remove, datasets):
    """Add or remove tags to datasets"""
    # Need at least one dataset and one of --add/--remove.
    if not datasets or not (add or remove):
        click.echo(ctx.get_help())
        ctx.exit(1)

    app = create_app()
    app.site.tag(parse_setids(datasets), add, remove)
<SYSTEM_TASK:>
Add comment as user for one or more datasets
<END_TASK>
<USER_TASK:>
Description:
def marvcli_comment_add(user, message, datasets):
    """Add comment as user for one or more datasets"""
    app = create_app()
    # Validate the user exists before touching any datasets.
    try:
        db.session.query(User).filter(User.name == user).one()
    except NoResultFound:
        click.echo("ERROR: No such user '{}'".format(user), err=True)
        sys.exit(1)
    dataset_ids = parse_setids(datasets, dbids=True)
    app.site.comment(user, message, dataset_ids)
<SYSTEM_TASK:>
Lists comments for datasets.
<END_TASK>
<USER_TASK:>
Description:
def marvcli_comment_list(datasets):
    """Lists comments for datasets.

    Output: setid comment_id date time author message
    """
    app = create_app()
    ids = parse_setids(datasets, dbids=True)
    query = db.session.query(Comment)\
                      .options(db.joinedload(Comment.dataset))\
                      .filter(Comment.dataset_id.in_(ids))
    # Stable ordering: by dataset setid, then comment id.
    for comment in sorted(query, key=lambda c: (c.dataset._setid, c.id)):
        print(comment.dataset.setid, comment.id,
              datetime.datetime.fromtimestamp(int(comment.time_added / 1000)),
              comment.author, repr(comment.text))
<SYSTEM_TASK:>
Remove comments.
<END_TASK>
<USER_TASK:>
Description:
def marvcli_comment_rm(ids):
    """Remove comments.

    Remove comments by id as given in second column of: marv comment list
    """
    # create_app() is needed for its side effect of initializing the app/db.
    app = create_app()
    query = db.session.query(Comment).filter(Comment.id.in_(ids))
    query.delete(synchronize_session=False)
    db.session.commit()
<SYSTEM_TASK:>
Wrapper for ``_watermark``
<END_TASK>
<USER_TASK:>
Description:
def watermark(self, image, options):
    """
    Wrapper for ``_watermark``

    Takes care of all the options handling.
    """
    watermark_img = options.get("watermark", settings.THUMBNAIL_WATERMARK)
    if not watermark_img:
        raise AttributeError("No THUMBNAIL_WATERMARK defined or set on tag.")
    watermark_path = find(watermark_img)
    if not watermark_path:
        raise RuntimeError("Could not find the configured watermark file.")
    if not os.path.isfile(watermark_path):
        raise RuntimeError("Set watermark does not point to a file.")

    # Fill in defaults for any options the tag did not provide.
    options.setdefault("cropbox", None)
    options.setdefault("watermark_alpha", settings.THUMBNAIL_WATERMARK_OPACITY)

    mark_sizes = options.get("watermark_size", settings.THUMBNAIL_WATERMARK_SIZE)
    if not mark_sizes:
        options["watermark_size"] = False
    else:
        try:
            options["watermark_size"] = parse_geometry(
                mark_sizes, self.get_image_ratio(image, options)
            )
        except TypeError as e:
            raise TypeError(
                "Please, update sorl-thumbnail package version to >= 11.12b. %s"
                % e
            )

    options.setdefault("watermark_pos", settings.THUMBNAIL_WATERMARK_POSITION)

    return self._watermark(
        image,
        watermark_path,
        options["watermark_alpha"],
        options["watermark_size"],
        options["watermark_pos"],
    )
<SYSTEM_TASK:>
Functions available for listing columns and filters.
<END_TASK>
<USER_TASK:>
Description:
def make_funcs(dataset, setdir, store):
    """Functions available for listing columns and filters."""
    def make_link(href, title, target=None):
        # Dict consumed by the listing frontend to render a hyperlink.
        return {'href': href or "",
                'title': title or "",
                'target': '_blank' if target is None else target}

    return {
        'cat': lambda *lists: [x for lst in lists for x in lst],
        'comments': lambda: None,
        'detail_route': detail_route,
        'format': lambda fmt, *args: fmt.format(*args),
        'get': partial(getnode, dataset, setdir, store),
        'join': lambda sep, *args: sep.join([x for x in args if x]),
        'len': len,
        'link': make_link,
        'list': lambda *x: filter(None, list(x)),
        'max': max,
        'min': min,
        # '#STATUS#' / '#TAGS#' are placeholders substituted later.
        'status': lambda: ['#STATUS#'],
        'sum': sum,
        'tags': lambda: ['#TAGS#'],
        'trace': print_trace,
    }
<SYSTEM_TASK:>
Functions available for listing summary fields.
<END_TASK>
<USER_TASK:>
Description:
def make_summary_funcs(rows, ids):
    """Functions available for listing summary fields."""
    funcs = {
        'len': len,
        'max': max,
        'min': min,
        'sum': sum,
        'trace': print_trace,
    }
    funcs['list'] = lambda *x: filter(None, list(x))
    # Pre-bind the row set and column ids for the 'rows' accessor.
    funcs['rows'] = partial(summary_rows, rows, ids)
    return funcs
<SYSTEM_TASK:>
Create read-only property that caches its function's value
<END_TASK>
<USER_TASK:>
Description:
def cached_property(func):
    """Create read-only property that caches its function's value

    The wrapped function is evaluated at most once per instance; the
    result is stored on the instance as ``_<funcname>`` and returned
    directly on subsequent reads.

    :param func: zero-argument (besides ``self``) method to wrap.
    :returns: a read-only ``property`` with per-instance caching.
    """
    @functools.wraps(func)
    def cached_func(self):
        # BUGFIX: ``func.func_name`` exists only on Python 2;
        # ``func.__name__`` works on both Python 2 and 3.
        cacheattr = '_{}'.format(func.__name__)
        try:
            return getattr(self, cacheattr)
        except AttributeError:
            value = func(self)
            setattr(self, cacheattr, value)
            return value
    return property(cached_func)
<SYSTEM_TASK:>
Create a stream for publishing messages.
<END_TASK>
<USER_TASK:>
Description:
def create_stream(name, **header):
    """Create a stream for publishing messages.

    All keyword arguments will be used to form the header.

    :param name: name of the stream; must be a string.
    :returns: a :class:`CreateStream` request to be yielded to marv.
    """
    # NOTE(review): ``basestring`` is Python 2 only; under Python 3 this
    # would need ``str`` -- the module appears to target Python 2.
    assert isinstance(name, basestring), name
    return CreateStream(parent=None, name=name, group=False, header=header)
<SYSTEM_TASK:>
Pulls next message for handle.
<END_TASK>
<USER_TASK:>
Description:
def pull(handle, enumerate=False):
    """Pull the next message for a handle.

    Args:
        handle: A :class:`.stream.Handle` or GroupHandle.
        enumerate (bool): If true, a tuple ``(idx, msg)`` is delivered
            instead of just the message, not unlike Python's enumerate().

    Returns:
        A :class:`Pull` task to be yielded. Marv sends the corresponding
        message as soon as it is available. For groups that message is a
        handle to a member of the group; members of groups are either
        streams or groups.

    Examples:
        Pulling (enumerated) message from stream::

            msg = yield marv.pull(stream)
            idx, msg = yield marv.pull(stream, enumerate=True)

        Pulling stream from group and message from stream::

            stream = yield marv.pull(group)  # a group of streams
            msg = yield marv.pull(stream)
    """
    assert isinstance(handle, Handle), handle
    return Pull(handle, enumerate)
<SYSTEM_TASK:>
Extract first image of input stream to jpg file.
<END_TASK>
<USER_TASK:>
Description:
def image(cam):
    """Extract the first image of the input stream to a jpg file.

    Args:
        cam: Input stream of raw rosbag messages.

    Returns:
        File instance for the first image of the input stream.
    """
    # Title the output stream after the camera topic, then fetch one message.
    yield marv.set_header(title=cam.topic)
    rawmsg = yield marv.pull(cam)
    if rawmsg is None:
        return

    # Deserialize the raw ros message into its python message type.
    rosmsg = get_message_type(cam)()
    rosmsg.deserialize(rawmsg.data)

    # Convert to an OpenCV image, write it as jpeg, push the file downstream.
    outname = '{}.jpg'.format(cam.topic.replace('/', ':')[1:])
    outfile = yield marv.make_file(outname)
    frame = imgmsg_to_cv2(rosmsg, "rgb8")
    cv2.imwrite(outfile.path, frame, (cv2.IMWRITE_JPEG_QUALITY, 60))
    yield marv.push(outfile)
<SYSTEM_TASK:>
Create detail section with one image.
<END_TASK>
<USER_TASK:>
Description:
def image_section(image, title):
    """Create a detail section with a single image.

    Args:
        title (str): Title to be displayed for the detail section.
        image: marv image file.

    Returns
        One detail section.
    """
    # Only the first image of the stream is used.
    img = yield marv.pull(image)
    if img is None:
        return
    # Build an image widget and wrap it in a section.
    widget = {'title': image.title, 'image': {'src': img.relpath}}
    yield marv.push({'title': title, 'widgets': [widget]})
<SYSTEM_TASK:>
Extract images from input stream to jpg files.
<END_TASK>
<USER_TASK:>
Description:
def images(cam):
    """Extract images from input stream to jpg files.

    Args:
        cam: Input stream of raw rosbag messages.

    Returns:
        File instances for images of input stream.
    """
    # Set output stream title
    yield marv.set_header(title=cam.topic)

    # The message type depends only on the stream, not on individual
    # messages, so resolve it once instead of on every loop iteration.
    pytype = get_message_type(cam)

    # Fetch and process first 20 image messages
    name_template = '%s-{}.jpg' % cam.topic.replace('/', ':')[1:]
    while True:
        idx, msg = yield marv.pull(cam, enumerate=True)
        if msg is None or idx >= 20:
            break

        # Deserialize raw ros message
        rosmsg = pytype()
        rosmsg.deserialize(msg.data)

        # Write image to jpeg and push it to output stream
        img = imgmsg_to_cv2(rosmsg, "rgb8")
        name = name_template.format(idx)
        imgfile = yield marv.make_file(name)
        cv2.imwrite(imgfile.path, img)
        yield marv.push(imgfile)
<SYSTEM_TASK:>
Create detail section with gallery.
<END_TASK>
<USER_TASK:>
Description:
def gallery_section(images, title):
    """Create a detail section with a gallery.

    Args:
        title (str): Title to be displayed for the detail section.
        images: stream of marv image files

    Returns
        One detail section.
    """
    # Collect every image pushed to the input stream.
    entries = []
    while True:
        img = yield marv.pull(images)
        if img is None:
            break
        entries.append({'src': img.relpath})
    if not entries:
        return
    # Wrap the collected images in a gallery widget inside one section.
    gallery = {'title': images.title, 'gallery': {'images': entries}}
    yield marv.push({'title': title, 'widgets': [gallery]})
<SYSTEM_TASK:>
Set a name on a validator callable.
<END_TASK>
<USER_TASK:>
Description:
def name(name, validator=None):
    """ Set a name on a validator callable.

    Primarily useful for user-friendly reporting when using lambdas to
    populate the [`Invalid.expected`](#invalid) field:

    ```python
    from good import Schema, name

    Schema(lambda x: int(x))('a')
    #-> Invalid: invalid literal for int(): expected <lambda>(), got
    Schema(name('int()', lambda x: int(x))('a')
    #-> Invalid: invalid literal for int(): expected int(), got a
    ```

    This only matters for lambdas, since a function's own name is used when
    available: see notes on [Schema Callables](#callables).

    :param name: Name to assign on the validator callable
    :type name: unicode
    :param validator: Validator callable. If not provided -- a decorator is
        returned instead:

        ```python
        from good import name

        @name(u'int()')
        def int(v):
            return int(v)
        ```

    :type validator: callable
    :return: The same validator callable
    :rtype: callable
    """
    # Direct mode: annotate the callable and hand it straight back.
    if validator is not None:
        validator.name = name
        return validator

    # Decorator mode: produce a decorator that annotates its target.
    def decorator(f):
        f.name = name
        return f
    return decorator
<SYSTEM_TASK:>
Validator factory which call a single method on the string.
<END_TASK>
<USER_TASK:>
Description:
def stringmethod(func):
    """ Validator factory which calls a single method on the string. """
    method_name = func()

    @wraps(func)
    def factory():
        def validator(v):
            # Reject non-strings up front with a typed Invalid error.
            if not isinstance(v, six.string_types):
                raise Invalid(_(u'Not a string'), get_type_name(six.text_type), get_type_name(type(v)))
            # Look up the bound string method by name and invoke it.
            bound = getattr(v, method_name)
            return bound()
        return validator
    return factory
<SYSTEM_TASK:>
Parse %z offset into `timedelta`
<END_TASK>
<USER_TASK:>
Description:
def parse_z(cls, offset):
    """ Parse a %z offset string ("+HHMM") into a `timedelta` """
    assert len(offset) == 5, 'Invalid offset string format, must be "+HHMM"'
    # The sign character is applied to both components: "+HH" and "+MM".
    hours = int(offset[:3])
    minutes = int(offset[0] + offset[3:])
    return timedelta(hours=hours, minutes=minutes)
<SYSTEM_TASK:>
Format `timedelta` into %z
<END_TASK>
<USER_TASK:>
Description:
def format_z(cls, offset):
    """ Format a `timedelta` UTC offset as a %z string ("+HHMM") """
    total = offset.total_seconds()
    sign = '-' if total < 0 else '+'
    # Truncate hours toward zero; Python's floor-mod keeps minutes positive.
    hours = abs(int(total / 3600))
    minutes = int((total % 3600) / 60)
    return '{s}{h:02d}{m:02d}'.format(s=sign, h=hours, m=minutes)
<SYSTEM_TASK:>
Parse a datetime string using the provided format.
<END_TASK>
<USER_TASK:>
Description:
def strptime(cls, value, format):
    """ Parse a datetime string using the provided format.

    This also emulates `%z` support on Python 2.

    :param value: Datetime string
    :type value: str
    :param format: Format to use for parsing
    :type format: str
    :rtype: datetime
    :raises ValueError: Invalid format
    :raises TypeError: Invalid input type
    """
    # %z emulation is only needed when the interpreter lacks native support
    # and the format actually uses '%z'.
    if cls.python_supports_z or '%z' not in format:
        return datetime.strptime(value, format)

    assert format[-2:] == '%z', 'For performance, %z is only supported at the end of the string'
    # Split off the trailing '+HHMM', parse the two halves separately, ...
    naive = datetime.strptime(value[:-5], format[:-2])
    tzinfo = FixedOffset(value[-5:])
    # ... then attach the parsed offset to the naive datetime.
    return naive.replace(tzinfo=tzinfo)
<SYSTEM_TASK:>
Generate a random type and samples for it.
<END_TASK>
<USER_TASK:>
Description:
def generate_random_type(valid):
    """ Generate a random type and samples for it.

    :param valid: Generate valid samples?
    :type valid: bool
    :return: type, sample-generator
    :rtype: type, generator
    """
    kind = choice(['int', 'str'])
    rand = lambda: randrange(-1000000000, 1000000000)
    if kind == 'int':
        # Valid samples are ints; invalid ones are their string forms.
        return int, (rand() if valid else str(rand()) for _ in itertools.count())
    if kind == 'str':
        # Valid samples are stringified ints; invalid ones are raw ints.
        return str, (str(rand()) if valid else rand() for _ in itertools.count())
    raise AssertionError('!')
<SYSTEM_TASK:>
Generate a random plain schema, and a sample generation function.
<END_TASK>
<USER_TASK:>
Description:
def generate_random_schema(valid):
    """ Generate a random plain schema, and a sample generation function.

    :param valid: Generate valid samples?
    :type valid: bool
    :returns: schema, sample-generator
    :rtype: *, generator
    """
    kind = choice(['literal', 'type'])
    if kind == 'type':
        # Type schema: delegate entirely to the type generator.
        return generate_random_type(valid)
    if kind == 'literal':
        # Literal schema: pick one concrete value; valid samples repeat it,
        # invalid samples are None.
        _, gen = generate_random_type(valid)
        value = next(gen)
        return value, (value if valid else None for _ in itertools.count())
    raise AssertionError('!')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.