<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_popset(self,filename='popset.h5',**kwargs):
"""Saves the PopulationSet Calls :func:`PopulationSet.save_hdf`. """
|
self.popset.save_hdf(os.path.join(self.folder, filename), **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_signal(self,filename=None):
""" Saves TransitSignal. Calls :func:`TransitSignal.save`; default filename is ``trsig.pkl`` in ``self.folder``. """
|
if filename is None:
filename = os.path.join(self.folder,'trsig.pkl')
self.trsig.save(filename)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modelshift_weaksec(koi):
""" Max secondary depth based on model-shift secondary test from Jeff Coughlin secondary metric: mod_depth_sec_dv * (1 + 3*mod_fred_dv / mod_sig_sec_dv) """
|
num = KOIDATA.ix[ku.koiname(koi), 'koi_tce_plnt_num']
if np.isnan(num):
num = 1
kid = KOIDATA.ix[ku.koiname(koi), 'kepid']
tce = '{:09.0f}-{:02.0f}'.format(kid,num)
#return largest depth between DV detrending and alternate detrending
try:
r = ROBOVETDATA.ix[tce]
except KeyError:
raise NoWeakSecondaryError(koi)
depth_dv = r['mod_depth_sec_dv'] * (1 + 3*r['mod_fred_dv'] / r['mod_sig_sec_dv'])
depth_alt = r['mod_depth_sec_alt'] * (1 + 3*r['mod_fred_alt'] / r['mod_sig_sec_alt'])
logging.debug(r[['mod_depth_sec_dv','mod_fred_dv','mod_sig_sec_dv']])
logging.debug(r[['mod_depth_sec_alt','mod_fred_alt','mod_sig_sec_alt']])
if np.isnan(depth_dv) and np.isnan(depth_alt):
#return weaksec_vv2(koi)
raise NoWeakSecondaryError(koi)
elif np.isnan(depth_dv):
return depth_alt
elif np.isnan(depth_alt):
return depth_dv
else:
return max(depth_dv, depth_alt)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def use_property(kepid, prop):
"""Returns true if provenance of property is SPE or AST """
|
try:
prov = kicu.DATA.ix[kepid, '{}_prov'.format(prop)]
return any([prov.startswith(s) for s in ['SPE', 'AST']])
except KeyError:
raise MissingStellarError('{} not in stellar table?'.format(kepid))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def star_config(koi, bands=['g','r','i','z','J','H','K'], unc=dict(g=0.05, r=0.05, i=0.05, z=0.05, J=0.02, H=0.02, K=0.02), **kwargs):
"""returns star config object for given KOI """
|
folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))
if not os.path.exists(folder):
os.makedirs(folder)
config = ConfigObj(os.path.join(folder,'star.ini'))
koi = ku.koiname(koi)
maxAV = koi_maxAV(koi)
config['maxAV'] = maxAV
mags = ku.KICmags(koi)
for band in bands:
if not np.isnan(mags[band]):
config[band] = (mags[band], unc[band])
config['Kepler'] = mags['Kepler']
kepid = KOIDATA.ix[koi,'kepid']
if use_property(kepid, 'teff'):
teff, e_teff = (kicu.DATA.ix[kepid, 'teff'],
kicu.DATA.ix[kepid, 'teff_err1'])
if not any(np.isnan([teff, e_teff])):
config['Teff'] = (teff, e_teff)
if use_property(kepid, 'logg'):
logg, e_logg = (kicu.DATA.ix[kepid, 'logg'],
kicu.DATA.ix[kepid, 'logg_err1'])
if not any(np.isnan([logg, e_logg])):
config['logg'] = (logg, e_logg)
if use_property(kepid, 'feh'):
feh, e_feh = (kicu.DATA.ix[kepid, 'feh'],
kicu.DATA.ix[kepid, 'feh_err1'])
if not any(np.isnan([feh, e_feh])):
config['feh'] = (feh, e_feh)
for kw,val in kwargs.items():
config[kw] = val
return config
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fpp_config(koi, **kwargs):
"""returns config object for given KOI """
|
folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))
if not os.path.exists(folder):
os.makedirs(folder)
config = ConfigObj(os.path.join(folder,'fpp.ini'))
koi = ku.koiname(koi)
rowefit = jrowe_fit(koi)
config['name'] = koi
ra,dec = ku.radec(koi)
config['ra'] = ra
config['dec'] = dec
config['rprs'] = rowefit.ix['RD1','val']
config['period'] = rowefit.ix['PE1', 'val']
config['starfield'] = kepler_starfield_file(koi)
for kw,val in kwargs.items():
config[kw] = val
config['constraints'] = {}
config['constraints']['maxrad'] = default_r_exclusion(koi)
try:
config['constraints']['secthresh'] = pipeline_weaksec(koi)
except NoWeakSecondaryError:
pass
return config
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_default_constraints(self):
"""Applies default secthresh & exclusion radius constraints """
|
try:
self.apply_secthresh(pipeline_weaksec(self.koi))
except NoWeakSecondaryError:
logging.warning('No secondary eclipse threshold set for {}'.format(self.koi))
self.set_maxrad(default_r_exclusion(self.koi))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_old_sha(diff_part):
""" Returns the SHA for the original file that was changed in a diff part. """
|
r = re.compile(r'index ([a-fA-F\d]*)')
return r.search(diff_part).groups()[0]
|
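For reference, a sketch of the diff-part header that this helper and the two filename helpers below parse (hashes and paths here are illustrative). `get_old_sha` captures the hex run after ``index`` (here ``0123abc``); the ``---``/``+++`` lines are what `get_old_filename`/`get_new_filename` match:

    diff --git a/foo/bar b/foo/bar
    index 0123abc..789def0 100644
    --- a/foo/bar
    +++ b/foo/bar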
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_old_filename(diff_part):
""" Returns the filename for the original file that was changed in a diff part. """
|
regexps = (
    # e.g. "--- a/foo/bar"
    r'^--- a/(.*)',
    # e.g. "--- /dev/null"
    r'^\-\-\- (.*)',
)
for regexp in regexps:
r = re.compile(regexp, re.MULTILINE)
match = r.search(diff_part)
if match is not None:
return match.groups()[0]
raise MalformedGitDiff("No old filename in diff part found. "
"Examined diff part: {}".format(diff_part))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_new_filename(diff_part):
""" Returns the filename for the updated file in a diff part. """
|
regexps = (
# e.g. "+++ b/foo/bar"
r'^\+\+\+ b/(.*)',
# e.g. "+++ /dev/null"
r'^\+\+\+ (.*)',
)
for regexp in regexps:
r = re.compile(regexp, re.MULTILINE)
match = r.search(diff_part)
if match is not None:
return match.groups()[0]
raise MalformedGitDiff("No new filename in diff part found. "
"Examined diff part: {}".format(diff_part))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_contents(diff_part):
""" Returns a tuple of old content and new content. """
|
old_sha = get_old_sha(diff_part)
old_filename = get_old_filename(diff_part)
old_contents = get_old_contents(old_sha, old_filename)
new_filename = get_new_filename(diff_part)
new_contents = get_new_contents(new_filename)
return old_contents, new_contents
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _loadcache(cachefile):
""" Returns a dictionary resulting from reading a likelihood cachefile """
|
cache = {}
if os.path.exists(cachefile):
with open(cachefile) as f:
for line in f:
    fields = line.split()
    if len(fields) == 2:
        try:
            cache[int(fields[0])] = float(fields[1])
        except ValueError:
            # skip malformed lines rather than failing the whole load
            pass
return cache
|
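A sketch of the cache-file format this loader expects: one integer key and one float value per line, whitespace-separated (the values below are made up); any other line is skipped. The ``lhood`` method further below appends entries in exactly this ``'%i %g'`` form:

    1234567890 0.00317
    -987654321 1.42e-05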
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit_trapezoids(self, MAfn=None, msg=None, use_pbar=True, **kwargs):
""" Fit trapezoid shape to each eclipse in population For each instance in the population, first the correct, physical Mandel-Agol transit shape is simulated, and then this curve is fit with a trapezoid model :param MAfn: :class:`transit_basic.MAInterpolationFunction` object. If not passed, then one with default parameters will be created. :param msg: Message to be displayed for progressbar output. :param **kwargs: Additional keyword arguments passed to :func:`fitebs.fitebs`. """
|
logging.info('Fitting trapezoid models for {}...'.format(self.model))
if msg is None:
msg = '{}: '.format(self.model)
n = len(self.stars)
deps, durs, slopes = (np.zeros(n), np.zeros(n), np.zeros(n))
secs = np.zeros(n, dtype=bool)
dsec = np.zeros(n)
if use_pbar and pbar_ok:
widgets = [msg+'fitting shape parameters for %i systems: ' % n,Percentage(),
' ',Bar(marker=RotatingMarker()),' ',ETA()]
pbar = ProgressBar(widgets=widgets,maxval=n)
pbar.start()
for i in range(n):
logging.debug('Fitting star {}'.format(i))
pri = (self.stars['dpri'][i] > self.stars['dsec'][i] or
np.isnan(self.stars['dsec'][i]))
sec = not pri
secs[i] = sec
if sec:
dsec[i] = self.stars['dpri'][i]
else:
dsec[i] = self.stars['dsec'][i]
try:
trap_pars = self.eclipse_trapfit(i, secondary=sec, **kwargs)
except NoEclipseError:
logging.error('No eclipse registered for star {}'.format(i))
trap_pars = (np.nan, np.nan, np.nan)
except NoFitError:
logging.error('Fit did not converge for star {}'.format(i))
trap_pars = (np.nan, np.nan, np.nan)
except KeyboardInterrupt:
raise
except:
logging.error('Unknown error for star {}'.format(i))
trap_pars = (np.nan, np.nan, np.nan)
if use_pbar and pbar_ok:
pbar.update(i)
durs[i], deps[i], slopes[i] = trap_pars
logging.info('Done.')
self.stars['depth'] = deps
self.stars['duration'] = durs
self.stars['slope'] = slopes
self.stars['secdepth'] = dsec
self.stars['secondary'] = secs
self._make_kde()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eclipseprob(self):
""" Array of eclipse probabilities. """
|
#TODO: incorporate eccentricity/omega for exact calculation?
s = self.stars
return ((s['radius_1'] + s['radius_2'])*RSUN /
(semimajor(s['P'],s['mass_1'] + s['mass_2'])*AU))
|
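A back-of-envelope check of the geometric eclipse probability computed above, assuming ``semimajor`` implements Kepler's third law for periods in days and masses in solar units (the constants and helper here are illustrative, not the module's own):

    RSUN_KM = 6.957e5   # solar radius [km]
    AU_KM = 1.496e8     # astronomical unit [km]

    def semimajor_au(P_days, mtotal_msun):
        # Kepler's third law: a[AU] = (P[yr]**2 * M[Msun])**(1/3)
        return ((P_days / 365.25) ** 2 * mtotal_msun) ** (1. / 3)

    a = semimajor_au(3.0, 2.0)                  # two Sun-like stars, P = 3 d -> a ~ 0.05 AU
    prob = (1.0 + 1.0) * RSUN_KM / (a * AU_KM)  # ~0.18, i.e. eclipses are likely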
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modelshort(self):
""" Short version of model name Dictionary defined in ``populations.py``:: SHORT_MODELNAMES = {'Planets':'pl', 'EBs':'eb', 'HEBs':'heb', 'BEBs':'beb', 'Blended Planets':'bpl', 'Specific BEB':'sbeb', 'Specific HEB':'sheb'} """
|
try:
name = SHORT_MODELNAMES[self.model]
#add index if specific model is indexed
if hasattr(self,'index'):
name += '-{}'.format(self.index)
return name
except KeyError:
raise KeyError('No short name for model: %s' % self.model)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def constrain_secdepth(self, thresh):
""" Constrain the observed secondary depth to be less than a given value :param thresh: Maximum allowed fractional depth for diluted secondary eclipse depth """
|
self.apply_constraint(UpperLimit(self.secondary_depth, thresh, name='secondary depth'))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prior(self):
""" Model prior for particular model. Product of eclipse probability (``self.prob``), the fraction of scenario that is allowed by the various constraints (``self.selectfrac``), and all additional factors in ``self.priorfactors``. """
|
prior = self.prob * self.selectfrac
for f in self.priorfactors:
prior *= self.priorfactors[f]
return prior
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_priorfactor(self,**kwargs):
"""Adds given values to priorfactors If given keyword exists already, error will be raised to use :func:`EclipsePopulation.change_prior` instead. """
|
for kw in kwargs:
if kw in self.priorfactors:
logging.error('%s already in prior factors for %s. use change_prior function instead.' % (kw,self.model))
continue
else:
self.priorfactors[kw] = kwargs[kw]
logging.info('%s added to prior factors for %s' % (kw,self.model))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def change_prior(self, **kwargs):
""" Changes existing priorfactors. If given keyword isn't already in priorfactors, then will be ignored. """
|
for kw in kwargs:
if kw in self.priorfactors:
self.priorfactors[kw] = kwargs[kw]
logging.info('{0} changed to {1} for {2} model'.format(kw,kwargs[kw],
self.model))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _density(self, logd, dur, slope):
""" Evaluate KDE at given points. Prepares data according to whether sklearn or scipy KDE in use. :param log, dur, slope: Trapezoidal shape parameters. """
|
if self.sklearn_kde:
#TODO: fix preprocessing
pts = np.array([(logd - self.mean_logdepth)/self.std_logdepth,
(dur - self.mean_dur)/self.std_dur,
(slope - self.mean_slope)/self.std_slope])
return self.kde.score_samples(pts)
else:
return self.kde(np.array([logd, dur, slope]))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lhood(self, trsig, recalc=False, cachefile=None):
"""Returns likelihood of transit signal Returns sum of ``trsig`` MCMC samples evaluated at ``self.kde``. :param trsig: :class:`vespa.TransitSignal` object. :param recalc: (optional) Whether to recalculate likelihood (if calculation is cached). :param cachefile: (optional) File that holds likelihood calculation cache. """
|
if not hasattr(self,'kde'):
self._make_kde()
if cachefile is None:
cachefile = self.lhoodcachefile
if cachefile is None:
cachefile = 'lhoodcache.dat'
lhoodcache = _loadcache(cachefile)
key = hashcombine(self, trsig)
if key in lhoodcache and not recalc:
return lhoodcache[key]
if self.is_ruled_out:
return 0
N = trsig.kde.dataset.shape[1]
lh = self.kde(trsig.kde.dataset).sum() / N
with open(cachefile, 'a') as fout:
fout.write('%i %g\n' % (key, lh))
return lh
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_hdf(cls, filename, path=''):
    # perhaps this doesn't need to be written?
    """ Loads EclipsePopulation from HDF file.

    Also runs :func:`EclipsePopulation._make_kde` if it can.

    :param filename:
        HDF file.

    :param path: (optional)
        Path within HDF file.
    """
|
new = StarPopulation.load_hdf(filename, path=path)
#setup lazy loading of starmodel if present
try:
with pd.HDFStore(filename) as store:
if '{}/starmodel'.format(path) in store:
new._starmodel = None
new._starmodel_file = filename
new._starmodel_path = '{}/starmodel'.format(path)
except:
pass
try:
new._make_kde()
except NoTrapfitError:
logging.warning('Trapezoid fit not done.')
return new
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def constraints(self):
""" Unique list of constraints among all populations in set. """
|
cs = []
for pop in self.poplist:
cs += [c for c in pop.constraints]
return list(set(cs))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_hdf(self, filename, path='', overwrite=False):
""" Saves PopulationSet to HDF file. """
|
if os.path.exists(filename) and overwrite:
os.remove(filename)
for pop in self.poplist:
name = pop.modelshort
pop.save_hdf(filename, path='{}/{}'.format(path,name), append=True)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_hdf(cls, filename, path=''):
""" Loads PopulationSet from file """
|
with pd.HDFStore(filename) as store:
models = []
types = []
for k in store.keys():
m = re.search(r'/(\S+)/stars', k)
if m:
models.append(m.group(1))
types.append(store.get_storer(m.group(0)).attrs.poptype)
poplist = []
for m,t in zip(models,types):
poplist.append(t().load_hdf(filename, path='{}/{}'.format(path,m)))
return cls(poplist)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_population(self,pop):
"""Adds population to PopulationSet """
|
if pop.model in self.modelnames:
raise ValueError('%s model already in PopulationSet.' % pop.model)
self.modelnames.append(pop.model)
self.shortmodelnames.append(pop.modelshort)
self.poplist.append(pop)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_population(self,pop):
"""Removes population from PopulationSet """
|
iremove = None
for i in range(len(self.poplist)):
    if self.poplist[i].model == pop.model:
        iremove = i
if iremove is not None:
    self.modelnames.pop(iremove)
    self.shortmodelnames.pop(iremove)
    self.poplist.pop(iremove)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def colordict(self):
""" Dictionary holding colors that correspond to constraints. """
|
d = {}
i=0
n = len(self.constraints)
for c in self.constraints:
#self.colordict[c] = colors[i % 6]
d[c] = cm.jet(1.*i/n)
i+=1
return d
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def priorfactors(self):
"""Combinartion of priorfactors from all populations """
|
priorfactors = {}
for pop in self.poplist:
for f in pop.priorfactors:
if f in priorfactors:
if pop.priorfactors[f] != priorfactors[f]:
raise ValueError('prior factor %s is inconsistent!' % f)
else:
priorfactors[f] = pop.priorfactors[f]
return priorfactors
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_multicolor_transit(self,band,depth):
""" Applies constraint corresponding to measuring transit in different band This is not implemented yet. """
|
if '{} band transit'.format(band) not in self.constraints:
self.constraints.append('{} band transit'.format(band))
for pop in self.poplist:
pop.apply_multicolor_transit(band,depth)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_maxrad(self,newrad):
""" Sets max allowed radius in populations. Doesn't operate via the :class:`stars.Constraint` protocol; rather just rescales the sky positions for the background objects and recalculates sky area, etc. """
|
if not isinstance(newrad, Quantity):
newrad = newrad * u.arcsec
#if 'Rsky' not in self.constraints:
# self.constraints.append('Rsky')
for pop in self.poplist:
if not pop.is_specific:
try:
pop.maxrad = newrad
except AttributeError:
pass
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_dmaglim(self,dmaglim=None):
""" Applies a constraint that sets the maximum brightness for non-target star :func:`stars.StarPopulation.set_dmaglim` not yet implemented. """
|
raise NotImplementedError
if 'bright blend limit' not in self.constraints:
self.constraints.append('bright blend limit')
for pop in self.poplist:
if not hasattr(pop,'dmaglim') or pop.is_specific:
continue
if dmaglim is None:
dmag = pop.dmaglim
else:
dmag = dmaglim
pop.set_dmaglim(dmag)
self.dmaglim = dmaglim
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_trend_constraint(self, limit, dt, **kwargs):
""" Applies constraint corresponding to RV trend non-detection to each population See :func:`stars.StarPopulation.apply_trend_constraint`; all arguments passed to that function for each population. """
|
if 'RV monitoring' not in self.constraints:
self.constraints.append('RV monitoring')
for pop in self.poplist:
if not hasattr(pop,'dRV'):
continue
pop.apply_trend_constraint(limit, dt, **kwargs)
self.trend_limit = limit
self.trend_dt = dt
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_secthresh(self, secthresh, **kwargs):
"""Applies secondary depth constraint to each population See :func:`EclipsePopulation.apply_secthresh`; all arguments passed to that function for each population. """
|
if 'secondary depth' not in self.constraints:
self.constraints.append('secondary depth')
for pop in self.poplist:
if not isinstance(pop, EclipsePopulation_Px2):
pop.apply_secthresh(secthresh, **kwargs)
self.secthresh = secthresh
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def constrain_property(self,prop,**kwargs):
""" Constrains property for each population See :func:`vespa.stars.StarPopulation.constrain_property`; all arguments passed to that function for each population. """
|
if prop not in self.constraints:
self.constraints.append(prop)
for pop in self.poplist:
try:
pop.constrain_property(prop,**kwargs)
except AttributeError:
logging.info('%s model does not have property stars.%s (constraint not applied)' % (pop.model,prop))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace_constraint(self,name,**kwargs):
""" Replaces removed constraint in each population. See :func:`vespa.stars.StarPopulation.replace_constraint` """
|
for pop in self.poplist:
pop.replace_constraint(name,**kwargs)
if name not in self.constraints:
self.constraints.append(name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_constraint(self,*names):
""" Removes constraint from each population See :func:`vespa.stars.StarPopulation.remove_constraint """
|
for name in names:
for pop in self.poplist:
if name in pop.constraints:
pop.remove_constraint(name)
else:
logging.info('%s model does not have %s constraint' % (pop.model,name))
if name in self.constraints:
self.constraints.remove(name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_cc(self, cc, **kwargs):
""" Applies contrast curve constraint to each population See :func:`vespa.stars.StarPopulation.apply_cc`; all arguments passed to that function for each population. """
|
if isinstance(cc, str):
    pass  # no-op: string (e.g. filename) input is currently ignored
if cc.name not in self.constraints:
self.constraints.append(cc.name)
for pop in self.poplist:
if not pop.is_specific:
try:
pop.apply_cc(cc, **kwargs)
except AttributeError:
logging.info('%s cc not applied to %s model' % (cc.name,pop.model))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_vcc(self,vcc):
""" Applies velocity contrast curve constraint to each population See :func:`vespa.stars.StarPopulation.apply_vcc`; all arguments passed to that function for each population. """
|
if 'secondary spectrum' not in self.constraints:
self.constraints.append('secondary spectrum')
for pop in self.poplist:
if not pop.is_specific:
try:
pop.apply_vcc(vcc)
except:
logging.info('VCC constraint not applied to %s model' % (pop.model))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_trilegal(filename, ra, dec, folder='.', galactic=False,
                 filterset='kepler_2mass', area=1, maglim=27, binaries=False,
                 trilegal_version='1.6', sigma_AV=0.1, convert_h5=True):
    """Runs get_trilegal perl script; optionally saves output into .h5 file.

    Depends on a perl script provided by L. Girardi; calls the web form
    simulation, downloads the file, and (optionally) converts to HDF format.

    Uses A_V at infinity from :func:`utils.get_AV_infinity`.

    .. note::

        Would be desirable to re-write the get_trilegal script
        all in python.

    :param filename:
        Desired output filename. If extension not provided, it will
        be added.

    :param ra,dec:
        Coordinates for line-of-sight simulation (interpreted as
        galactic l, b if ``galactic=True``).

    :param folder: (optional)
        Folder to which to save file. *Acknowledged, file control
        in this function is a bit wonky.*

    :param filterset: (optional)
        Filter set for which to call TRILEGAL.

    :param area: (optional)
        Area of TRILEGAL simulation [sq. deg]

    :param maglim: (optional)
        Limiting magnitude in first mag (by default will be Kepler
        band). If you want to limit in a different band, you have to
        go directly to the ``get_trilegal`` perl script.

    :param binaries: (optional)
        Whether to have TRILEGAL include binary stars. Default ``False``.

    :param trilegal_version: (optional)
        Default ``'1.6'``.

    :param sigma_AV: (optional)
        Fractional spread in A_V along the line of sight.

    :param convert_h5: (optional)
        If true, text file downloaded from TRILEGAL will be converted
        into a ``pandas.DataFrame`` stored in an HDF file, with
        ``'df'`` path.
    """
|
if galactic:
l, b = ra, dec
else:
try:
c = SkyCoord(ra,dec)
except UnitsError:
c = SkyCoord(ra,dec,unit='deg')
l,b = (c.galactic.l.value,c.galactic.b.value)
if os.path.isabs(filename):
folder = ''
if not re.search(r'\.dat$', filename):
outfile = '{}/{}.dat'.format(folder,filename)
else:
outfile = '{}/{}'.format(folder,filename)
AV = get_AV_infinity(l,b,frame='galactic')
#cmd = 'get_trilegal %s %f %f %f %i %.3f %.2f %s 1 %.1f %s' % (trilegal_version,l,b,
# area,binaries,AV,sigma_AV,
# filterset,maglim,outfile)
#sp.Popen(cmd,shell=True).wait()
trilegal_webcall(trilegal_version,l,b,area,binaries,AV,sigma_AV,filterset,maglim,outfile)
if convert_h5:
df = pd.read_table(outfile, sep=r'\s+', skipfooter=1, engine='python')
df = df.rename(columns={'#Gc':'Gc'})
for col in df.columns:
if col not in NONMAG_COLS:
df.rename(columns={col:'{}_mag'.format(col)},inplace=True)
if not re.search(r'\.h5$', filename):
h5file = '{}/{}.h5'.format(folder,filename)
else:
h5file = '{}/{}'.format(folder,filename)
df.to_hdf(h5file,'df')
with pd.HDFStore(h5file) as store:
attrs = store.get_storer('df').attrs
attrs.trilegal_args = {'version':trilegal_version,
'ra':ra, 'dec':dec,
'l':l,'b':b,'area':area,
'AV':AV, 'sigma_AV':sigma_AV,
'filterset':filterset,
'maglim':maglim,
'binaries':binaries}
os.remove(outfile)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_wrapper(self):
""" Wrapper to set logging parameters for output """
|
log = logging.getLogger('client.py')
# Set the log format and log level
try:
debug = self.params["debug"]
log.setLevel(logging.DEBUG)
except KeyError:
log.setLevel(logging.INFO)
# Set the log format.
stream = logging.StreamHandler()
logformat = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%b %d %H:%M:%S')
stream.setFormatter(logformat)
log.addHandler(stream)
return log
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode_setid(encoded):
"""Decode setid as uint128"""
|
try:
lo, hi = struct.unpack('<QQ', b32decode(encoded.upper() + '======'))
except struct.error:
raise ValueError('Cannot decode {!r}'.format(encoded))
return (hi << 64) + lo
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode_setid(uint128):
"""Encode uint128 setid as stripped b32encoded string"""
|
hi, lo = divmod(uint128, 2**64)
return b32encode(struct.pack('<QQ', lo, hi))[:-6].lower()
|
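A round-trip sketch of the packing used by these two helpers, written for Python 3 where base32 operates on bytes (the originals above are Python 2 flavored). A 128-bit id is split into two little-endian uint64 words; 16 bytes base32-encode to 26 characters plus exactly six ``=`` of padding, which is why ``[:-6]`` and ``+ '======'`` are inverses:

    import struct
    from base64 import b32encode, b32decode

    def encode(uint128):
        hi, lo = divmod(uint128, 2**64)
        return b32encode(struct.pack('<QQ', lo, hi))[:-6].lower().decode()

    def decode(encoded):
        lo, hi = struct.unpack('<QQ', b32decode(encoded.upper() + '======'))
        return (hi << 64) + lo

    uid = (42 << 64) + 7
    assert decode(encode(uid)) == uid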
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _reduce_opacity(self, watermark, opacity):
""" Returns an image with reduced opacity. Converts image to RGBA if needs. Simple watermark.opacity(65535 - int(65535 * opacity) would not work for images with the Opacity channel (RGBA images). So we have to convert RGB or any other type to RGBA in this case """
|
if watermark.type() != ImageType.TrueColorMatteType:
watermark.type(ImageType.TrueColorMatteType)
depth = 255 - int(255 * opacity)
watermark.quantumOperator(ChannelType.OpacityChannel, QuOp.MaxQuantumOp, depth)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cleanup_relations(self):
"""Cleanup listing relations"""
|
collections = self.collections
for relation in [x for col in collections.values()
for x in col.model.relations.values()]:
db.session.query(relation)\
.filter(~relation.listing.any())\
.delete(synchronize_session=False)
db.session.commit()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_cleanup(ctx, discarded, unused_tags):
"""Cleanup unused tags and discarded datasets."""
|
if not any([discarded, unused_tags]):
click.echo(ctx.get_help())
ctx.exit(1)
site = create_app().site
if discarded:
site.cleanup_discarded()
if unused_tags:
site.cleanup_tags()
site.cleanup_relations()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_develop_server(port, public):
"""Run development webserver. ATTENTION: By default it is only served on localhost. To run it within a container and access it from the outside, you need to forward the port and tell it to listen on all IPs instead of only localhost. """
|
from flask_cors import CORS
app = create_app(push=False)
app.site.load_for_web()
CORS(app)
class IPDBMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
from ipdb import launch_ipdb_on_exception
with launch_ipdb_on_exception():
appiter = self.app(environ, start_response)
for item in appiter:
yield item
app.debug = True
if IPDB:
app.wsgi_app = IPDBMiddleware(app.wsgi_app)
app.run(use_debugger=False,
use_reloader=False,
host=('0.0.0.0' if public else '127.0.0.1'),
port=port,
threaded=False)
else:
app.run(host=('0.0.0.0' if public else '127.0.0.1'),
port=port,
reloader_type='watchdog',
threaded=False)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_discard(datasets, all_nodes, nodes, tags, comments, confirm):
"""Mark DATASETS to be discarded or discard associated data. Without any options the specified datasets are marked to be discarded via `marv cleanup --discarded`. Use `marv undiscard` to undo this operation. Otherwise, selected data associated with the specified datasets is discarded right away. """
|
mark_discarded = not any([all_nodes, nodes, tags, comments])
site = create_app().site
setids = parse_setids(datasets)
if tags or comments:
if confirm:
msg = ' and '.join(filter(None, ['tags' if tags else None,
'comments' if comments else None]))
click.echo('About to delete {}'.format(msg))
click.confirm('This cannot be undone. Do you want to continue?', abort=True)
ids = [x[0] for x in db.session.query(Dataset.id).filter(Dataset.setid.in_(setids))]
if tags:
where = dataset_tag.c.dataset_id.in_(ids)
stmt = dataset_tag.delete().where(where)
db.session.execute(stmt)
if comments:
comment_table = Comment.__table__
where = comment_table.c.dataset_id.in_(ids)
stmt = comment_table.delete().where(where)
db.session.execute(stmt)
if nodes or all_nodes:
storedir = site.config.marv.storedir
for setid in setids:
setdir = os.path.join(storedir, setid)
# TODO: see where we are getting with dep tree tables
if mark_discarded:
dataset = Dataset.__table__
stmt = dataset.update()\
.where(dataset.c.setid.in_(setids))\
.values(discarded=True)
db.session.execute(stmt)
db.session.commit()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_undiscard(datasets):
"""Undiscard DATASETS previously discarded."""
|
create_app()
setids = parse_setids(datasets, discarded=True)
dataset = Dataset.__table__
stmt = dataset.update()\
.where(dataset.c.setid.in_(setids))\
.values(discarded=False)
db.session.execute(stmt)
db.session.commit()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_restore(file):
"""Restore previously dumped database"""
|
data = json.load(file)
site = create_app().site
site.restore_database(**data)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_query(ctx, list_tags, collections, discarded, outdated, path, tags, null):
"""Query datasets. Use --collection=* to list all datasets across all collections. """
|
if not any([collections, discarded, list_tags, outdated, path, tags]):
click.echo(ctx.get_help())
ctx.exit(1)
sep = '\x00' if null else '\n'
site = create_app().site
if '*' in collections:
collections = None
else:
for col in collections:
if col not in site.collections:
ctx.fail('Unknown collection: {}'.format(col))
if list_tags:
tags = site.listtags(collections)
if tags:
click.echo(sep.join(tags), nl=not null)
else:
click.echo('no tags', err=True)
return
setids = site.query(collections, discarded, outdated, path, tags)
if setids:
    click.echo(sep.join(setids), nl=not null)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_tag(ctx, add, remove, datasets):
"""Add or remove tags to datasets"""
|
if not any([add, remove]) or not datasets:
click.echo(ctx.get_help())
ctx.exit(1)
app = create_app()
setids = parse_setids(datasets)
app.site.tag(setids, add, remove)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_comment_add(user, message, datasets):
"""Add comment as user for one or more datasets"""
|
app = create_app()
try:
db.session.query(User).filter(User.name==user).one()
except NoResultFound:
click.echo("ERROR: No such user '{}'".format(user), err=True)
sys.exit(1)
ids = parse_setids(datasets, dbids=True)
app.site.comment(user, message, ids)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_comment_list(datasets):
"""Lists comments for datasets. Output: setid comment_id date time author message """
|
app = create_app()
ids = parse_setids(datasets, dbids=True)
comments = db.session.query(Comment)\
.options(db.joinedload(Comment.dataset))\
.filter(Comment.dataset_id.in_(ids))
for comment in sorted(comments, key=lambda x: (x.dataset._setid, x.id)):
print(comment.dataset.setid, comment.id,
datetime.datetime.fromtimestamp(int(comment.time_added / 1000)),
comment.author, repr(comment.text))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_comment_rm(ids):
"""Remove comments. Remove comments by id as given in second column of: marv comment list """
|
app = create_app()
db.session.query(Comment)\
.filter(Comment.id.in_(ids))\
.delete(synchronize_session=False)
db.session.commit()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_user_list():
"""List existing users"""
|
app = create_app()
for name in db.session.query(User.name).order_by(User.name):
click.echo(name[0])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marvcli_user_rm(ctx, username):
"""Remove a user"""
|
app = create_app()
try:
app.um.user_rm(username)
except ValueError as e:
ctx.fail(e.args[0])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def watermark(self, image, options):
""" Wrapper for ``_watermark`` Takes care of all the options handling. """
|
watermark_img = options.get("watermark", settings.THUMBNAIL_WATERMARK)
if not watermark_img:
raise AttributeError("No THUMBNAIL_WATERMARK defined or set on tag.")
watermark_path = find(watermark_img)
if not watermark_path:
raise RuntimeError("Could not find the configured watermark file.")
if not os.path.isfile(watermark_path):
raise RuntimeError("Set watermark does not point to a file.")
if "cropbox" not in options:
options["cropbox"] = None
if "watermark_alpha" not in options:
options["watermark_alpha"] = settings.THUMBNAIL_WATERMARK_OPACITY
mark_sizes = options.get("watermark_size", settings.THUMBNAIL_WATERMARK_SIZE)
if mark_sizes:
try:
options["watermark_size"] = parse_geometry(
mark_sizes, self.get_image_ratio(image, options)
)
except TypeError as e:
raise TypeError(
"Please, update sorl-thumbnail package version to >= 11.12b. %s"
% e
)
else:
options["watermark_size"] = False
if "watermark_pos" not in options:
options["watermark_pos"] = settings.THUMBNAIL_WATERMARK_POSITION
return self._watermark(
image,
watermark_path,
options["watermark_alpha"],
options["watermark_size"],
options["watermark_pos"],
)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_funcs(dataset, setdir, store):
"""Functions available for listing columns and filters."""
|
return {
'cat': lambda *lists: [x for lst in lists for x in lst],
'comments': lambda: None,
'detail_route': detail_route,
'format': lambda fmt, *args: fmt.format(*args),
'get': partial(getnode, dataset, setdir, store),
'join': lambda sep, *args: sep.join([x for x in args if x]),
'len': len,
'link': (lambda href, title, target=None:
{'href': href or "",
'title': title or "",
'target': '_blank' if target is None else target}),
'list': lambda *x: filter(None, list(x)),
'max': max,
'min': min,
'status': lambda: ['#STATUS#'],
'sum': sum,
'tags': lambda: ['#TAGS#'],
'trace': print_trace,
}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_summary_funcs(rows, ids):
"""Functions available for listing summary fields."""
|
return {
'len': len,
'list': lambda *x: filter(None, list(x)),
'max': max,
'min': min,
'rows': partial(summary_rows, rows, ids),
'sum': sum,
'trace': print_trace
}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cached_property(func):
"""Create read-only property that caches its function's value"""
|
@functools.wraps(func)
def cached_func(self):
cacheattr = '_{}'.format(func.func_name)
try:
return getattr(self, cacheattr)
except AttributeError:
value = func(self)
setattr(self, cacheattr, value)
return value
return property(cached_func)
|
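Usage sketch for the decorator above. Note that ``func.func_name`` is Python 2; on Python 3 the equivalent attribute is ``func.__name__``. The computed value is stashed on the instance (here as ``_area``), so the function body runs only once:

    class Circle(object):
        def __init__(self, r):
            self.r = r

        @cached_property
        def area(self):
            print('computing...')
            return 3.14159 * self.r ** 2

    c = Circle(2)
    c.area   # prints 'computing...', caches result in c._area
    c.area   # served from the cache, no recomputation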
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_stream(name, **header):
"""Create a stream for publishing messages. All keyword arguments will be used to form the header. """
|
assert isinstance(name, basestring), name
return CreateStream(parent=None, name=name, group=False, header=header)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pull(handle, enumerate=False):
"""Pulls next message for handle. Args: handle: A :class:`.stream.Handle` or GroupHandle. enumerate (bool):
boolean to indicate whether a tuple ``(idx, msg)`` should be returned, not unlike Python's enumerate(). Returns: A :class:`Pull` task to be yielded. Marv will send the corresponding message as soon as it is available. For groups this message will be a handle to a member of the group. Members of groups are either streams or groups. Examples: Pulling (enumerated) message from stream:: msg = yield marv.pull(stream) idx, msg = yield marv.pull(stream, enumerate=True) Pulling stream from group and message from stream:: stream = yield marv.pull(group) # a group of streams msg = yield marv.pull(stream) """
|
assert isinstance(handle, Handle), handle
return Pull(handle, enumerate)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_geometry(geometry, ratio=None):
""" Enhanced parse_geometry parser with percentage support. """
|
if "%" not in geometry:
# fall back to old parser
return xy_geometry_parser(geometry, ratio)
# parse with float so geometry strings like "42.11%" are possible
return float(geometry.strip("%")) / 100.0
|
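Behavior sketch: a percentage string becomes a plain float fraction, while anything else is delegated to the stock parser (the third call assumes ``xy_geometry_parser`` is the usual sorl-thumbnail geometry parser referenced above):

    parse_geometry('42.11%')        # -> 0.4211
    parse_geometry('50%')           # -> 0.5
    parse_geometry('100x80', 1.25)  # falls through to xy_geometry_parser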
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def image(cam):
"""Extract first image of input stream to jpg file. Args: cam: Input stream of raw rosbag messages. Returns: File instance for first image of input stream. """
|
# Set output stream title and pull first message
yield marv.set_header(title=cam.topic)
msg = yield marv.pull(cam)
if msg is None:
return
# Deserialize raw ros message
pytype = get_message_type(cam)
rosmsg = pytype()
rosmsg.deserialize(msg.data)
# Write image to jpeg and push it to output stream
name = '{}.jpg'.format(cam.topic.replace('/', ':')[1:])
imgfile = yield marv.make_file(name)
img = imgmsg_to_cv2(rosmsg, "rgb8")
cv2.imwrite(imgfile.path, img, (cv2.IMWRITE_JPEG_QUALITY, 60))
yield marv.push(imgfile)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def image_section(image, title):
"""Create detail section with one image. Args: title (str):
Title to be displayed for detail section. image: marv image file. Returns One detail section. """
|
# pull first image
img = yield marv.pull(image)
if img is None:
return
# create image widget and section containing it
widget = {'title': image.title, 'image': {'src': img.relpath}}
section = {'title': title, 'widgets': [widget]}
yield marv.push(section)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def images(cam):
"""Extract images from input stream to jpg files. Args: cam: Input stream of raw rosbag messages. Returns: File instances for images of input stream. """
|
# Set output stream title and pull first message
yield marv.set_header(title=cam.topic)
# Fetch and process first 20 image messages
name_template = '%s-{}.jpg' % cam.topic.replace('/', ':')[1:]
while True:
idx, msg = yield marv.pull(cam, enumerate=True)
if msg is None or idx >= 20:
break
# Deserialize raw ros message
pytype = get_message_type(cam)
rosmsg = pytype()
rosmsg.deserialize(msg.data)
# Write image to jpeg and push it to output stream
img = imgmsg_to_cv2(rosmsg, "rgb8")
name = name_template.format(idx)
imgfile = yield marv.make_file(name)
cv2.imwrite(imgfile.path, img)
yield marv.push(imgfile)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gallery_section(images, title):
"""Create detail section with gallery. Args: title (str):
Title to be displayed for detail section. images: stream of marv image files Returns One detail section. """
|
# pull all images
imgs = []
while True:
img = yield marv.pull(images)
if img is None:
break
imgs.append({'src': img.relpath})
if not imgs:
return
# create gallery widget and section containing it
widget = {'title': images.title, 'gallery': {'images': imgs}}
section = {'title': title, 'widgets': [widget]}
yield marv.push(section)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filesizes(images):
"""Stat filesize of files. Args: images: stream of marv image files Returns: Stream of filesizes """
|
# Pull each image and push its filesize
while True:
img = yield marv.pull(images)
if img is None:
break
yield marv.push(img.size)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def name(name, validator=None):
""" Set a name on a validator callable. Useful for user-friendly reporting when using lambdas to populate the [`Invalid.expected`](#invalid) field: ```python from good import Schema, name Schema(lambda x: int(x))('a') #-> Invalid: invalid literal for int():
expected <lambda>(), got Schema(name('int()', lambda x: int(x))('a') #-> Invalid: invalid literal for int():
expected int(), got a ``` Note that it is only useful with lambdas, since function name is used if available: see notes on [Schema Callables](#callables). :param name: Name to assign on the validator callable :type name: unicode :param validator: Validator callable. If not provided -- a decorator is returned instead: ```python from good import name @name(u'int()') def int(v):
return int(v) ``` :type validator: callable :return: The same validator callable :rtype: callable """
|
# Decorator mode
if validator is None:
def decorator(f):
f.name = name
return f
return decorator
# Direct mode
validator.name = name
return validator
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stringmethod(func):
""" Validator factory which call a single method on the string. """
|
method_name = func()
@wraps(func)
def factory():
def validator(v):
if not isinstance(v, six.string_types):
raise Invalid(_(u'Not a string'), get_type_name(six.text_type), get_type_name(type(v)))
return getattr(v, method_name)()
return validator
return factory
|
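Usage sketch: the decorated function merely names the string method to call, and the factory produces a validator that type-checks and delegates (``Lower`` here is a hypothetical example, not necessarily one of the library's own validators):

    @stringmethod
    def Lower():
        return 'lower'

    validate = Lower()   # factory() -> validator
    validate(u'ABC')     # -> u'abc'
    validate(5)          # raises Invalid: Not a string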
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_z(cls, offset):
""" Parse %z offset into `timedelta` """
|
assert len(offset) == 5, 'Invalid offset string format, must be "+HHMM"'
return timedelta(hours=int(offset[:3]), minutes=int(offset[0] + offset[3:]))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_z(cls, offset):
""" Format `timedelta` into %z """
|
sec = offset.total_seconds()
return '{s}{h:02d}{m:02d}'.format(s='-' if sec<0 else '+', h=abs(int(sec/3600)), m=int((sec%3600)/60))
|
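Worked example of the sign handling shared by these two classmethods (``DT`` stands in for whatever class hosts them). In ``parse_z('-0530')``, ``offset[:3]`` is ``'-05'`` and ``offset[0] + offset[3:]`` is ``'-30'``, so the hour and minute components both carry the sign:

    from datetime import timedelta

    DT.parse_z('+0530')                           # -> timedelta(hours=5, minutes=30)
    DT.parse_z('-0530')                           # -> -timedelta(hours=5, minutes=30)
    DT.format_z(-timedelta(hours=5, minutes=30))  # -> '-0530'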
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def strptime(cls, value, format):
""" Parse a datetime string using the provided format. This also emulates `%z` support on Python 2. :param value: Datetime string :type value: str :param format: Format to use for parsing :type format: str :rtype: datetime :raises ValueError: Invalid format :raises TypeError: Invalid input type """
|
# Simplest case: direct parsing
if cls.python_supports_z or '%z' not in format:
return datetime.strptime(value, format)
else:
# %z emulation case
assert format[-2:] == '%z', 'For performance, %z is only supported at the end of the string'
# Parse
dt = datetime.strptime(value[:-5], format[:-2]) # cutoff '%z' and '+0000'
tz = FixedOffset(value[-5:]) # parse %z into tzinfo
# Localize
return dt.replace(tzinfo=tz)
|
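Sketch of the emulation path (Python before 3.2 lacked ``%z`` support in ``strptime``; ``DT`` is again the hypothetical host class): the trailing five characters are cut off, parsed into a ``FixedOffset`` tzinfo, and attached with ``replace``:

    dt = DT.strptime('2014-05-06 12:00:00+0530', '%Y-%m-%d %H:%M:%S%z')
    # value[:-5] = '2014-05-06 12:00:00' is parsed with format[:-2];
    # value[-5:] = '+0530' becomes FixedOffset('+0530'), set via replace(tzinfo=...)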
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_random_type(valid):
""" Generate a random type and samples for it. :param valid: Generate valid samples? :type valid: bool :return: type, sample-generator :rtype: type, generator """
|
type = choice(['int', 'str'])
r = lambda: randrange(-1000000000, 1000000000)
if type == 'int':
return int, (r() if valid else str(r()) for i in itertools.count())
elif type == 'str':
return str, (str(r()) if valid else r() for i in itertools.count())
else:
raise AssertionError('!')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_random_schema(valid):
""" Generate a random plain schema, and a sample generation function. :param valid: Generate valid samples? :type valid: bool :returns: schema, sample-generator :rtype: *, generator """
|
schema_type = choice(['literal', 'type'])
if schema_type == 'literal':
type, gen = generate_random_type(valid)
value = next(gen)
return value, (value if valid else None for i in itertools.count())
elif schema_type == 'type':
return generate_random_type(valid)
else:
raise AssertionError('!')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_dict_schema(size, valid):
""" Generate a schema dict of size `size` using library `lib`. In addition, it returns samples generator :param size: Schema size :type size: int :param samples: The number of samples to generate :type samples: int :param valid: Generate valid samples? :type valid: bool :returns """
|
schema = {}
generator_items = []
# Generate schema
for i in range(0, size):
while True:
key_schema, key_generator = generate_random_schema(valid)
if key_schema not in schema:
break
value_schema, value_generator = generate_random_schema(valid)
schema[key_schema] = value_schema
generator_items.append((key_generator, value_generator))
# Samples
generator = ({next(k_gen): next(v_gen) for k_gen, v_gen in generator_items} for i in itertools.count())
# Finish
return schema, generator
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calc_q_statistic(x, h, nt):
"""Calculate Portmanteau statistics up to a lag of h. """
|
t, m, n = x.shape
# covariance matrix of x
c0 = acm(x, 0)
# LU factorization of covariance matrix
c0f = sp.linalg.lu_factor(c0, overwrite_a=False, check_finite=True)
q = np.zeros((3, h + 1))
for l in range(1, h + 1):
cl = acm(x, l)
# calculate tr(cl' * c0^-1 * cl * c0^-1)
a = sp.linalg.lu_solve(c0f, cl)
b = sp.linalg.lu_solve(c0f, cl.T)
tmp = a.dot(b).trace()
# Box-Pierce
q[0, l] = tmp
# Ljung-Box
q[1, l] = tmp / (nt - l)
# Li-McLeod
q[2, l] = tmp
q *= nt
q[1, :] *= (nt + 2)
q = np.cumsum(q, axis=1)
for l in range(1, h+1):
q[2, l] = q[0, l] + m * m * l * (l + 1) / (2 * nt)
return q
|
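For reference, the three statistics assembled above, reading directly off the code (with $C_l$ the lag-$l$ covariance matrix, $C_0$ its lag-0 counterpart, $m$ the number of channels, and $n$ the effective sample count ``nt``):

$$Q_{\mathrm{BP}}(h) = n \sum_{l=1}^{h} \operatorname{tr}\!\left(C_l^{\top} C_0^{-1} C_l C_0^{-1}\right), \qquad Q_{\mathrm{LB}}(h) = n(n+2) \sum_{l=1}^{h} \frac{\operatorname{tr}\!\left(C_l^{\top} C_0^{-1} C_l C_0^{-1}\right)}{n-l},$$

$$Q_{\mathrm{LM}}(h) = Q_{\mathrm{BP}}(h) + \frac{m^2\, h(h+1)}{2n},$$

i.e. the Box-Pierce, Ljung-Box, and Li-McLeod portmanteau statistics, respectively.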
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calc_q_h0(n, x, h, nt, n_jobs=1, verbose=0, random_state=None):
"""Calculate q under the null hypothesis of whiteness. """
|
rng = check_random_state(random_state)
par, func = parallel_loop(_calc_q_statistic, n_jobs, verbose)
q = par(func(rng.permutation(x.T).T, h, nt) for _ in range(n))
return np.array(q)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy(self):
"""Create a copy of the VAR model."""
|
other = self.__class__(self.p)
other.coef = self.coef.copy()
other.residuals = self.residuals.copy()
other.rescov = self.rescov.copy()
return other
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_yw(self, acms):
"""Determine VAR model from autocorrelation matrices by solving the Yule-Walker equations. Parameters acms : array, shape (n_lags, n_channels, n_channels) acms[l] contains the autocorrelation matrix at lag l. The highest lag must equal the model order. Returns ------- self : :class:`VAR` The :class:`VAR` object to facilitate method chaining (see usage example). """
|
if len(acms) != self.p + 1:
raise ValueError("Number of autocorrelation matrices ({}) does not"
" match model order ({}) + 1.".format(len(acms),
self.p))
n_channels = acms[0].shape[0]
acm = lambda l: acms[l] if l >= 0 else acms[-l].T
r = np.concatenate(acms[1:], 0)
rr = np.array([[acm(m-k) for k in range(self.p)]
for m in range(self.p)])
rr = np.concatenate(np.concatenate(rr, -2), -1)
c = sp.linalg.solve(rr, r)
# calculate residual covariance
r = acm(0)
for k in range(self.p):
bs = k * n_channels
r -= np.dot(c[bs:bs + n_channels, :].T, acm(k + 1))
self.coef = np.concatenate([c[m::n_channels, :]
for m in range(n_channels)]).T
self.rescov = r
return self
|
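The linear system assembled above is the block Yule-Walker system (a sketch; the stacking and transpose conventions here follow standard treatments and only loosely mirror the code's in-memory layout). With $\Gamma(l)$ the lag-$l$ autocorrelation matrix and $\Gamma(-l) = \Gamma(l)^{\top}$,

$$\Gamma(m) = \sum_{k=1}^{p} A_k\, \Gamma(m-k), \qquad m = 1, \dots, p,$$

solved jointly for the coefficient blocks $A_k$, after which the residual covariance is recovered as

$$\Sigma_{\varepsilon} = \Gamma(0) - \sum_{k=1}^{p} A_k\, \Gamma(k).$$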
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, data):
"""Predict samples on actual data. The result of this function is used for calculating the residuals. Parameters data : array, shape (trials, channels, samples) or (channels, samples) Epoched or continuous data set. Returns ------- predicted : array, shape `data`.shape Data as predicted by the VAR model. Notes ----- Residuals are obtained by r = x - var.predict(x) """
|
data = atleast_3d(data)
t, m, l = data.shape
p = int(np.shape(self.coef)[1] / m)
y = np.zeros(data.shape)
if t > l - p: # which takes less loop iterations
for k in range(1, p + 1):
bp = self.coef[:, (k - 1)::p]
for n in range(p, l):
y[:, :, n] += np.dot(data[:, :, n - k], bp.T)
else:
for k in range(1, p + 1):
bp = self.coef[:, (k - 1)::p]
for s in range(t):
y[s, :, p:] += np.dot(bp, data[s, :, (p - k):(l - k)])
return y
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_stable(self):
"""Test if VAR model is stable. This function tests stability of the VAR model as described in [1]_. Returns ------- out : bool True if the model is stable. References .. [1] H. Lütkepohl, "New Introduction to Multiple Time Series Analysis", 2005, Springer, Berlin, Germany. """
|
m, mp = self.coef.shape
p = mp // m
assert(mp == m * p) # TODO: replace with raise?
top_block = []
for i in range(p):
top_block.append(self.coef[:, i::p])
top_block = np.hstack(top_block)
im = np.eye(m)
eye_block = im
for i in range(p - 2):
eye_block = sp.linalg.block_diag(im, eye_block)
eye_block = np.hstack([eye_block, np.zeros((m * (p - 1), m))])
tmp = np.vstack([top_block, eye_block])
return np.all(np.abs(np.linalg.eig(tmp)[0]) < 1)
|
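The stacked matrix tested above is the VAR($p$) companion matrix

$$C = \begin{pmatrix} A_1 & A_2 & \cdots & A_{p-1} & A_p \\ I & 0 & \cdots & 0 & 0 \\ 0 & I & \cdots & 0 & 0 \\ \vdots & & \ddots & & \vdots \\ 0 & 0 & \cdots & I & 0 \end{pmatrix},$$

and the model is stable if and only if every eigenvalue of $C$ lies strictly inside the unit circle, which is exactly the final ``np.all(np.abs(...) < 1)`` check (Lütkepohl 2005).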
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch(dataset="mi", datadir=datadir):
"""Fetch example dataset. If the requested dataset is not found in the location specified by `datadir`, the function attempts to download it. Parameters dataset : str Which dataset to load. Currently only 'mi' is supported. datadir : str Path to the storage location of example datasets. Datasets are downloaded to this location if they cannot be found. If the directory does not exist it is created. Returns ------- data : list of dicts The data set is stored in a list, where each list element corresponds to data from one subject. Each list element is a dictionary with the following keys: """
|
if dataset not in datasets:
raise ValueError("Example data '{}' not available.".format(dataset))
else:
files = datasets[dataset]["files"]
url = datasets[dataset]["url"]
md5 = datasets[dataset]["md5"]
if not isdir(datadir):
makedirs(datadir)
data = []
for n, filename in enumerate(files):
fullfile = join(datadir, filename)
if not isfile(fullfile):
with open(fullfile, "wb") as f:
response = get("/".join([url, filename]))  # join URL parts explicitly; os.path.join is not URL-safe
f.write(response.content)
with open(fullfile, "rb") as f: # check if MD5 of downloaded file matches original hash
hash = hashlib.md5(f.read()).hexdigest()
if hash != md5[n]:
raise MD5MismatchError("MD5 hash of {} does not match {}.".format(fullfile, md5[n]))
data.append(convert(dataset, loadmat(fullfile)))
return data
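Usage sketch (the first call downloads the files into datadir):

    data = fetch("mi")      # motor imagery example dataset
    print(len(data))        # one dict per subject
    print(sorted(data[0]))  # the per-subject keys described above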
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def supports_undefined(self):
""" Test whether this schema supports Undefined. A Schema that supports `Undefined`, when given `Undefined`, should return some value (other than `Undefined`) without raising errors. This is designed to support a very special case like that: ```python Schema(Default(0)).supports_undefined #-> True ``` This way a validator can declare that it has a default in case no value was provided, and this case happens when: 1. A [`Required`](#required) mapping key was not provided, and it's mapped to `Default()` 2. .. no more supported cases. Yet. :rtype: bool """
|
# Test
try:
yes = self(const.UNDEFINED) is not const.UNDEFINED
except (Invalid, SchemaError):
yes = False
# Remember (lame @cached_property)
self.__dict__['supports_undefined'] = yes
return yes
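A sketch against the public API of the `good` validation library this code belongs to, mirroring the docstring example:

    from good import Schema, Default

    assert Schema(Default(0)).supports_undefined  # has a fallback value
    assert not Schema(int).supports_undefined     # a plain type rejects Undefined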
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_schema_type(cls, schema):
""" Get schema type for the argument :param schema: Schema to analyze :return: COMPILED_TYPE constant :rtype: str|None """
|
schema_type = type(schema)
# Marker
if issubclass(schema_type, markers.Marker):
return const.COMPILED_TYPE.MARKER
# Marker Type
elif issubclass(schema_type, six.class_types) and issubclass(schema, markers.Marker):
return const.COMPILED_TYPE.MARKER
# CompiledSchema
elif isinstance(schema, CompiledSchema):
return const.COMPILED_TYPE.SCHEMA
else:
return primitive_type(schema)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def priority(self):
""" Get priority for this Schema. Used to sort mapping keys :rtype: int """
|
# Markers have priority set on the class
if self.compiled_type == const.COMPILED_TYPE.MARKER:
return self.compiled.priority
# Other types have static priority
return const.compiled_type_priorities[self.compiled_type]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_schemas(cls, schemas_list):
""" Sort the provided list of schemas according to their priority. This also supports markers, and markers of a single type are also sorted according to the priority of the wrapped schema. :type schemas_list: list[CompiledSchema] :rtype: list[CompiledSchema] """
|
return sorted(schemas_list,
key=lambda x: (
# Top-level priority:
# priority of the schema itself
x.priority,
# Second-level priority (for markers of the common type)
# This ensures that Optional(1) always goes before Optional(int)
x.compiled.key_schema.priority if x.compiled_type == const.COMPILED_TYPE.MARKER else 0
), reverse=True)
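A behavioral sketch with good's public API (literal value schemas are chosen here only for brevity):

    from good import Schema, Optional

    schema = Schema({Optional(1): 'one', Optional(int): 'number'})
    print(schema({1: 'one', 5: 'number'}))  # key 1 matches Optional(1) before Optional(int)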
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sub_compile(self, schema, path=None, matcher=False):
""" Compile a sub-schema :param schema: Validation schema :type schema: * :param path: Path to this schema, if any :type path: list|None :param matcher: Compile a matcher? :type matcher: bool :rtype: CompiledSchema """
|
return type(self)(
schema,
self.path + (path or []),
None,
None,
matcher
)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Invalid(self, message, expected):
""" Helper for Invalid errors. Typical use: err_type = self.Invalid(_(u'Message'), self.name) raise err_type(<provided-value>) Note: `provided` and `expected` are unicode-typecasted automatically :type message: unicode :type expected: unicode """
|
def InvalidPartial(provided, path=None, **info):
""" Create an Invalid exception
:type provided: unicode
:type path: list|None
:rtype: Invalid
"""
return Invalid(
message,
expected, #six.text_type(expected), # -- must be unicode
provided, #six.text_type(provided), # -- must be unicode
self.path + (path or []),
self.schema,
**info
)
return InvalidPartial
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_schema_compiler(self, schema):
""" Get compiler method for the provided schema :param schema: Schema to analyze :return: Callable compiled :rtype: callable|None """
|
# Schema type
schema_type = self.get_schema_type(schema)
if schema_type is None:
return None
# Compiler
compilers = {
const.COMPILED_TYPE.LITERAL: self._compile_literal,
const.COMPILED_TYPE.TYPE: self._compile_type,
const.COMPILED_TYPE.SCHEMA: self._compile_schema,
const.COMPILED_TYPE.ENUM: self._compile_enum,
const.COMPILED_TYPE.CALLABLE: self._compile_callable,
const.COMPILED_TYPE.ITERABLE: self._compile_iterable,
const.COMPILED_TYPE.MAPPING: self._compile_mapping,
const.COMPILED_TYPE.MARKER: self._compile_marker,
}
return compilers[schema_type]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile_schema(self, schema):
""" Compile the current schema into a callable validator :return: Callable validator :rtype: callable :raises SchemaError: Schema compilation error """
|
compiler = self.get_schema_compiler(schema)
if compiler is None:
raise SchemaError(_(u'Unsupported schema data type {!r}').format(type(schema).__name__))
return compiler(schema)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compile_schema(self, schema):
""" Compile another schema """
|
assert self.matcher == schema.matcher
self.name = schema.name
self.compiled_type = schema.compiled_type
return schema.compiled
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadmat(filename):
"""This function should be called instead of direct spio.loadmat as it cures the problem of not properly recovering python dictionaries from mat files. It calls the function check keys to cure all entries which are still mat-objects """
|
data = sploadmat(filename, struct_as_record=False, squeeze_me=True)
return _check_keys(data)
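Usage sketch (the file and key names are hypothetical):

    data = loadmat("recording.mat")
    print(type(data["header"]))  # nested mat-structs come back as plain dicts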
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_keys(dictionary):
""" checks if entries in dictionary are mat-objects. If yes todict is called to change them to nested dictionaries """
|
for key in dictionary:
if isinstance(dictionary[key], matlab.mio5_params.mat_struct):
dictionary[key] = _todict(dictionary[key])
return dictionary
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _todict(matobj):
""" a recursive function which constructs from matobjects nested dictionaries """
|
dictionary = {}
#noinspection PyProtectedMember
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, matlab.mio5_params.mat_struct):
dictionary[strg] = _todict(elem)
else:
dictionary[strg] = elem
return dictionary
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plainica(x, reducedim=0.99, backend=None, random_state=None):
""" Source decomposition with ICA. Apply ICA to the data x, with optional PCA dimensionality reduction. Parameters x : array, shape (n_trials, n_channels, n_samples) or (n_channels, n_samples) data set reducedim : {int, float, 'no_pca'}, optional A number of less than 1 in interpreted as the fraction of variance that should remain in the data. All components that describe in total less than `1-reducedim` of the variance are removed by the PCA step. An integer numer of 1 or greater is interpreted as the number of components to keep after applying the PCA. If set to 'no_pca' the PCA step is skipped. backend : dict-like, optional Specify backend to use. When set to None the backend configured in config.backend is used. Returns ------- result : ResultICA Source decomposition """
|
x = atleast_3d(x)
t, m, l = np.shape(x)
if backend is None:
backend = scotbackend
# pre-transform the data with PCA
if reducedim == 'no_pca':
c = np.eye(m)
d = np.eye(m)
xpca = x
else:
c, d, xpca = backend['pca'](x, reducedim)
# run ICA on the (optionally PCA-reduced) data
mx, ux = backend['ica'](cat_trials(xpca), random_state=random_state)
# correct the (un)mixing matrix estimates for the PCA pre-transform
mx = mx.dot(d)
ux = c.dot(ux)
class ResultICA:  # matches the return type named in the docstring
    unmixing = ux
    mixing = mx
return ResultICA
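A quick sketch on synthetic data (uses whatever ICA backend is configured in scot):

    import numpy as np

    x = np.random.randn(5, 4, 1000)  # 5 trials, 4 channels, 1000 samples
    res = plainica(x, reducedim='no_pca')
    print(res.unmixing.shape, res.mixing.shape)  # both (4, 4) when PCA is skipped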
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _msge_with_gradient_underdetermined(data, delta, xvschema, skipstep, p):
"""Calculate mean squared generalization error and its gradient for underdetermined equation system. """
|
t, m, l = data.shape
d = None
j, k = 0, 0  # accumulated squared error and its gradient
nt = np.ceil(t / skipstep)  # number of cross-validation folds
for trainset, testset in xvschema(t, skipstep):
a, b = _construct_var_eqns(atleast_3d(data[trainset, :, :]), p)
c, d = _construct_var_eqns(atleast_3d(data[testset, :, :]), p)
e = sp.linalg.inv(np.eye(a.shape[0]) * delta ** 2 + a.dot(a.T))  # dual-form ridge inverse (n x n, see note below)
cc = c.transpose().dot(c)
be = b.transpose().dot(e)
bee = be.dot(e)
bea = be.dot(a)
beea = bee.dot(a)
beacc = bea.dot(cc)
dc = d.transpose().dot(c)
j += np.sum(beacc * bea - 2 * bea * dc) + np.sum(d ** 2)
k += np.sum(beea * dc - beacc * beea) * 4 * delta
return j / (nt * d.size), k / (nt * d.size)
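The `e` term above uses the dual form of the ridge solution, which inverts an n x n matrix instead of an (m*p) x (m*p) one when there are fewer equations than unknowns. A quick numerical check of the identity (A'A + d^2 I)^-1 A' = A'(AA' + d^2 I)^-1:

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.standard_normal((5, 20))  # underdetermined: 5 equations, 20 unknowns
    b = rng.standard_normal((5, 3))
    delta = 0.5
    primal = np.linalg.solve(a.T @ a + delta**2 * np.eye(20), a.T @ b)
    dual = a.T @ np.linalg.solve(a @ a.T + delta**2 * np.eye(5), b)
    print(np.allclose(primal, dual))  # True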
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _msge_with_gradient_overdetermined(data, delta, xvschema, skipstep, p):
"""Calculate mean squared generalization error and its gradient for overdetermined equation system. """
|
t, m, l = data.shape
d = None
j, k = 0, 0  # accumulated squared error and its gradient (j, not l, to avoid shadowing the sample count)
nt = np.ceil(t / skipstep)  # number of cross-validation folds
for trainset, testset in xvschema(t, skipstep):
a, b = _construct_var_eqns(atleast_3d(data[trainset, :, :]), p)
c, d = _construct_var_eqns(atleast_3d(data[testset, :, :]), p)
e = sp.linalg.inv(np.eye(a.shape[1]) * delta ** 2 + a.T.dot(a))  # primal-form ridge inverse (m*p x m*p)
ba = b.transpose().dot(a)
dc = d.transpose().dot(c)
bae = ba.dot(e)
baee = bae.dot(e)
baecc = bae.dot(c.transpose().dot(c))
j += np.sum(baecc * bae - 2 * bae * dc) + np.sum(d ** 2)
k += np.sum(baee * dc - baecc * baee) * 4 * delta
return j / (nt * d.size), k / (nt * d.size)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_msge_with_gradient(data, delta, xvschema, skipstep, p):
"""Calculate mean squared generalization error and its gradient, automatically selecting the best function. """
|
t, m, l = data.shape
n = (l - p) * t
underdetermined = n < m * p
if underdetermined:
return _msge_with_gradient_underdetermined(data, delta, xvschema,
skipstep, p)
else:
return _msge_with_gradient_overdetermined(data, delta, xvschema,
skipstep, p)
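An evaluation sketch (assumes scot's multitrial cross-validation scheme, and that _construct_var_eqns is importable alongside these helpers):

    import numpy as np
    from scot.xvschema import multitrial

    data = np.random.randn(10, 3, 100)  # trials, channels, samples
    msge, grad = _get_msge_with_gradient(data, delta=1.0, xvschema=multitrial,
                                         skipstep=1, p=2)
    print(msge, grad)                   # typically fed to a bisection search over delta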
|