<SYSTEM_TASK:>
dotless' join, for nicer paths.
<END_TASK>
<USER_TASK:>
Description:
def djoin (*args):
    """'dotless' join, for nicer paths."""
    from os.path import join

    i = 0
    alen = len (args)

    while i < alen and (args[i] == '' or args[i] == '.'):
        i += 1

    if i == alen:
        return '.'

    return join (*args[i:])
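For concreteness, a couple of invented calls and their results::

    djoin ('.', '', 'data', 'obs.fits')   # -> 'data/obs.fits'
    djoin ('', '.')                       # -> '.'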
<SYSTEM_TASK:>
Ensure the existence of a symbolic link pointing to src named dst. Returns
<END_TASK>
<USER_TASK:>
Description:
def ensure_symlink (src, dst):
    """Ensure the existence of a symbolic link pointing to src named dst. Returns
    a boolean indicating whether the symlink already existed.

    """
    try:
        os.symlink (src, dst)
    except OSError as e:
        if e.errno == 17: # EEXIST
            return True
        raise
    return False
<SYSTEM_TASK:>
Ensure that this path exists as a directory.
<END_TASK>
<USER_TASK:>
Description:
def ensure_dir (self, mode=0o777, parents=False):
    """Ensure that this path exists as a directory.

    This function calls :meth:`mkdir` on this path, but does not raise an
    exception if it already exists. It does raise an exception if this path
    exists but is not a directory. If the directory is created, *mode* is
    used to set the permissions of the resulting directory, with the
    important caveat that the current :func:`os.umask` is applied.

    It returns a boolean indicating if the directory was actually created.

    If *parents* is true, parent directories will be created in the same
    manner.

    """
    if parents:
        p = self.parent
        if p == self:
            return False # can never create root; avoids loop when parents=True
        p.ensure_dir (mode, True)

    made_it = False

    try:
        self.mkdir (mode)
        made_it = True
    except OSError as e:
        if e.errno == 17: # EEXIST?
            return False # that's fine
        raise # other exceptions are not fine

    if not self.is_dir ():
        import errno
        raise OSError (errno.ENOTDIR, 'Not a directory', str(self))

    return made_it
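A brief usage sketch; the directory name is hypothetical::

    p = Path ('results/2024/run01')
    created = p.ensure_dir (parents=True)   # True only if the leaf directory was newly made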
<SYSTEM_TASK:>
Get a context manager that creates and cleans up a uniquely-named temporary
<END_TASK>
<USER_TASK:>
Description:
def make_tempfile (self, want='handle', resolution='try_unlink', suffix='', **kwargs):
    """Get a context manager that creates and cleans up a uniquely-named temporary
    file with a name similar to this path.

    This function returns a context manager that creates a secure temporary
    file with a path similar to *self*. In particular, if ``str(self)`` is
    something like ``foo/bar``, the path of the temporary file will be
    something like ``foo/bar.ame8_2``.

    The object returned by the context manager depends on the *want* argument:

    ``"handle"``
      An open file-like object is returned. This is the object returned by
      :class:`tempfile.NamedTemporaryFile`. Its name on the filesystem is
      accessible as a string as its `name` attribute, or (a customization
      here) as a :class:`Path` instance as its `path` attribute.

    ``"path"``
      The temporary file is created as in ``"handle"``, but is then
      immediately closed. A :class:`Path` instance pointing to the path of
      the temporary file is instead returned.

    If an exception occurs inside the context manager block, the temporary
    file is left lying around. Otherwise, what happens to it upon exit from
    the context manager depends on the *resolution* argument:

    ``"try_unlink"``
      Call :meth:`try_unlink` on the temporary file — no exception is raised
      if the file did not exist.

    ``"unlink"``
      Call :meth:`unlink` on the temporary file — an exception is raised if
      the file did not exist.

    ``"keep"``
      The temporary file is left lying around.

    ``"overwrite"``
      The temporary file is :meth:`rename`-d to overwrite *self*.

    For instance, when rewriting important files, it’s typical to write the
    new data to a temporary file, and only rename the temporary file to the
    final destination at the end — that way, if a problem happens while
    writing the new data, the original file is left unmodified; otherwise
    you’d be stuck with a partially-written version of the file. This pattern
    can be accomplished with::

        p = Path ('path/to/important/file')
        with p.make_tempfile (resolution='overwrite', mode='wt') as h:
            print ('important stuff goes here', file=h)

    The *suffix* argument is appended to the temporary file name after the
    random portion. It defaults to the empty string. If you want it to
    operate as a typical filename suffix, include a leading ``"."``.

    Other **kwargs** are passed to :class:`tempfile.NamedTemporaryFile`.

    """
    if want not in ('handle', 'path'):
        raise ValueError ('unrecognized make_tempfile() "want" mode %r' % (want,))
    if resolution not in ('unlink', 'try_unlink', 'keep', 'overwrite'):
        raise ValueError ('unrecognized make_tempfile() "resolution" mode %r' % (resolution,))
    return Path._PathTempfileContextManager (self, want, resolution, suffix, kwargs)
<SYSTEM_TASK:>
Try to unlink this path. If it doesn't exist, no error is returned. Returns
<END_TASK>
<USER_TASK:>
Description:
def try_unlink (self):
    """Try to unlink this path. If it doesn't exist, no error is returned. Returns
    a boolean indicating whether the path was really unlinked.

    """
    try:
        self.unlink ()
        return True
    except OSError as e:
        if e.errno == 2:
            return False # ENOENT
        raise
<SYSTEM_TASK:>
Generate a sequence of objects by opening the path and unpickling items
<END_TASK>
<USER_TASK:>
Description:
def read_pickles (self):
    """Generate a sequence of objects by opening the path and unpickling items
    until EOF is reached.

    """
    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    with self.open (mode='rb') as f:
        while True:
            try:
                obj = pickle.load (f)
            except EOFError:
                break
            yield obj
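A usage sketch, assuming ``events.pkl`` (a made-up name) already holds a stream of concatenated pickles::

    p = Path ('events.pkl')
    for obj in p.read_pickles ():
        print (obj)   # replace with whatever per-item handling you need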
<SYSTEM_TASK:>
Read this path as one large chunk of text.
<END_TASK>
<USER_TASK:>
Description:
def read_text(self, encoding=None, errors=None, newline=None):
    """Read this path as one large chunk of text.

    This function reads in the entire file as one big piece of text and
    returns it. The *encoding*, *errors*, and *newline* keywords are passed
    to :meth:`open`.

    This is not a good way to read files unless you know for sure that they
    are small.

    """
    with self.open (mode='rt', encoding=encoding, errors=errors, newline=newline) as f:
        return f.read()
<SYSTEM_TASK:>
Read this path as a TOML document.
<END_TASK>
<USER_TASK:>
Description:
def read_toml(self, encoding=None, errors=None, newline=None, **kwargs):
    """Read this path as a TOML document.

    The `TOML <https://github.com/toml-lang/toml>`_ parsing is done with the
    :mod:`pytoml` module. The *encoding*, *errors*, and *newline* keywords
    are passed to :meth:`open`. The remaining *kwargs* are passed to
    :func:`pytoml.load`.

    Returns the decoded data structure.

    """
    import pytoml
    with self.open (mode='rt', encoding=encoding, errors=errors, newline=newline) as f:
        return pytoml.load (f, **kwargs)
<SYSTEM_TASK:>
Read this path as a YAML document.
<END_TASK>
<USER_TASK:>
Description:
def read_yaml (self, encoding=None, errors=None, newline=None, **kwargs):
    """Read this path as a YAML document.

    The YAML parsing is done with the :mod:`yaml` module. The *encoding*,
    *errors*, and *newline* keywords are passed to :meth:`open`. The
    remaining *kwargs* are passed to :meth:`yaml.load`.

    Returns the decoded data structure.

    """
    import yaml
    with self.open (mode='rt', encoding=encoding, errors=errors, newline=newline) as f:
        return yaml.load (f, **kwargs)
<SYSTEM_TASK:>
A very simple decorator for creating enumerations. Unlike Python 3.4
<END_TASK>
<USER_TASK:>
Description:
def enumeration (cls):
    """A very simple decorator for creating enumerations. Unlike Python 3.4
    enumerations, this just gives a way to use a class declaration to create
    an immutable object containing only the values specified in the class.

    If the attribute ``__pickle_compat__`` is set to True in the decorated
    class, the resulting enumeration value will be callable such that
    ``EnumClass(x) == x``. This is needed to unpickle enumeration values that
    were previously implemented using :class:`enum.Enum`.

    """
    from pwkit import unicode_to_str
    name = cls.__name__
    pickle_compat = getattr (cls, '__pickle_compat__', False)

    def __unicode__ (self):
        return '<enumeration holder %s>' % name

    def getattr_error (self, attr):
        raise AttributeError ('enumeration %s does not contain attribute %s' % (name, attr))

    def modattr_error (self, *args, **kwargs):
        raise AttributeError ('modification of %s enumeration not allowed' % name)

    clsdict = {
        '__doc__': cls.__doc__,
        '__slots__': (),
        '__unicode__': __unicode__,
        '__str__': unicode_to_str,
        '__repr__': unicode_to_str,
        '__getattr__': getattr_error,
        '__setattr__': modattr_error,
        '__delattr__': modattr_error,
    }

    for key in dir (cls):
        if not key.startswith ('_'):
            clsdict[key] = getattr (cls, key)

    if pickle_compat:
        clsdict['__call__'] = lambda self, x: x

    enumcls = type (name, (object, ), clsdict)
    return enumcls ()
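A minimal usage sketch; the class and member names are invented for illustration::

    @enumeration
    class Colors (object):
        """Allowed plot colors."""
        RED = 'red'
        BLUE = 'blue'

    print (Colors.RED)      # -> 'red'
    Colors.GREEN            # raises AttributeError: not a member
    Colors.RED = 'pink'     # raises AttributeError: enumeration is immutable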
<SYSTEM_TASK:>
Given an ordered array of values, generate a set of slices that traverse
<END_TASK>
<USER_TASK:>
Description:
def slice_around_gaps (values, maxgap):
    """Given an ordered array of values, generate a set of slices that traverse
    all of the values. Within each slice, no gap between adjacent values is
    larger than `maxgap`. In other words, these slices break the array into
    chunks separated by gaps of size larger than maxgap.

    """
    if not (maxgap > 0):
        # above test catches NaNs, other weird cases
        raise ValueError ('maxgap must be positive; got %r' % maxgap)

    values = np.asarray (values)
    delta = values[1:] - values[:-1]

    if np.any (delta < 0):
        raise ValueError ('values must be in nondecreasing order')

    whgap = np.where (delta > maxgap)[0] + 1
    prev_idx = None

    for gap_idx in whgap:
        yield slice (prev_idx, gap_idx)
        prev_idx = gap_idx

    yield slice (prev_idx, None)
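A small worked example with invented values::

    import numpy as np

    t = np.array ([0., 1., 2., 10., 11., 30.])
    for sl in slice_around_gaps (t, maxgap=5):
        print (t[sl])
    # prints [0. 1. 2.], then [10. 11.], then [30.]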
<SYSTEM_TASK:>
Reduce" a DataFrame by collapsing rows in grouped chunks. Returns another
<END_TASK>
<USER_TASK:>
Description:
def reduce_data_frame (df, chunk_slicers,
                       avg_cols=(),
                       uavg_cols=(),
                       minmax_cols=(),
                       nchunk_colname='nchunk',
                       uncert_prefix='u',
                       min_points_per_chunk=3):
    """"Reduce" a DataFrame by collapsing rows in grouped chunks. Returns another
    DataFrame with similar columns but fewer rows.

    Arguments:

    df
      The input :class:`pandas.DataFrame`.
    chunk_slicers
      An iterable that returns values that are used to slice *df* with its
      :meth:`pandas.DataFrame.iloc` indexer. An example value might be the
      generator returned from :func:`slice_evenly_with_gaps`.
    avg_cols
      An iterable of names of columns that are to be reduced by taking the mean.
    uavg_cols
      An iterable of names of columns that are to be reduced by taking a
      weighted mean.
    minmax_cols
      An iterable of names of columns that are to be reduced by reporting
      minimum and maximum values.
    nchunk_colname
      The name of a column to create reporting the number of rows contributing
      to each chunk.
    uncert_prefix
      The column name prefix for locating uncertainty estimates. By default,
      the uncertainty on the column ``"temp"`` is given in the column
      ``"utemp"``.
    min_points_per_chunk
      Require at least this many rows in each chunk. Smaller chunks are
      discarded.

    Returns a new :class:`pandas.DataFrame`.

    """
    subds = [df.iloc[idx] for idx in chunk_slicers]
    subds = [sd for sd in subds if sd.shape[0] >= min_points_per_chunk]

    chunked = df.__class__ ({nchunk_colname: np.zeros (len (subds), dtype=int)})

    # Some future-proofing: allow possibility of different ways of mapping
    # from a column giving a value to a column giving its uncertainty.
    uncert_col_name = lambda c: uncert_prefix + c

    for i, subd in enumerate (subds):
        label = chunked.index[i]
        chunked.loc[label,nchunk_colname] = subd.shape[0]

        for col in avg_cols:
            chunked.loc[label,col] = subd[col].mean ()

        for col in uavg_cols:
            ucol = uncert_col_name (col)
            v, u = weighted_mean (subd[col], subd[ucol])
            chunked.loc[label,col] = v
            chunked.loc[label,ucol] = u

        for col in minmax_cols:
            chunked.loc[label, 'min_'+col] = subd[col].min ()
            chunked.loc[label, 'max_'+col] = subd[col].max ()

    return chunked
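A usage sketch tying this to :func:`slice_around_gaps` above; the column names ``t``, ``flux``, and ``uflux`` are invented for illustration::

    chunked = reduce_data_frame (
        df,
        slice_around_gaps (df['t'].values, maxgap=300.),  # new chunk at every gap > 300 s
        avg_cols=['t'],
        uavg_cols=['flux'],   # the uncertainty column 'uflux' is found via uncert_prefix
    )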
<SYSTEM_TASK:>
Reduce" a DataFrame by collapsing rows in grouped chunks, grouping based on
<END_TASK>
<USER_TASK:>
Description:
def reduce_data_frame_evenly_with_gaps (df, valcol, target_len, maxgap, **kwargs):
    """"Reduce" a DataFrame by collapsing rows in grouped chunks, grouping based on
    gaps in one of the columns.

    This function combines :func:`reduce_data_frame` with
    :func:`slice_evenly_with_gaps`.

    """
    return reduce_data_frame (df,
                              slice_evenly_with_gaps (df[valcol], target_len, maxgap),
                              **kwargs)
<SYSTEM_TASK:>
Smooth data series according to a window, weighting based on uncertainties.
<END_TASK>
<USER_TASK:>
Description:
def usmooth (window, uncerts, *data, **kwargs):
    """Smooth data series according to a window, weighting based on uncertainties.

    Arguments:

    window
      The smoothing window.
    uncerts
      An array of uncertainties used to weight the smoothing.
    data
      One or more data series, of the same size as *uncerts*.
    k = None
      If specified, only every *k*-th point of the results will be kept. If k
      is None (the default), it is set to ``window.size``, i.e. correlated
      points will be discarded.

    Returns: ``(s_uncerts, s_data[0], s_data[1], ...)``, the smoothed
    uncertainties and data series.

    Example::

        u, x, y = numutil.usmooth (np.hamming (7), u, x, y)

    """
    window = np.asarray (window)

    # Hacky keyword argument handling because you can't write "def foo (*args,
    # k=0)".
    k = kwargs.pop ('k', None)
    if len (kwargs):
        raise TypeError ("smooth() got an unexpected keyword argument '%s'"
                         % list (kwargs)[0])

    # Done with kwargs futzing.

    if k is None:
        k = window.size

    conv = lambda q, r: np.convolve (q, r, mode='valid')

    if uncerts is None:
        # Uniform weights when no uncertainties are given; the first data
        # series sets the shape.
        w = np.ones_like (np.asarray (data[0]), dtype=float)
    else:
        uncerts = np.asarray (uncerts)
        w = uncerts ** -2

    cw = conv (w, window)
    cu = np.sqrt (conv (w, window**2)) / cw
    result = [cu] + [conv (w * np.asarray (x), window) / cw for x in data]

    if k != 1:
        result = [x[::k] for x in result]

    return result
<SYSTEM_TASK:>
Return the variance of a weighted sample.
<END_TASK>
<USER_TASK:>
Description:
def weighted_variance (x, weights):
    """Return the variance of a weighted sample.

    The weighted sample mean is calculated and subtracted off, so the returned
    variance is upweighted by ``n / (n - 1)``. If the sample mean is known to
    be zero, you should just compute ``np.average (x**2, weights=weights)``.

    """
    n = len (x)
    if n < 3:
        raise ValueError ('cannot calculate meaningful variance of fewer '
                          'than three samples')
    wt_mean = np.average (x, weights=weights)
    return np.average (np.square (x - wt_mean), weights=weights) * n / (n - 1)
<SYSTEM_TASK:>
Tophat function on the unit interval, left-exclusive and right-exclusive.
<END_TASK>
<USER_TASK:>
Description:
def unit_tophat_ee (x):
"""Tophat function on the unit interval, left-exclusive and right-exclusive.
Returns 1 if 0 < x < 1, 0 otherwise.
""" |
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = ((0 < x1) & (x1 < 1)).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r |
<SYSTEM_TASK:>
Return a ufunc-like tophat function on the defined range, left-exclusive
<END_TASK>
<USER_TASK:>
Description:
def make_tophat_ee (lower, upper):
"""Return a ufunc-like tophat function on the defined range, left-exclusive
and right-exclusive. Returns 1 if lower < x < upper, 0 otherwise.
""" |
if not np.isfinite (lower):
raise ValueError ('"lower" argument must be finite number; got %r' % lower)
if not np.isfinite (upper):
raise ValueError ('"upper" argument must be finite number; got %r' % upper)
def range_tophat_ee (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = ((lower < x1) & (x1 < upper)).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
range_tophat_ee.__doc__ = ('Ranged tophat function, left-exclusive and '
'right-exclusive. Returns 1 if %g < x < %g, '
'0 otherwise.') % (lower, upper)
return range_tophat_ee |
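A quick check of the generated function, with invented bounds::

    import numpy as np

    band = make_tophat_ee (1., 3.)
    band (2.)                                  # -> 1.0
    band (np.array ([0.5, 1., 2., 3., 4.]))    # -> array([0., 0., 1., 0., 0.])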
<SYSTEM_TASK:>
Return a ufunc-like tophat function on the defined range, left-exclusive
<END_TASK>
<USER_TASK:>
Description:
def make_tophat_ei (lower, upper):
"""Return a ufunc-like tophat function on the defined range, left-exclusive
and right-inclusive. Returns 1 if lower < x <= upper, 0 otherwise.
""" |
if not np.isfinite (lower):
raise ValueError ('"lower" argument must be finite number; got %r' % lower)
if not np.isfinite (upper):
raise ValueError ('"upper" argument must be finite number; got %r' % upper)
def range_tophat_ei (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = ((lower < x1) & (x1 <= upper)).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
range_tophat_ei.__doc__ = ('Ranged tophat function, left-exclusive and '
'right-inclusive. Returns 1 if %g < x <= %g, '
'0 otherwise.') % (lower, upper)
return range_tophat_ei |
<SYSTEM_TASK:>
Return a ufunc-like tophat function on the defined range, left-inclusive
<END_TASK>
<USER_TASK:>
Description:
def make_tophat_ie (lower, upper):
"""Return a ufunc-like tophat function on the defined range, left-inclusive
and right-exclusive. Returns 1 if lower <= x < upper, 0 otherwise.
""" |
if not np.isfinite (lower):
raise ValueError ('"lower" argument must be finite number; got %r' % lower)
if not np.isfinite (upper):
raise ValueError ('"upper" argument must be finite number; got %r' % upper)
def range_tophat_ie (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = ((lower <= x1) & (x1 < upper)).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
range_tophat_ie.__doc__ = ('Ranged tophat function, left-inclusive and '
'right-exclusive. Returns 1 if %g <= x < %g, '
'0 otherwise.') % (lower, upper)
return range_tophat_ie |
<SYSTEM_TASK:>
Return a ufunc-like tophat function on the defined range, left-inclusive
<END_TASK>
<USER_TASK:>
Description:
def make_tophat_ii (lower, upper):
"""Return a ufunc-like tophat function on the defined range, left-inclusive
and right-inclusive. Returns 1 if lower <= x <= upper, 0 otherwise.
""" |
if not np.isfinite (lower):
raise ValueError ('"lower" argument must be finite number; got %r' % lower)
if not np.isfinite (upper):
raise ValueError ('"upper" argument must be finite number; got %r' % upper)
def range_tophat_ii (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = ((lower <= x1) & (x1 <= upper)).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
range_tophat_ii.__doc__ = ('Ranged tophat function, left-inclusive and '
'right-inclusive. Returns 1 if %g <= x <= %g, '
'0 otherwise.') % (lower, upper)
return range_tophat_ii |
<SYSTEM_TASK:>
Return a ufunc-like step function that is left-continuous. Returns 1 if
<END_TASK>
<USER_TASK:>
Description:
def make_step_lcont (transition):
"""Return a ufunc-like step function that is left-continuous. Returns 1 if
x > transition, 0 otherwise.
""" |
if not np.isfinite (transition):
raise ValueError ('"transition" argument must be finite number; got %r' % transition)
def step_lcont (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = (x1 > transition).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
step_lcont.__doc__ = ('Left-continuous step function. Returns 1 if x > %g, '
'0 otherwise.') % (transition,)
return step_lcont |
<SYSTEM_TASK:>
Return a ufunc-like step function that is right-continuous. Returns 1 if
<END_TASK>
<USER_TASK:>
Description:
def make_step_rcont (transition):
"""Return a ufunc-like step function that is right-continuous. Returns 1 if
x >= transition, 0 otherwise.
""" |
if not np.isfinite (transition):
raise ValueError ('"transition" argument must be finite number; got %r' % transition)
def step_rcont (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = (x1 >= transition).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
step_rcont.__doc__ = ('Right-continuous step function. Returns 1 if x >= '
'%g, 0 otherwise.') % (transition,)
return step_rcont |
<SYSTEM_TASK:>
Create a model summing multiple APEC components at fixed temperatures.
<END_TASK>
<USER_TASK:>
Description:
def make_fixed_temp_multi_apec(kTs, name_template='apec%d', norm=None):
"""Create a model summing multiple APEC components at fixed temperatures.
*kTs*
An iterable of temperatures for the components, in keV.
*name_template* = 'apec%d'
A template to use for the names of each component; it is string-formatted
with the 0-based component number as an argument.
*norm* = None
An initial normalization to be used for every component, or None to use
the Sherpa default.
Returns:
A tuple ``(total_model, sub_models)``, where *total_model* is a Sherpa
model representing the sum of the APEC components and *sub_models* is
a list of the individual models.
This function creates a vector of APEC model components and sums them.
Their *kT* parameters are set and then frozen (using
:func:`sherpa.astro.ui.freeze`), so that upon exit from this function, the
amplitude of each component is the only free parameter.
""" |
total_model = None
sub_models = []
for i, kT in enumerate(kTs):
component = ui.xsapec(name_template % i)
component.kT = kT
ui.freeze(component.kT)
if norm is not None:
component.norm = norm
sub_models.append(component)
if total_model is None:
total_model = component
else:
total_model = total_model + component
return total_model, sub_models |
<SYSTEM_TASK:>
Expand an RMF matrix stored in compressed form.
<END_TASK>
<USER_TASK:>
Description:
def expand_rmf_matrix(rmf):
    """Expand an RMF matrix stored in compressed form.

    *rmf*
      An RMF object as might be returned by ``sherpa.astro.ui.get_rmf()``.

    Returns:
      A non-sparse RMF matrix.

    The Response Matrix Function (RMF) of an X-ray telescope like Chandra can
    be stored in a sparse format as defined in `OGIP Calibration Memo
    CAL/GEN/92-002
    <https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html>`_.
    For visualization and analysis purposes, it can be useful to de-sparsify
    the matrices stored in this way. This function does that, returning a
    two-dimensional Numpy array.

    """
    n_chan = rmf.e_min.size
    n_energy = rmf.n_grp.size

    expanded = np.zeros((n_energy, n_chan))
    mtx_ofs = 0
    grp_ofs = 0

    for i in range(n_energy):
        for j in range(rmf.n_grp[i]):
            f = rmf.f_chan[grp_ofs]
            n = rmf.n_chan[grp_ofs]
            expanded[i,f:f+n] = rmf.matrix[mtx_ofs:mtx_ofs+n]
            mtx_ofs += n
            grp_ofs += 1

    return expanded
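For a quick look at the expanded matrix, something along these lines should work; the plotting choices are illustrative only and not part of the function::

    import matplotlib.pyplot as plt
    from sherpa.astro import ui

    rmf = ui.get_rmf ()   # assumes a dataset with an RMF is already loaded
    m = expand_rmf_matrix (rmf)
    plt.imshow (m, origin='lower', aspect='auto')
    plt.xlabel ('Channel')
    plt.ylabel ('Energy bin')
    plt.show ()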
<SYSTEM_TASK:>
Create an "identity" ARF that has uniform sensitivity.
<END_TASK>
<USER_TASK:>
Description:
def derive_identity_arf(name, arf):
"""Create an "identity" ARF that has uniform sensitivity.
*name*
The name of the ARF object to be created; passed to Sherpa.
*arf*
An existing ARF object on which to base this one.
Returns:
A new ARF1D object that has a uniform spectral response vector.
In many X-ray observations, the relevant background signal does not behave
like an astrophysical source that is filtered through the telescope's
response functions. However, I have been unable to get current Sherpa
(version 4.9) to behave how I want when working with background models that
are *not* filtered through these response functions. This function
constructs an "identity" ARF response function that has uniform sensitivity
as a function of detector channel.
""" |
from sherpa.astro.data import DataARF
from sherpa.astro.instrument import ARF1D
darf = DataARF(
name,
arf.energ_lo,
arf.energ_hi,
np.ones(arf.specresp.shape),
arf.bin_lo,
arf.bin_hi,
arf.exposure,
header = None,
)
return ARF1D(darf, pha=arf._pha) |
<SYSTEM_TASK:>
Get data for a quantile-quantile plot of the source data and model.
<END_TASK>
<USER_TASK:>
Description:
def get_source_qq_data(id=None):
"""Get data for a quantile-quantile plot of the source data and model.
*id*
The dataset id for which to get the data; defaults if unspecified.
Returns:
An ndarray of shape ``(3, npts)``. The first slice is the energy axis in
keV; the second is the observed values in each bin (counts, or rate, or
rate per keV, etc.); the third is the corresponding model value in each
bin.
The inputs are implicit; the data are obtained from the current state of
the Sherpa ``ui`` module.
""" |
sdata = ui.get_data(id=id)
kev = sdata.get_x()
obs_data = sdata.counts
model_data = ui.get_model(id=id)(kev)
return np.vstack((kev, obs_data, model_data)) |
<SYSTEM_TASK:>
Get data for a quantile-quantile plot of the background data and model.
<END_TASK>
<USER_TASK:>
Description:
def get_bkg_qq_data(id=None, bkg_id=None):
"""Get data for a quantile-quantile plot of the background data and model.
*id*
The dataset id for which to get the data; defaults if unspecified.
*bkg_id*
The identifier of the background; defaults if unspecified.
Returns:
An ndarray of shape ``(3, npts)``. The first slice is the energy axis in
keV; the second is the observed values in each bin (counts, or rate, or
rate per keV, etc.); the third is the corresponding model value in each
bin.
The inputs are implicit; the data are obtained from the current state of
the Sherpa ``ui`` module.
""" |
bdata = ui.get_bkg(id=id, bkg_id=bkg_id)
kev = bdata.get_x()
obs_data = bdata.counts
model_data = ui.get_bkg_model(id=id, bkg_id=bkg_id)(kev)
return np.vstack((kev, obs_data, model_data)) |
<SYSTEM_TASK:>
Make a quantile-quantile plot comparing events and a model.
<END_TASK>
<USER_TASK:>
Description:
def make_qq_plot(kev, obs, mdl, unit, key_text):
"""Make a quantile-quantile plot comparing events and a model.
*kev*
A 1D, sorted array of event energy bins measured in keV.
*obs*
A 1D array giving the number or rate of events in each bin.
*mdl*
A 1D array giving the modeled number or rate of events in each bin.
*unit*
Text describing the unit in which *obs* and *mdl* are measured; will
be shown on the plot axes.
*key_text*
Text describing the quantile-quantile comparison quantity; will be
shown on the plot legend.
Returns:
An :class:`omega.RectPlot` instance.
*TODO*: nothing about this is Sherpa-specific. Same goes for some of the
plotting routines in :mod:`pwkit.environments.casa.data`; might be
reasonable to add a submodule for generic X-ray-y plotting routines.
""" |
import omega as om
kev = np.asarray(kev)
obs = np.asarray(obs)
mdl = np.asarray(mdl)
c_obs = np.cumsum(obs)
c_mdl = np.cumsum(mdl)
mx = max(c_obs[-1], c_mdl[-1])
p = om.RectPlot()
p.addXY([0, mx], [0, mx], '1:1')
p.addXY(c_mdl, c_obs, key_text)
# HACK: this range of numbers is chosen to give reasonable sampling for my
# sources, which are typically quite soft.
locs = np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2)
c0 = mx * 1.05
c1 = mx * 1.1
for loc in locs:
i0 = int(np.floor(loc))
frac = loc - i0
kevval = (1 - frac) * kev[i0] + frac * kev[i0+1]
mdlval = (1 - frac) * c_mdl[i0] + frac * c_mdl[i0+1]
obsval = (1 - frac) * c_obs[i0] + frac * c_obs[i0+1]
p.addXY([mdlval, mdlval], [c0, c1], '%.2f keV' % kevval, dsn=2)
p.addXY([c0, c1], [obsval, obsval], None, dsn=2)
p.setLabels('Cumulative model ' + unit, 'Cumulative data ' + unit)
p.defaultKeyOverlay.vAlign = 0.3
return p |
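Combining this with :func:`get_source_qq_data` above, a usage sketch might look like the following; the unit and key text are arbitrary, and the final ``show()`` call assumes the usual omegaplot display path::

    kev, obs, mdl = get_source_qq_data ()
    plot = make_qq_plot (kev, obs, mdl, 'counts', 'source')
    plot.show ()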
<SYSTEM_TASK:>
Make a quantile-quantile plot comparing multiple sets of events and models.
<END_TASK>
<USER_TASK:>
Description:
def make_multi_qq_plots(arrays, key_text):
"""Make a quantile-quantile plot comparing multiple sets of events and models.
*arrays*
X.
*key_text*
Text describing the quantile-quantile comparison quantity; will be
shown on the plot legend.
Returns:
An :class:`omega.RectPlot` instance.
*TODO*: nothing about this is Sherpa-specific. Same goes for some of the
plotting routines in :mod:`pwkit.environments.casa.data`; might be
reasonable to add a submodule for generic X-ray-y plotting routines.
*TODO*: Some gross code duplication here.
""" |
import omega as om
p = om.RectPlot()
p.addXY([0, 1.], [0, 1.], '1:1')
for index, array in enumerate(arrays):
kev, obs, mdl = array
c_obs = np.cumsum(obs)
c_mdl = np.cumsum(mdl)
mx = 0.5 * (c_obs[-1] + c_mdl[-1])
c_obs /= mx
c_mdl /= mx
p.addXY(c_mdl, c_obs, '%s #%d' % (key_text, index))
# HACK: this range of numbers is chosen to give reasonable sampling for my
# sources, which are typically quite soft.
#
# Note: this reuses the variables from the last loop iteration.
locs = np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2)
c0 = 1.05
c1 = 1.1
for loc in locs:
i0 = int(np.floor(loc))
frac = loc - i0
kevval = (1 - frac) * kev[i0] + frac * kev[i0+1]
mdlval = (1 - frac) * c_mdl[i0] + frac * c_mdl[i0+1]
obsval = (1 - frac) * c_obs[i0] + frac * c_obs[i0+1]
p.addXY([mdlval, mdlval], [c0, c1], '%.2f keV' % kevval, dsn=2)
p.addXY([c0, c1], [obsval, obsval], None, dsn=2)
p.setLabels('Cumulative rescaled model', 'Cumulative rescaled data')
p.defaultKeyOverlay.vAlign = 0.3
return p |
<SYSTEM_TASK:>
Make a plot of a spectral model and data.
<END_TASK>
<USER_TASK:>
Description:
def make_spectrum_plot(model_plot, data_plot, desc, xmin_clamp=0.01,
min_valid_x=None, max_valid_x=None):
"""Make a plot of a spectral model and data.
*model_plot*
A model plot object returned by Sherpa from a call like `ui.get_model_plot()`
or `ui.get_bkg_model_plot()`.
*data_plot*
A data plot object returned by Sherpa from a call like `ui.get_source_plot()`
or `ui.get_bkg_plot()`.
*desc*
Text describing the origin of the data; will be shown in the plot legend
(with "Model" and "Data" appended).
*xmin_clamp*
The smallest "x" (energy axis) value that will be plotted; default is 0.01.
This is needed to allow the plot to be shown on a logarithmic scale if
the energy axes of the model go all the way to 0.
*min_valid_x*
Either None, or the smallest "x" (energy axis) value in which the model and
data are valid; this could correspond to a range specified in the "notice"
command during analysis. If specified, a gray band will be added to the plot
showing the invalidated regions.
*max_valid_x*
Like *min_valid_x* but for the largest "x" (energy axis) value in which the
model and data are valid.
Returns:
A tuple ``(plot, xlow, xhigh)``, where *plot* an OmegaPlot RectPlot
instance, *xlow* is the left edge of the plot bounds, and *xhigh* is the
right edge of the plot bounds.
""" |
import omega as om
model_x = np.concatenate((model_plot.xlo, [model_plot.xhi[-1]]))
model_x[0] = max(model_x[0], xmin_clamp)
model_y = np.concatenate((model_plot.y, [0.]))
# Sigh, sometimes Sherpa gives us bad values.
is_bad = ~np.isfinite(model_y)
if is_bad.sum():
from .cli import warn
warn('bad Sherpa model Y value(s) at: %r', np.where(is_bad)[0])
model_y[is_bad] = 0
data_left_edges = data_plot.x - 0.5 * data_plot.xerr
data_left_edges[0] = max(data_left_edges[0], xmin_clamp)
data_hist_x = np.concatenate((data_left_edges, [data_plot.x[-1] + 0.5 * data_plot.xerr[-1]]))
data_hist_y = np.concatenate((data_plot.y, [0.]))
log_bounds_pad_factor = 0.9
xlow = model_x[0] * log_bounds_pad_factor
xhigh = model_x[-1] / log_bounds_pad_factor
p = om.RectPlot()
if min_valid_x is not None:
p.add(om.rect.XBand(1e-3 * xlow, min_valid_x, keyText=None), zheight=-1, dsn=1)
if max_valid_x is not None:
p.add(om.rect.XBand(max_valid_x, xhigh * 1e3, keyText=None), zheight=-1, dsn=1)
csp = om.rect.ContinuousSteppedPainter(keyText=desc + ' Model')
csp.setFloats(model_x, model_y)
p.add(csp)
csp = om.rect.ContinuousSteppedPainter(keyText=None)
csp.setFloats(data_hist_x, data_hist_y)
p.add(csp)
p.addXYErr(data_plot.x, data_plot.y, data_plot.yerr, desc + ' Data', lines=0, dsn=1)
p.setLabels(data_plot.xlabel, data_plot.ylabel)
p.setLinLogAxes(True, False)
p.setBounds (xlow, xhigh)
return p, xlow, xhigh |
<SYSTEM_TASK:>
Download the given file. Clobber overwrites file if exists.
<END_TASK>
<USER_TASK:>
Description:
def download_file(local_filename, url, clobber=False):
"""Download the given file. Clobber overwrites file if exists.""" |
dir_name = os.path.dirname(local_filename)
mkdirs(dir_name)
if clobber or not os.path.exists(local_filename):
i = requests.get(url)
# if not exists
if i.status_code == 404:
print('Failed to download file:', local_filename, url)
return False
# write out in 1MB chunks
chunk_size_in_bytes = 1024*1024 # 1MB
with open(local_filename, 'wb') as local_file:
for chunk in i.iter_content(chunk_size=chunk_size_in_bytes):
local_file.write(chunk)
return True |
<SYSTEM_TASK:>
Download the given JSON file, and pretty-print before we output it.
<END_TASK>
<USER_TASK:>
Description:
def download_json(local_filename, url, clobber=False):
"""Download the given JSON file, and pretty-print before we output it.""" |
with open(local_filename, 'w') as json_file:
json_file.write(json.dumps(requests.get(url).json(), sort_keys=True, indent=2, separators=(',', ': '))) |
<SYSTEM_TASK:>
Turn arbitrary data values into ARGB32 colors.
<END_TASK>
<USER_TASK:>
Description:
def data_to_argb32 (data, cmin=None, cmax=None, stretch='linear', cmap='black_to_blue'):
"""Turn arbitrary data values into ARGB32 colors.
There are three steps to this process: clipping the data values to a
maximum and minimum; stretching the spacing between those values; and
converting their amplitudes into colors with some kind of color map.
`data` - Input data; can (and should) be a MaskedArray if some values are
invalid.
`cmin` - The data clip minimum; all values <= cmin are treated
identically. If None (the default), `data.min ()` is used.
`cmax` - The data clip maximum; all values >= cmax are treated
identically. If None (the default), `data.max ()` is used.
`stretch` - The stretch function name; 'linear', 'sqrt', or 'square'; see
the Stretcher class.
`cmap` - The color map name; defaults to 'black_to_blue'. See the
`pwkit.colormaps` module for more choices.
Returns a Numpy array of the same shape as `data` with dtype `np.uint32`,
which represents the ARGB32 colorized version of the data. If your
colormap is restricted to a single R or G or B channel, you can make color
images by bitwise-or'ing together different such arrays.
""" |
# This could be more efficient, but whatever. This lets us share code with
# the ndshow module.
clipper = Clipper ()
clipper.alloc_buffer (data)
clipper.set_tile_size ()
clipper.dmin = cmin if cmin is not None else data.min ()
clipper.dmax = cmax if cmax is not None else data.max ()
clipper.ensure_all_updated (data)
stretcher = Stretcher (stretch)
stretcher.alloc_buffer (clipper.buffer)
stretcher.set_tile_size ()
stretcher.ensure_all_updated (clipper.buffer)
mapper = ColorMapper (cmap)
mapper.alloc_buffer (stretcher.buffer)
mapper.set_tile_size ()
mapper.ensure_all_updated (stretcher.buffer)
return mapper.buffer |
<SYSTEM_TASK:>
Turn arbitrary data values into a Cairo ImageSurface.
<END_TASK>
<USER_TASK:>
Description:
def data_to_imagesurface (data, **kwargs):
"""Turn arbitrary data values into a Cairo ImageSurface.
The method and arguments are the same as data_to_argb32, except that the
data array will be treated as 2D, and higher dimensionalities are not
allowed. The return value is a Cairo ImageSurface object.
Combined with the write_to_png() method on ImageSurfaces, this is an easy
way to quickly visualize 2D data.
""" |
import cairo
data = np.atleast_2d (data)
if data.ndim != 2:
raise ValueError ('input array may not have more than 2 dimensions')
argb32 = data_to_argb32 (data, **kwargs)
format = cairo.FORMAT_ARGB32
height, width = argb32.shape
stride = cairo.ImageSurface.format_stride_for_width (format, width)
if argb32.strides[0] != stride:
raise ValueError ('stride of data array not compatible with ARGB32')
return cairo.ImageSurface.create_for_data (argb32, format,
width, height, stride) |
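A quick-look sketch combining the two routines above; the array contents and output file name are invented::

    import numpy as np

    data = np.random.uniform (size=(128, 128))
    surf = data_to_imagesurface (data)
    surf.write_to_png ('quicklook.png')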
<SYSTEM_TASK:>
Compute the packed pivoting Q-R factorization of a matrix.
<END_TASK>
<USER_TASK:>
Description:
def _qr_factor_packed(a, enorm, finfo):
"""Compute the packed pivoting Q-R factorization of a matrix.
Parameters:
a - An n-by-m matrix, m >= n. This will be *overwritten*
by this function as described below!
enorm - A Euclidian-norm-computing function.
finfo - A Numpy finfo object.
Returns:
pmut - An n-element permutation vector
rdiag - An n-element vector of the diagonal of R
acnorm - An n-element vector of the norms of the rows
of the input matrix 'a'.
Computes the transposed Q-R factorization of the matrix 'a', with
pivoting, in a packed form, in-place. The packed information can be
used to construct matrices Q and R such that
A P = R Q or, in Python,
np.dot(r, q) = a[pmut]
where q is m-by-m and q q^T = ident and r is n-by-m and is lower
triangular. The function _qr_factor_full can compute these
matrices. The packed form of output is all that is used by the main LM
fitting algorithm.
"Pivoting" refers to permuting the rows of 'a' to have their norms in
nonincreasing order. The return value 'pmut' maps the unpermuted rows
of 'a' to permuted rows. That is, the norms of the rows of a[pmut] are
in nonincreasing order.
The parameter 'a' is overwritten by this function. Its new value
should still be interpreted as an n-by-m array. It comes in two
parts. Its strict lower triangular part contains the strict lower
triangular part of R. (The diagonal of R is returned in 'rdiag' and
the strict upper trapezoidal part of R is zero.) The upper trapezoidal
part of 'a' contains Q as factorized into a series of Householder
transformation vectors. Q can be reconstructed as the matrix product
of n Householder matrices, where the i'th Householder matrix is
defined by
H_i = I - 2 (v v^T) / (v^T v)
where 'v' is the pmut[i]'th row of 'a' with its strict lower
triangular part set to zero. See _qr_factor_full for more information.
'rdiag' contains the diagonal part of the R matrix, taking into
account the permutation of 'a'. The strict lower triangular part of R
is stored in 'a' *with permutation*, so that the i'th row of R has
rdiag[i] as its diagonal and a[pmut[i],:i] as its upper part. See
_qr_factor_full for more information.
'acnorm' contains the norms of the rows of the original input
matrix 'a' without permutation.
The form of this transformation and the method of pivoting first
appeared in Linpack.""" |
machep = finfo.eps
n, m = a.shape
if m < n:
raise ValueError('"a" must be at least as tall as it is wide')
acnorm = np.empty(n, finfo.dtype)
for j in range(n):
acnorm[j] = enorm(a[j], finfo)
rdiag = acnorm.copy()
wa = acnorm.copy()
pmut = np.arange(n)
for i in range(n):
# Find the row of a with the i'th largest norm, and note it in
# the pivot vector.
kmax = rdiag[i:].argmax() + i
if kmax != i:
temp = pmut[i]
pmut[i] = pmut[kmax]
pmut[kmax] = temp
rdiag[kmax] = rdiag[i]
wa[kmax] = wa[i]
temp = a[i].copy()
a[i] = a[kmax]
a[kmax] = temp
# Compute the Householder transformation to reduce the i'th
# row of A to a multiple of the i'th unit vector.
ainorm = enorm(a[i,i:], finfo)
if ainorm == 0:
rdiag[i] = 0
continue
if a[i,i] < 0:
# Doing this apparently improves FP precision somehow.
ainorm = -ainorm
a[i,i:] /= ainorm
a[i,i] += 1
# Apply the transformation to the remaining rows and update
# the norms.
for j in range(i + 1, n):
a[j,i:] -= a[i,i:] * np.dot(a[i,i:], a[j,i:]) / a[i,i]
if rdiag[j] != 0:
rdiag[j] *= np.sqrt(max(1 - (a[j,i] / rdiag[j])**2, 0))
if 0.05 * (rdiag[j] / wa[j])**2 <= machep:
# What does this do???
wa[j] = rdiag[j] = enorm(a[j,i+1:], finfo)
rdiag[i] = -ainorm
return pmut, rdiag, acnorm |
<SYSTEM_TASK:>
Compute the QR factorization of a matrix, with pivoting.
<END_TASK>
<USER_TASK:>
Description:
def _qr_factor_full(a, dtype=float):
"""Compute the QR factorization of a matrix, with pivoting.
Parameters:
a - An n-by-m arraylike, m >= n.
dtype - (optional) The data type to use for computations.
Default is float.
Returns:
q - An m-by-m orthogonal matrix (q q^T = ident)
r - An n-by-m upper triangular matrix
pmut - An n-element permutation vector
The returned values will satisfy the equation
np.dot(r, q) == a[pmut]
The outputs are computed indirectly via the function
_qr_factor_packed. If you need to compute q and r matrices in
production code, there are faster ways to do it. This function is for
testing _qr_factor_packed.
The permutation vector pmut is a vector of the integers 0 through
n-1. It sorts the rows of 'a' by their norms, so that the
pmut[i]'th row of 'a' has the i'th biggest norm.""" |
n, m = a.shape
# Compute the packed Q and R matrix information.
packed, pmut, rdiag, acnorm = \
_manual_qr_factor_packed(a, dtype)
# Now we unpack. Start with the R matrix, which is easy: we just
# have to piece it together from the strict lower triangle of 'a'
# and the diagonal in 'rdiag'.
r = np.zeros((n, m))
for i in range(n):
r[i,:i] = packed[i,:i]
r[i,i] = rdiag[i]
# Now the Q matrix. It is the concatenation of n Householder
# transformations, each of which is defined by a row in the upper
# trapezoidal portion of 'a'. We extract the appropriate vector,
# construct the matrix for the Householder transform, and build up
# the Q matrix.
q = np.eye(m)
v = np.empty(m)
for i in range(n):
v[:] = packed[i]
v[:i] = 0
hhm = np.eye(m) - 2 * np.outer(v, v) / np.dot(v, v)
q = np.dot(hhm, q)
return q, r, pmut |
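Since this routine exists for testing, a small self-check of the identities quoted above may be useful; the test matrix is invented::

    import numpy as np

    a0 = np.array ([[3., 1., 4., 1.],
                    [5., 9., 2., 6.]])
    q, r, pmut = _qr_factor_full (a0.copy ())
    assert np.allclose (np.dot (q, q.T), np.eye (4))   # q is orthogonal
    assert np.allclose (np.dot (r, q), a0[pmut])       # reconstructs the row-permuted input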
<SYSTEM_TASK:>
Solve an equation given a QR factored matrix and a diagonal.
<END_TASK>
<USER_TASK:>
Description:
def _qrd_solve(r, pmut, ddiag, bqt, sdiag):
"""Solve an equation given a QR factored matrix and a diagonal.
Parameters:
r - **input-output** n-by-n array. The full lower triangle contains
the full lower triangle of R. On output, the strict upper
triangle contains the transpose of the strict lower triangle of
S.
pmut - n-vector describing the permutation matrix P.
ddiag - n-vector containing the diagonal of the matrix D in the base
problem (see below).
bqt - n-vector containing the first n elements of B Q^T.
sdiag - output n-vector. It is filled with the diagonal of S. Should
be preallocated by the caller -- can result in somewhat greater
efficiency if the vector is reused from one call to the next.
Returns:
x - n-vector solving the equation.
Compute the n-vector x such that
A^T x = B, D x = 0
where A is an n-by-m matrix, B is an m-vector, and D is an n-by-n
diagonal matrix. We are given information about pivoted QR
factorization of A with permutation, such that
A P = R Q
where P is a permutation matrix, Q has orthogonal rows, and R is lower
triangular with nonincreasing diagonal elements. Q is m-by-m, R is
n-by-m, and P is n-by-n. If x = P z, then we need to solve
R z = B Q^T,
P^T D P z = 0 (why the P^T? and do these need to be updated for the transposition?)
If the system is rank-deficient, these equations are solved as well as
possible in a least-squares sense. For the purposes of the LM
algorithm we also compute the lower triangular n-by-n matrix S such
that
P^T (A^T A + D D) P = S^T S. (transpose?)
""" |
n, m = r.shape
# "Copy r and bqt to preserve input and initialize s. In
# particular, save the diagonal elements of r in x." Recall that
# on input only the full lower triangle of R is meaningful, so we
# can mirror that into the upper triangle without issues.
for i in range(n):
r[i,i:] = r[i:,i]
x = r.diagonal().copy()
zwork = bqt.copy()
# "Eliminate the diagonal matrix d using a Givens rotation."
for i in range(n):
# "Prepare the row of D to be eliminated, locating the
# diagonal element using P from the QR factorization."
li = pmut[i]
if ddiag[li] == 0:
sdiag[i] = r[i,i]
r[i,i] = x[i]
continue
sdiag[i:] = 0
sdiag[i] = ddiag[li]
# "The transformations to eliminate the row of d modify only a
# single element of (q transpose)*b beyond the first n, which
# is initially zero."
bqtpi = 0.
for j in range(i, n):
# "Determine a Givens rotation which eliminates the
# appropriate element in the current row of D."
if sdiag[j] == 0:
continue
if abs(r[j,j]) < abs(sdiag[j]):
cot = r[j,j] / sdiag[j]
sin = 0.5 / np.sqrt(0.25 + 0.25 * cot**2)
cos = sin * cot
else:
tan = sdiag[j] / r[j,j]
cos = 0.5 / np.sqrt(0.25 + 0.25 * tan**2)
sin = cos * tan
# "Compute the modified diagonal element of r and the
# modified element of ((q transpose)*b,0)."
r[j,j] = cos * r[j,j] + sin * sdiag[j]
temp = cos * zwork[j] + sin * bqtpi
bqtpi = -sin * zwork[j] + cos * bqtpi
zwork[j] = temp
# "Accumulate the transformation in the row of s."
if j + 1 < n:
temp = cos * r[j,j+1:] + sin * sdiag[j+1:]
sdiag[j+1:] = -sin * r[j,j+1:] + cos * sdiag[j+1:]
r[j,j+1:] = temp
# Save the diagonal of S and restore the diagonal of R
# from its saved location in x.
sdiag[i] = r[i,i]
r[i,i] = x[i]
# "Solve the triangular system for z. If the system is singular
# then obtain a least squares solution."
nsing = n
for i in range(n):
if sdiag[i] == 0.:
nsing = i
zwork[i:] = 0
break
if nsing > 0:
zwork[nsing-1] /= sdiag[nsing-1] # Degenerate case
# "Reverse loop"
for i in range(nsing - 2, -1, -1):
s = np.dot(zwork[i+1:nsing], r[i,i+1:nsing])
zwork[i] = (zwork[i] - s) / sdiag[i]
# "Permute the components of z back to components of x."
x[pmut] = zwork
return x |
<SYSTEM_TASK:>
Solve the equation A^T x = B, D x = 0.
<END_TASK>
<USER_TASK:>
Description:
def _qrd_solve_full(a, b, ddiag, dtype=float):
"""Solve the equation A^T x = B, D x = 0.
Parameters:
a - an n-by-m array, m >= n
b - an m-vector
ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.)
Returns:
x - n-vector solving the equation.
s - the n-by-n supplementary matrix s.
pmut - n-element permutation vector defining the permutation matrix P.
The equations are solved in a least-squares sense if the system is
rank-deficient. D is a diagonal matrix and hence only its diagonal is
in fact supplied as an argument. The matrix s is full lower triangular
and solves the equation
P^T (A A^T + D D) P = S^T S (needs transposition?)
where P is the permutation matrix defined by the vector pmut; it puts
the rows of 'a' in order of nonincreasing rank, so that a[pmut]
has its rows sorted that way.
""" |
a = np.asarray(a, dtype)
b = np.asarray(b, dtype)
ddiag = np.asarray(ddiag, dtype)
n, m = a.shape
assert m >= n
assert b.shape == (m, )
assert ddiag.shape == (n, )
# The computation is straightforward.
q, r, pmut = _qr_factor_full(a)
bqt = np.dot(b, q.T)
x, s = _manual_qrd_solve(r[:,:n], pmut, ddiag, bqt,
dtype=dtype, build_s=True)
return x, s, pmut |
<SYSTEM_TASK:>
Calculate the covariance matrix of the fitted parameters
<END_TASK>
<USER_TASK:>
Description:
def _calc_covariance(r, pmut, tol=1e-14):
"""Calculate the covariance matrix of the fitted parameters
Parameters:
r - n-by-n matrix, the full upper triangle of R
pmut - n-vector, defines the permutation of R
tol - scalar, relative column scale for determining rank
deficiency. Default 1e-14.
Returns:
cov - n-by-n matrix, the covariance matrix C
Given an n-by-n matrix A, the corresponding covariance matrix
is
C = inverse(A^T A)
This routine is given information relating to the pivoted transposed
QR factorization of A, which is defined by matrices such that
A P = R Q
where P is a permutation matrix, Q has orthogonal rows, and R is a
lower triangular matrix with diagonal elements of nonincreasing
magnitude. In particular we take the full lower triangle of R ('r')
and a vector describing P ('pmut'). The covariance matrix is then
C = P inverse(R^T R) P^T
If A is nearly rank-deficient, it may be desirable to compute the
covariance matrix corresponding to the linearly-independent columns of
A. We use a tolerance, 'tol', to define the numerical rank of A. If j
is the largest integer such that |R[j,j]| > tol*|R[0,0]|, then we
compute the covariance matrix for the first j columns of R. For k > j,
the corresponding covariance entries (pmut[k]) are set to zero.
""" |
# This routine could save an allocation by operating on r in-place,
# which might be worthwhile for large n, and is what the original
# Fortran does.
n = r.shape[1]
assert r.shape[0] >= n
r = r.copy()
# Form the inverse of R in the full lower triangle of R.
jrank = -1
abstol = tol * abs(r[0,0])
for i in range(n):
if abs(r[i,i]) <= abstol:
break
r[i,i] **= -1
for j in range(i):
temp = r[i,i] * r[i,j]
r[i,j] = 0.
r[i,:j+1] -= temp * r[j,:j+1]
jrank = i
# Form the full lower triangle of the inverse(R^T R) in the full
# lower triangle of R.
for i in range(jrank + 1):
for j in range(i):
r[j,:j+1] += r[i,j] * r[i,:j+1]
r[i,:i+1] *= r[i,i]
# Form the full upper triangle of the covariance matrix in the
# strict upper triangle of R and in wa.
wa = np.empty(n)
wa.fill(r[0,0])
for i in range(n):
pi = pmut[i]
sing = i > jrank
for j in range(i + 1):
if sing:
r[i,j] = 0.
pj = pmut[j]
if pj > pi:
r[pi,pj] = r[i,j]
elif pj < pi:
r[pj,pi] = r[i,j]
wa[pi] = r[i,i]
# Symmetrize.
for i in range(n):
r[i,:i+1] = r[:i+1,i]
r[i,i] = wa[i]
return r |
<SYSTEM_TASK:>
Invoke a tool and exit.
<END_TASK>
<USER_TASK:>
Description:
def invoke_tool (namespace, tool_class=None):
"""Invoke a tool and exit.
`namespace` is a namespace-type dict from which the tool is initialized.
It should contain exactly one value that is a `Multitool` subclass, and
this subclass will be instantiated and populated (see
`Multitool.populate()`) using the other items in the namespace. Instances
and subclasses of `Command` will therefore be registered with the
`Multitool`. The tool is then invoked.
`pwkit.cli.propagate_sigint()` and `pwkit.cli.unicode_stdio()` are called
at the start of this function. It should therefore be only called immediately
upon startup of the Python interpreter.
This function always exits with an exception. The exception will be
SystemExit (0) in case of success.
The intended invocation is `invoke_tool (globals ())` in some module that
defines a `Multitool` subclass and multiple `Command` subclasses.
If `tool_class` is not None, this is used as the tool class rather than
searching `namespace`, potentially avoiding problems with modules
containing multiple `Multitool` implementations.
""" |
import sys
from .. import cli
cli.propagate_sigint ()
cli.unicode_stdio ()
cli.backtrace_on_usr1 ()
if tool_class is None:
for value in itervalues (namespace):
if is_strict_subclass (value, Multitool):
if tool_class is not None:
raise PKError ('do not know which Multitool implementation to use')
tool_class = value
if tool_class is None:
raise PKError ('no Multitool implementation to use')
tool = tool_class ()
tool.populate (itervalues (namespace))
tool.commandline (sys.argv) |
<SYSTEM_TASK:>
Return an instance of `argparse.ArgumentParser` used to process
<END_TASK>
<USER_TASK:>
Description:
def get_arg_parser (self, **kwargs):
"""Return an instance of `argparse.ArgumentParser` used to process
this tool's command-line arguments.
""" |
import argparse
ap = argparse.ArgumentParser (
prog = kwargs['argv0'],
description = self.summary,
)
return ap |
<SYSTEM_TASK:>
Register a new command with the tool. 'cmd' is expected to be an instance
<END_TASK>
<USER_TASK:>
Description:
def register (self, cmd):
"""Register a new command with the tool. 'cmd' is expected to be an instance
of `Command`, although here only the `cmd.name` attribute is
investigated. Multiple commands with the same name are not allowed to
be registered. Returns 'self'.
""" |
if cmd.name is None:
raise ValueError ('no name set for Command object %r' % cmd)
if cmd.name in self.commands:
raise ValueError ('a command named "%s" has already been '
'registered' % cmd.name)
self.commands[cmd.name] = cmd
return self |
<SYSTEM_TASK:>
This function mainly exists to be overridden by subclasses.
<END_TASK>
<USER_TASK:>
Description:
def invoke_command (self, cmd, args, **kwargs):
"""This function mainly exists to be overridden by subclasses.""" |
new_kwargs = kwargs.copy ()
new_kwargs['argv0'] = kwargs['argv0'] + ' ' + cmd.name
new_kwargs['parent'] = self
new_kwargs['parent_kwargs'] = kwargs
return cmd.invoke_with_usage (args, **new_kwargs) |
<SYSTEM_TASK:>
There must be a way to be efficient and stream output instead of loading
<END_TASK>
<USER_TASK:>
Description:
def merge_bibtex_collections(citednames, maindict, extradicts, allow_missing=False):
"""There must be a way to be efficient and stream output instead of loading
everything into memory at once, but, meh.
Note that we augment `citednames` with all of the names in `maindict`. The
intention is that if we've gone to the effort of getting good data for
some record, we don't want to trash it if the citation is temporarily
removed (even if it ought to be manually recoverable from version
control). Seems better to err on the side of preservation; I can write a
quick pruning tool later if needed.
""" |
allrecords = {}
for ed in extradicts:
allrecords.update(ed)
allrecords.update(maindict)
missing = []
from collections import OrderedDict
records = OrderedDict()
from itertools import chain
wantednames = sorted(chain(citednames, six.viewkeys(maindict)))
for name in wantednames:
rec = allrecords.get(name)
if rec is None:
missing.append(name)
else:
records[name] = rec
if len(missing) and not allow_missing:
# TODO: custom exception so caller can actually see what's missing;
# could conceivably stub out missing records or something.
raise PKError('missing BibTeX records: %s', ' '.join(missing))
return records |
<SYSTEM_TASK:>
bibtexparser.write converts the entire database to one big string and
<END_TASK>
<USER_TASK:>
Description:
def write_bibtex_dict(stream, entries):
"""bibtexparser.write converts the entire database to one big string and
writes it out in one go. I'm sure it will always all fit in RAM but some
things just will not stand.
""" |
from bibtexparser.bwriter import BibTexWriter
writer = BibTexWriter()
writer.indent = ' '
writer.entry_separator = ''
first = True
for rec in entries:
if first:
first = False
else:
stream.write(b'\n')
stream.write(writer._entry_to_bibtex(rec).encode('utf8')) |
<SYSTEM_TASK:>
Merge multiple BibTeX files into a single homogeneously-formatted output,
<END_TASK>
<USER_TASK:>
Description:
def merge_bibtex_with_aux(auxpath, mainpath, extradir, parse=get_bibtex_dict, allow_missing=False):
"""Merge multiple BibTeX files into a single homogeneously-formatted output,
using a LaTeX .aux file to know which records are worth paying attention
to.
The file identified by `mainpath` will be overwritten with the new .bib
contents. This function is intended to be used in a version-control
context.
Files matching the glob "*.bib" in `extradir` will be read in to
supplement the information in `mainpath`. Records already in the file in
`mainpath` always take precedence.
""" |
auxpath = Path(auxpath)
mainpath = Path(mainpath)
extradir = Path(extradir)
with auxpath.open('rt') as aux:
citednames = sorted(cited_names_from_aux_file(aux))
main = mainpath.try_open(mode='rt')
if main is None:
maindict = {}
else:
maindict = parse(main)
main.close()
def gen_extra_dicts():
# If extradir does not exist, Path.glob() will return an empty list,
# which seems acceptable to me.
for item in sorted(extradir.glob('*.bib')):
with item.open('rt') as extra:
yield parse(extra)
merged = merge_bibtex_collections(citednames, maindict, gen_extra_dicts(),
allow_missing=allow_missing)
with mainpath.make_tempfile(want='handle', resolution='overwrite') as newbib:
write_bibtex_dict(newbib, six.viewvalues(merged)) |
<SYSTEM_TASK:>
Tectonic has taken over most of the features that this tool used to provide,
<END_TASK>
<USER_TASK:>
Description:
def just_smart_bibtools(bib_style, aux, bib):
"""Tectonic has taken over most of the features that this tool used to provide,
but here's a hack to keep my smart .bib file generation working.
""" |
extradir = Path('.bibtex')
extradir.ensure_dir(parents=True)
bib_export(bib_style, aux, extradir / 'ZZ_bibtools.bib',
no_tool_ok=True, quiet=True, ignore_missing=True)
merge_bibtex_with_aux(aux, bib, extradir) |
<SYSTEM_TASK:>
Create a basepol from antenna numbers and a CASA polarization code.
<END_TASK>
<USER_TASK:>
Description:
def aap_to_bp (ant1, ant2, pol):
"""Create a basepol from antenna numbers and a CASA polarization code.""" |
if ant1 < 0:
raise ValueError ('first antenna is below 0: %s' % ant1)
if ant2 < ant1:
raise ValueError ('second antenna is below first: %s' % ant2)
if pol < 1 or pol > 12:
raise ValueError ('illegal polarization code %s' % pol)
fps = _pol_to_fpol[pol]
ap1 = (ant1 << 3) + ((fps >> 4) & 0x07)
ap2 = (ant2 << 3) + (fps & 0x07)
return ap1, ap2 |
<SYSTEM_TASK:>
We have loaded in all of the visibilities in one timeslot. We can now
<END_TASK>
<USER_TASK:>
Description:
def _finish_timeslot (self):
    """We have loaded in all of the visibilities in one timeslot. We can now
    compute the phase closure triples.

    XXX: we should only process independent triples. Are we???

    """
    for fpol, aps in self.ap_by_fpol.items ():
        aps = sorted (aps)
        nap = len (aps)

        for i1, ap1 in enumerate (aps):
            for i2 in range (i1, nap):
                ap2 = aps[i2]
                bp1 = (ap1, ap2)
                info = self.data_by_bp.get (bp1)
                if info is None:
                    continue

                data1, flags1 = info

                for i3 in range (i2, nap):
                    ap3 = aps[i3]
                    bp2 = (ap2, ap3)
                    info = self.data_by_bp.get (bp2)
                    if info is None:
                        continue

                    data2, flags2 = info

                    bp3 = (ap1, aps[i3])
                    info = self.data_by_bp.get (bp3)
                    if info is None:
                        continue

                    data3, flags3 = info

                    # try to minimize allocations:
                    tflags = flags1 & flags2
                    np.logical_and (tflags, flags3, tflags)
                    if not tflags.any ():
                        continue

                    triple = data3.conj ()
                    np.multiply (triple, data1, triple)
                    np.multiply (triple, data2, triple)
                    self._process_sample (ap1, ap2, ap3, triple, tflags)

    # Reset for next timeslot
    self.cur_time = -1.
    self.bp_by_ap = None
    self.ap_by_fpol = None
<SYSTEM_TASK:>
We have computed one independent phase closure triple in one timeslot.
<END_TASK>
<USER_TASK:>
Description:
def _process_sample (self, ap1, ap2, ap3, triple, tflags):
"""We have computed one independent phase closure triple in one timeslot.
""" |
# Frequency-resolved:
np.divide (triple, np.abs (triple), triple)
phase = np.angle (triple)
self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap1, phase, tflags + 0.)
self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap2, phase, tflags + 0.)
self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap3, phase, tflags + 0.)
# Frequency-averaged:
triple = np.dot (triple, tflags) / tflags.sum ()
phase = np.angle (triple)
self.global_stats_by_time.accum (self.cur_time, phase)
self.ap_stats_by_ddid[self.cur_ddid].accum (ap1, phase)
self.ap_stats_by_ddid[self.cur_ddid].accum (ap2, phase)
self.ap_stats_by_ddid[self.cur_ddid].accum (ap3, phase)
self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap2), phase)
self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap3), phase)
self.bp_stats_by_ddid[self.cur_ddid].accum ((ap2, ap3), phase)
self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap1, phase)
self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap2, phase)
self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap3, phase) |
<SYSTEM_TASK:>
Load a Phoenix model atmosphere spectrum.
<END_TASK>
<USER_TASK:>
Description:
def load_spectrum(path, smoothing=181, DF=-8.):
"""Load a Phoenix model atmosphere spectrum.
path : string
The file path to load.
smoothing : integer
Smoothing to apply. If None, do not smooth. If an integer, smooth with a
Hamming window. Otherwise, the variable is assumed to be a different
smoothing window, and the data will be convolved with it.
DF: float
Numerical factor used to compute the emergent flux density.
Returns a Pandas DataFrame containing the columns:
wlen
Sample wavelength in Angstrom.
flam
Flux density in erg/cm²/s/Å. See `pwkit.synphot` for related tools.
The values of *flam* returned by this function are computed from the
second column of the data file as specified in the documentation: ``flam =
10**(col2 + DF)``. The documentation states that the default value, -8, is
appropriate for most modern models; but some older models use other
values.
Loading takes about 5 seconds on my current laptop. Un-smoothed spectra
have about 630,000 samples.
""" |
try:
ang, lflam = np.loadtxt(path, usecols=(0,1)).T
except ValueError:
# In some files, the numbers in the first columns fill up the
# whole 12-character column width, and are given in exponential
# notation with a 'D' character, so we must be more careful:
with open(path, 'rb') as f:
def lines():
for line in f:
yield line.replace(b'D', b'e')
ang, lflam = np.genfromtxt(lines(), delimiter=(13, 12)).T
# Data files do not come sorted!
z = ang.argsort()
ang = ang[z]
flam = 10**(lflam[z] + DF)
del z
if smoothing is not None:
if isinstance(smoothing, int):
smoothing = np.hamming(smoothing)
else:
smoothing = np.asarray(smoothing)
wnorm = np.convolve(np.ones_like(smoothing), smoothing, mode='valid')
smoothing = smoothing / wnorm # do not alter original array.
smooth = lambda a: np.convolve(a, smoothing, mode='valid')[::smoothing.size]
ang = smooth(ang)
flam = smooth(flam)
return pd.DataFrame({'wlen': ang, 'flam': flam}) |
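A brief usage sketch; the file name below is hypothetical::
    df = load_spectrum('lte032-5.0-0.0.BT-Settl.spec.7')  # hypothetical path
    print(df.wlen.min(), df.wlen.max())
    df.plot('wlen', 'flam')  # quick look using pandas' plotting helpers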
<SYSTEM_TASK:>
Estimate a UCD's bolometric luminosity given some basic parameters.
<END_TASK>
<USER_TASK:>
Description:
def lbol_from_spt_dist_mag (sptnum, dist_pc, jmag, kmag, format='cgs'):
"""Estimate a UCD's bolometric luminosity given some basic parameters.
sptnum: the spectral type as a number; 8 -> M8; 10 -> L0; 20 -> T0
Valid values range between 0 and 30, ie M0 to Y0.
dist_pc: distance to the object in parsecs
jmag: object's J-band magnitude or NaN (*not* None) if unavailable
kmag: same with K-band magnitude
format: either 'cgs', 'logcgs', or 'logsun', defining the form of the
outputs. Logarithmic quantities are base 10.
This routine can be used with vectors of measurements. The result will be
NaN if a value cannot be computed. This routine implements the method
documented in the Appendix of Williams et al., 2014ApJ...785....9W
(doi:10.1088/0004-637X/785/1/9).
""" |
bcj = bcj_from_spt (sptnum)
bck = bck_from_spt (sptnum)
n = np.zeros (sptnum.shape, dtype=int)
app_mbol = np.zeros (sptnum.shape)
w = np.isfinite (bcj) & np.isfinite (jmag)
app_mbol[w] += jmag[w] + bcj[w]
n[w] += 1
w = np.isfinite (bck) & np.isfinite (kmag)
app_mbol[w] += kmag[w] + bck[w]
n[w] += 1
w = (n != 0)
abs_mbol = (app_mbol[w] / n[w]) - 5 * (np.log10 (dist_pc[w]) - 1)
# note: abs_mbol is filtered by `w`
lbol = np.empty (sptnum.shape)
lbol.fill (np.nan)
lbol[w] = lbol_from_mbol (abs_mbol, format=format)
return lbol |
<SYSTEM_TASK:>
Equivalent of `map` built-in, without swallowing KeyboardInterrupt.
<END_TASK>
<USER_TASK:>
Description:
def map(self, func, iterable, chunksize=None):
"""Equivalent of `map` built-in, without swallowing KeyboardInterrupt.
func
The function to apply to the items.
iterable
An iterable of items that will have `func` applied to them.
""" |
# The key magic is that we must call r.get() with a timeout, because a
# Condition.wait() without a timeout swallows KeyboardInterrupts.
r = self.map_async(func, iterable, chunksize)
while True:
try:
return r.get(self.wait_timeout)
except TimeoutError:
pass
except KeyboardInterrupt:
self.terminate()
self.join()
raise |
<SYSTEM_TASK:>
Format an angle as sexagesimal hours in a string.
<END_TASK>
<USER_TASK:>
Description:
def fmthours (radians, norm='wrap', precision=3, seps='::'):
"""Format an angle as sexagesimal hours in a string.
Arguments are:
radians
The angle, in radians.
norm (default "wrap")
The normalization mode, used for angles outside of the standard range
of 0 to 2π. If "none", the value is formatted ignoring any potential
problems. If "wrap", it is wrapped to lie within the standard range.
If "raise", a :exc:`ValueError` is raised.
precision (default 3)
The number of decimal places in the "seconds" place to use in the
formatted string.
seps (default "::")
A two- or three-item iterable, used to separate the hours, minutes, and
seconds components. If a third element is present, it appears after the
seconds component. Specifying "hms" yields something like "12h34m56s";
specifying ``['', '']`` yields something like "123456".
Returns a string.
""" |
return _fmtsexagesimal (radians * R2H, norm, 24, seps, precision=precision) |
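A quick sketch of typical output (π radians is exactly 12 hours)::
    import numpy as np
    fmthours(np.pi)              # -> '12:00:00.000'
    fmthours(np.pi, seps='hms')  # -> '12h00m00.000s'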
<SYSTEM_TASK:>
Format a longitudinal angle as sexagesimal degrees in a string.
<END_TASK>
<USER_TASK:>
Description:
def fmtdeglon (radians, norm='wrap', precision=2, seps='::'):
"""Format a longitudinal angle as sexagesimal degrees in a string.
Arguments are:
radians
The angle, in radians.
norm (default "wrap")
The normalization mode, used for angles outside of the standard range
of 0 to 2π. If "none", the value is formatted ignoring any potential
problems. If "wrap", it is wrapped to lie within the standard range.
If "raise", a :exc:`ValueError` is raised.
precision (default 2)
The number of decimal places in the "arcseconds" place to use in the
formatted string.
seps (default "::")
A two- or three-item iterable, used to separate the degrees, arcminutes,
and arcseconds components. If a third element is present, it appears
after the arcseconds component. Specifying "dms" yields something like
"12d34m56s"; specifying ``['', '']`` yields something like "123456".
Returns a string.
""" |
return _fmtsexagesimal (radians * R2D, norm, 360, seps, precision=precision) |
<SYSTEM_TASK:>
Format a latitudinal angle as sexagesimal degrees in a string.
<END_TASK>
<USER_TASK:>
Description:
def fmtdeglat (radians, norm='raise', precision=2, seps='::'):
"""Format a latitudinal angle as sexagesimal degrees in a string.
Arguments are:
radians
The angle, in radians.
norm (default "raise")
The normalization mode, used for angles outside of the standard range
of -π/2 to π/2. If "none", the value is formatted ignoring any potential
problems. If "wrap", it is wrapped to lie within the standard range.
If "raise", a :exc:`ValueError` is raised.
precision (default 2)
The number of decimal places in the "arcseconds" place to use in the
formatted string.
seps (default "::")
A two- or three-item iterable, used to separate the degrees, arcminutes,
and arcseconds components. If a third element is present, it appears
after the arcseconds component. Specifying "dms" yields something like
"+12d34m56s"; specifying ``['', '']`` yields something like "123456".
Returns a string. The return value always includes a plus or minus sign.
Note that the default of *norm* is different than in :func:`fmthours` and
:func:`fmtdeglon` since it's not so clear what a "latitude" of 110 degrees
(e.g.) means.
""" |
if norm == 'none':
pass
elif norm == 'raise':
if radians > halfpi or radians < -halfpi:
raise ValueError ('illegal latitude of %f radians' % radians)
elif norm == 'wrap':
radians = angcen (radians)
if radians > halfpi:
radians = pi - radians
elif radians < -halfpi:
radians = -pi - radians
else:
raise ValueError ('unrecognized normalization type "%s"' % norm)
if len (seps) < 2:
# To ponder: we accept len(seps) > 3; seems OK.
raise ValueError ('there must be at least two sexagesimal separators; '
'got value "%s"' % seps)
precision = max (int (precision), 0)
if precision == 0:
width = 2
else:
width = precision + 3
degrees = radians * R2D
if degrees >= 0:
sgn = '+'
else:
sgn = '-'
degrees = -degrees
deg = int (np.floor (degrees))
amin = int (np.floor ((degrees - deg) * 60))
asec = round (3600 * (degrees - deg - amin / 60.), precision)
if asec >= 60:
# Can happen if we round up
asec -= 60
amin += 1
if amin >= 60:
amin -= 60
deg += 1
if len (seps) > 2:
sep2 = seps[2]
else:
sep2 = ''
return '%s%02d%s%02d%s%0*.*f%s' % \
(sgn, deg, seps[0], amin, seps[1], width, precision, asec, sep2) |
<SYSTEM_TASK:>
Format equatorial coordinates in a single sexagesimal string.
<END_TASK>
<USER_TASK:>
Description:
def fmtradec (rarad, decrad, precision=2, raseps='::', decseps='::', intersep=' '):
"""Format equatorial coordinates in a single sexagesimal string.
Returns a string of the RA/lon coordinate, formatted as sexagesimal hours,
then *intersep*, then the Dec/lat coordinate, formatted as degrees. This
yields something like "12:34:56.78 -01:23:45.6". Arguments are:
rarad
The right ascension coordinate, in radians. More generically, this is
the longitudinal coordinate; note that the ordering in this function
differs than the other spherical functions, which generally prefer
coordinates in "lat, lon" order.
decrad
The declination coordinate, in radians. More generically, this is the
latitudinal coordinate.
precision (default 2)
The number of decimal places in the "arcseconds" place of the
latitudinal (declination) coordinate. The longitudinal (right ascension)
coordinate gets one additional place, since hours are bigger than
degrees.
raseps (default "::")
A two- or three-item iterable, used to separate the hours, minutes, and
seconds components of the RA/lon coordinate. If a third element is
present, it appears after the seconds component. Specifying "hms" yields
something like "12h34m56s"; specifying ``['', '']`` yields something
like "123456".
decseps (default "::")
A two- or three-item iterable, used to separate the degrees, arcminutes,
and arcseconds components of the Dec/lat coordinate.
intersep (default " ")
The string separating the RA/lon and Dec/lat coordinates
""" |
return (fmthours (rarad, precision=precision + 1, seps=raseps) +
text_type (intersep) +
fmtdeglat (decrad, precision=precision, seps=decseps)) |
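A short usage sketch combining the hour and degree formatters::
    import numpy as np
    # RA of 6 hours (π/2 rad) and Dec of -30°:
    fmtradec(np.pi / 2, np.radians(-30.))
    # -> something like '06:00:00.000 -30:00:00.00'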
<SYSTEM_TASK:>
Parse a string formatted as sexagesimal hours into an angle.
<END_TASK>
<USER_TASK:>
Description:
def parsehours (hrstr):
"""Parse a string formatted as sexagesimal hours into an angle.
This function converts a textual representation of an angle, measured in
hours, into a floating point value measured in radians. The format of
*hrstr* is very limited: it may not have leading or trailing whitespace,
and the components of the sexagesimal representation must be separated by
colons. The input must therefore resemble something like
``"12:34:56.78"``. A :exc:`ValueError` will be raised if the input does
not resemble this template. Hours greater than 24 are not allowed, but
negative values are.
""" |
hr = _parsesexagesimal (hrstr, 'hours', False)
if hr >= 24:
raise ValueError ('illegal hour specification: ' + hrstr)
return hr * H2R |
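A sanity-check sketch, round-tripping with :func:`fmthours`::
    import numpy as np
    rad = parsehours('12:00:00.00')
    assert np.isclose(rad, np.pi)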
<SYSTEM_TASK:>
Parse a latitude formatted as sexagesimal degrees into an angle.
<END_TASK>
<USER_TASK:>
Description:
def parsedeglat (latstr):
"""Parse a latitude formatted as sexagesimal degrees into an angle.
This function converts a textual representation of a latitude, measured in
degrees, into a floating point value measured in radians. The format of
*latstr* is very limited: it may not have leading or trailing whitespace,
and the components of the sexagesimal representation must be separated by
colons. The input must therefore resemble something like
``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does
not resemble this template. Latitudes greater than 90 or less than -90
degrees are not allowed.
""" |
deg = _parsesexagesimal (latstr, 'latitude', True)
if abs (deg) > 90:
raise ValueError ('illegal latitude specification: ' + latstr)
return deg * D2R |
<SYSTEM_TASK:>
Calculate the distance between two locations on a sphere.
<END_TASK>
<USER_TASK:>
Description:
def sphdist (lat1, lon1, lat2, lon2):
"""Calculate the distance between two locations on a sphere.
lat1
The latitude of the first location.
lon1
The longitude of the first location.
lat2
The latitude of the second location.
lon2
The longitude of the second location.
Returns the separation in radians. All arguments are in radians as well.
The arguments may be vectors.
Note that the ordering of the arguments maps to the nonstandard ordering
``(Dec, RA)`` in equatorial coordinates. In a spherical projection it maps
to ``(Y, X)`` which may also be unexpected.
The distance is computed with the "specialized Vincenty formula". Faster
but more error-prone formulae are possible; see Wikipedia on Great-circle
Distance.
""" |
cd = np.cos (lon2 - lon1)
sd = np.sin (lon2 - lon1)
c2 = np.cos (lat2)
c1 = np.cos (lat1)
s2 = np.sin (lat2)
s1 = np.sin (lat1)
a = np.sqrt ((c2 * sd)**2 + (c1 * s2 - s1 * c2 * cd)**2)
b = s1 * s2 + c1 * c2 * cd
return np.arctan2 (a, b) |
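A small sketch: the separation between the north pole and a point on the equator
is a quarter circle::
    import numpy as np
    d = sphdist(np.pi / 2, 0., 0., 0.)  # note the (lat, lon) argument ordering
    assert np.isclose(d, np.pi / 2)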
<SYSTEM_TASK:>
Calculate the bearing between two locations on a sphere.
<END_TASK>
<USER_TASK:>
Description:
def sphbear (lat1, lon1, lat2, lon2, tol=1e-15):
"""Calculate the bearing between two locations on a sphere.
lat1
The latitude of the first location.
lon1
The longitude of the first location.
lat2
The latitude of the second location.
lon2
The longitude of the second location.
tol
Tolerance for checking proximity to poles and rounding to zero.
The bearing (AKA the position angle, PA) is the orientation of point 2
with regards to point 1 relative to the longitudinal axis. Returns the
bearing in radians. All arguments are in radians as well. The arguments
may be vectors.
Note that the ordering of the arguments maps to the nonstandard ordering
``(Dec, RA)`` in equatorial coordinates. In a spherical projection it maps
to ``(Y, X)`` which may also be unexpected.
The sign convention is astronomical: bearings range from -π to π, with
negative values if point 2 is in the western hemisphere with regards to
point 1, positive if it is in the eastern. (That is, “east from north”.)
If point 1 is very near the pole, the bearing is undefined and the result
is NaN.
The *tol* argument is used for checking proximity to the poles and for
rounding the bearing to precisely zero if it's extremely small.
Derived from ``bear()`` in `angles.py from Prasanth Nair
<https://github.com/phn/angles>`_. His version is BSD licensed. This one
is sufficiently different that I think it counts as a separate
implementation.
""" |
# cross product on outer axis:
ocross = lambda a, b: np.cross (a, b, axisa=0, axisb=0, axisc=0)
# if args have shape S, this has shape (3, S)
v1 = np.asarray ([np.cos (lat1) * np.cos (lon1),
np.cos (lat1) * np.sin (lon1),
np.sin (lat1)])
v2 = np.asarray ([np.cos (lat2) * np.cos (lon2),
np.cos (lat2) * np.sin (lon2),
np.sin (lat2)])
is_bad = (v1[0]**2 + v1[1]**2) < tol
p12 = ocross (v1, v2) # ~"perpendicular to great circle containing points"
p1z = np.asarray ([v1[1], -v1[0], np.zeros_like (lat1)]) # ~"perp to base and Z axis"
cm = np.sqrt ((ocross (p12, p1z)**2).sum (axis=0)) # ~"angle between the vectors"
bearing = np.arctan2 (cm, np.sum (p12 * p1z, axis=0))
bearing = np.where (p12[2] < 0, -bearing, bearing) # apply sign; result spans [-pi, pi]
bearing = np.where (np.abs (bearing) < tol, 0, bearing) # clamp
bearing[np.where (is_bad)] = np.nan
return bearing |
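A small sketch: seen from a point on the equator, the north pole lies due north,
i.e. at zero bearing. Array inputs are used since the implementation indexes its
result::
    import numpy as np
    pa = sphbear(np.array([0.]), np.array([0.]),
                 np.array([np.pi / 2]), np.array([0.]))
    assert np.isclose(pa[0], 0.)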
<SYSTEM_TASK:>
Offset from one location on the sphere to another.
<END_TASK>
<USER_TASK:>
Description:
def sphofs (lat1, lon1, r, pa, tol=1e-2, rmax=None):
"""Offset from one location on the sphere to another.
This function is given a start location, expressed as a latitude and
longitude, a distance to offset, and a direction to offset (expressed as a
bearing, AKA position angle). It uses these to compute a final location.
This function mirrors :func:`sphdist` and :func:`sphbear` such that::
# If:
r = sphdist (lat1, lon1, lat2a, lon2a)
pa = sphbear (lat1, lon1, lat2a, lon2a)
lat2b, lon2b = sphofs (lat1, lon1, r, pa)
# Then lat2b = lat2a and lon2b = lon2a
Arguments are:
lat1
The latitude of the start location.
lon1
The longitude of the start location.
r
The distance to offset by.
pa
The position angle (“PA” or bearing) to offset towards.
tol
The tolerance for the accuracy of the calculation.
rmax
The maximum allowed offset distance.
Returns a pair ``(lat2, lon2)``. All arguments and the return values are
measured in radians. The arguments may be vectors. The PA sign convention
is astronomical, measuring orientation east from north.
Note that the ordering of the arguments and return values maps to the
nonstandard ordering ``(Dec, RA)`` in equatorial coordinates. In a
spherical projection it maps to ``(Y, X)`` which may also be unexpected.
The offset is computed naively as::
lat2 = lat1 + r * cos (pa)
lon2 = lon1 + r * sin (pa) / cos (lat2)
This will fail for large offsets. Error checking can be done in two ways.
If *tol* is not None, :func:`sphdist` is used to calculate the actual
distance between the two locations, and if the magnitude of the fractional
difference between that and *r* is larger than *tol*, :exc:`ValueError` is
raised. This will add an overhead to the computation that may be
significant if you're going to be calling this function a lot.
Additionally, if *rmax* is not None, magnitudes of *r* greater than *rmax*
are rejected. For reference, an *r* of 0.2 (~11 deg) gives a maximum
fractional distance error of ~3%.
""" |
if rmax is not None and np.abs (r) > rmax:
raise ValueError ('sphofs radius value %f is too big for '
'our approximation' % r)
lat2 = lat1 + r * np.cos (pa)
lon2 = lon1 + r * np.sin (pa) / np.cos (lat2)
if tol is not None:
s = sphdist (lat1, lon1, lat2, lon2)
if np.any (np.abs ((s - r) / s) > tol):
raise ValueError ('sphofs approximation broke down '
'(%s %s %s %s %s %s %s)' % (lat1, lon1,
lat2, lon2,
r, s, pa))
return lat2, lon2 |
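A round-trip sketch illustrating the relationship with :func:`sphdist` and
:func:`sphbear` for a small offset (array inputs, arbitrary start point)::
    import numpy as np
    lat1, lon1 = np.array([0.1]), np.array([0.2])
    lat2, lon2 = sphofs(lat1, lon1, 0.01, np.radians(30.))
    r = sphdist(lat1, lon1, lat2, lon2)   # ~0.01
    pa = sphbear(lat1, lon1, lat2, lon2)  # ~30 degrees, within the approximation error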
<SYSTEM_TASK:>
Calculate the parallactic angle of a sky position.
<END_TASK>
<USER_TASK:>
Description:
def parang (hourangle, declination, latitude):
"""Calculate the parallactic angle of a sky position.
This computes the parallactic angle of a sky position expressed in terms
of an hour angle and declination. Arguments:
hourangle
The hour angle of the location on the sky.
declination
The declination of the location on the sky.
latitude
The latitude of the observatory.
Inputs and outputs are all in radians. Implementation adapted from GBTIDL
``parangle.pro``.
""" |
return -np.arctan2 (-np.sin (hourangle),
np.cos (declination) * np.tan (latitude)
- np.sin (declination) * np.cos (hourangle)) |
<SYSTEM_TASK:>
Convolve two Gaussians analytically.
<END_TASK>
<USER_TASK:>
Description:
def gaussian_convolve (maj1, min1, pa1, maj2, min2, pa2):
"""Convolve two Gaussians analytically.
Given the shapes of two 2-dimensional Gaussians, this function returns
the shape of their convolution.
Arguments:
maj1
Major axis of input Gaussian 1.
min1
Minor axis of input Gaussian 1.
pa1
Orientation angle of input Gaussian 1, in radians.
maj2
Major axis of input Gaussian 2.
min2
Minor axis of input Gaussian 2.
pa2
Orientation angle of input Gaussian 2, in radians.
The return value is ``(maj3, min3, pa3)``, with the same format as the
input arguments. The axes can be measured in any units, so long as they're
consistent.
Implementation copied from MIRIAD’s ``gaufac``.
""" |
c1 = np.cos (pa1)
s1 = np.sin (pa1)
c2 = np.cos (pa2)
s2 = np.sin (pa2)
a = (maj1*c1)**2 + (min1*s1)**2 + (maj2*c2)**2 + (min2*s2)**2
b = (maj1*s1)**2 + (min1*c1)**2 + (maj2*s2)**2 + (min2*c2)**2
g = 2 * ((min1**2 - maj1**2) * s1 * c1 + (min2**2 - maj2**2) * s2 * c2)
s = a + b
t = np.sqrt ((a - b)**2 + g**2)
maj3 = np.sqrt (0.5 * (s + t))
min3 = np.sqrt (0.5 * (s - t))
if abs (g) + abs (a - b) == 0:
pa3 = 0.
else:
pa3 = 0.5 * np.arctan2 (-g, a - b)
# "Amplitude of the resulting Gaussian":
# f = pi / (4 * np.log (2)) * maj1 * min1 * maj2 * min2 \
# / np.sqrt (a * b - 0.25 * g**2)
return maj3, min3, pa3 |
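A quick sketch: convolving a circular Gaussian with itself grows it by a factor
of √2::
    import numpy as np
    maj, mnr, pa = gaussian_convolve(1., 1., 0., 1., 1., 0.)
    assert np.isclose(maj, np.sqrt(2)) and np.isclose(mnr, np.sqrt(2))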
<SYSTEM_TASK:>
Deconvolve two Gaussians analytically.
<END_TASK>
<USER_TASK:>
Description:
def gaussian_deconvolve (smaj, smin, spa, bmaj, bmin, bpa):
"""Deconvolve two Gaussians analytically.
Given the shapes of 2-dimensional “source” and “beam” Gaussians, this
returns a deconvolved “result” Gaussian such that the convolution of
“beam” and “result” is “source”.
Arguments:
smaj
Major axis of source Gaussian.
smin
Minor axis of source Gaussian.
spa
Orientation angle of source Gaussian, in radians.
bmaj
Major axis of beam Gaussian.
bmin
Minor axis of beam Gaussian.
bpa
Orientation angle of beam Gaussian, in radians.
The return value is ``(rmaj, rmin, rpa, status)``. The first three values
have the same format as the input arguments. The *status* result is one of
"ok", "pointlike", or "fail". A "pointlike" status indicates that the
source and beam shapes are difficult to distinguish; a "fail" status
indicates that the two shapes seem to be mutually incompatible (e.g.,
source and beam are very narrow and orthogonal).
The axes can be measured in any units, so long as they're consistent.
Ideally if::
rmaj, rmin, rpa, status = gaussian_deconvolve (smaj, smin, spa, bmaj, bmin, bpa)
then::
smaj, smin, spa = gaussian_convolve (rmaj, rmin, rpa, bmaj, bmin, bpa)
Implementation derived from MIRIAD’s ``gaudfac``. This function currently
doesn't do a great job of dealing with pointlike sources, i.e. ones where
“source” and “beam” are nearly indistinguishable.
""" |
# I've added extra code to ensure ``smaj >= bmaj``, ``smin >= bmin``, and
# increased the coefficient in front of "limit" from 0.1 to 0.5. Feel a
# little wary about that first change.
from numpy import cos, sin, sqrt, min, abs, arctan2
if smaj < bmaj:
smaj = bmaj
if smin < bmin:
smin = bmin
alpha = ((smaj * cos (spa))**2 + (smin * sin (spa))**2 -
(bmaj * cos (bpa))**2 - (bmin * sin (bpa))**2)
beta = ((smaj * sin (spa))**2 + (smin * cos (spa))**2 -
(bmaj * sin (bpa))**2 - (bmin * cos (bpa))**2)
gamma = 2 * ((smin**2 - smaj**2) * sin (spa) * cos (spa) -
(bmin**2 - bmaj**2) * sin (bpa) * cos (bpa))
s = alpha + beta
t = sqrt ((alpha - beta)**2 + gamma**2)
limit = 0.5 * min ([smaj, smin, bmaj, bmin])**2
status = 'ok'
if alpha < 0 or beta < 0 or s < t:
dmaj = dmin = dpa = 0
if 0.5 * (s - t) < limit and alpha > -limit and beta > -limit:
status = 'pointlike'
else:
status = 'fail'
else:
dmaj = sqrt (0.5 * (s + t))
dmin = sqrt (0.5 * (s - t))
if abs (gamma) + abs (alpha - beta) == 0:
dpa = 0
else:
dpa = 0.5 * arctan2 (-gamma, alpha - beta)
return dmaj, dmin, dpa, status |
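A round-trip sketch following the relationship stated in the docstring, with
hypothetical beam and source shapes in arbitrary but consistent units::
    bmaj, bmin, bpa = 1.0, 0.8, 0.3
    smaj, smin, spa = gaussian_convolve(1.5, 1.2, 1.0, bmaj, bmin, bpa)
    rmaj, rmin, rpa, status = gaussian_deconvolve(smaj, smin, spa, bmaj, bmin, bpa)
    # status should be 'ok' and (rmaj, rmin, rpa) should approximate (1.5, 1.2, 1.0).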
<SYSTEM_TASK:>
Load data files used in Skyfield. This will download files from the
<END_TASK>
<USER_TASK:>
Description:
def load_skyfield_data():
"""Load data files used in Skyfield. This will download files from the
internet if they haven't been downloaded before.
Skyfield downloads files to the current directory by default, which is not
ideal. Here we abuse astropy and use its cache directory to cache the data
files per-user. If we start downloading files in other places in pwkit we
should maybe make this system more generic. And the dep on astropy is not
at all necessary.
Skyfield will print out a progress bar as it downloads things.
Returns ``(planets, ts)``, the standard Skyfield ephemeris and timescale
data files.
""" |
import os.path
from astropy.config import paths
from skyfield.api import Loader
cache_dir = os.path.join(paths.get_cache_dir(), 'pwkit')
loader = Loader(cache_dir)
planets = loader('de421.bsp')
ts = loader.timescale()
return planets, ts |
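A usage sketch following standard Skyfield patterns; the ephemeris keys below are
Skyfield's, not defined in this module::
    planets, ts = load_skyfield_data()
    earth = planets['earth']
    t = ts.utc(2020, 1, 1)
    ra, dec, distance = earth.at(t).observe(planets['mars']).radec()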
<SYSTEM_TASK:>
Given a 2MASS position, look up the epoch when it was observed.
<END_TASK>
<USER_TASK:>
Description:
def get_2mass_epoch (tmra, tmdec, debug=False):
"""Given a 2MASS position, look up the epoch when it was observed.
This function uses the CDS Vizier web service to look up information in
the 2MASS point source database. Arguments are:
tmra
The source's J2000 right ascension, in radians.
tmdec
The source's J2000 declination, in radians.
debug
If True, the web server's response will be printed to :data:`sys.stdout`.
The return value is an MJD. If the lookup fails, a message will be printed
to :data:`sys.stderr` (unconditionally!) and the :data:`J2000` epoch will
be returned.
""" |
import codecs
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
postdata = b'''-mime=csv
-source=2MASS
-out=_q,JD
-c=%.6f %.6f
-c.eq=J2000''' % (tmra * R2D, tmdec * R2D)
jd = None
for line in codecs.getreader('utf-8')(urlopen (_vizurl, postdata)):
line = line.strip ()
if debug:
print_ ('D: 2M >>', line)
if line.startswith ('1;'):
jd = float (line[2:])
if jd is None:
import sys
print_ ('warning: 2MASS epoch lookup failed; astrometry could be very wrong!',
file=sys.stderr)
return J2000
return jd - 2400000.5 |
<SYSTEM_TASK:>
Validate that the attributes are self-consistent.
<END_TASK>
<USER_TASK:>
Description:
def verify (self, complain=True):
"""Validate that the attributes are self-consistent.
This function does some basic checks of the object attributes to
ensure that astrometric calculations can legally be performed. If the
*complain* keyword is true, messages may be printed to
:data:`sys.stderr` if non-fatal issues are encountered.
Returns *self*.
""" |
import sys
if self.ra is None:
raise ValueError ('AstrometryInfo missing "ra"')
if self.dec is None:
raise ValueError ('AstrometryInfo missing "dec"')
if self._partial_info (self.promo_ra, self.promo_dec):
raise ValueError ('partial proper-motion info in AstrometryInfo')
if self._partial_info (self.pos_u_maj, self.pos_u_min, self.pos_u_pa):
raise ValueError ('partial positional uncertainty info in AstrometryInfo')
if self._partial_info (self.promo_u_maj, self.promo_u_min, self.promo_u_pa):
raise ValueError ('partial proper-motion uncertainty info in AstrometryInfo')
if self.pos_u_maj is None:
if complain:
print_ ('AstrometryInfo: no positional uncertainty info', file=sys.stderr)
elif self.pos_u_maj < self.pos_u_min:
# Based on experience with PM, this may be possible
if complain:
print_ ('AstrometryInfo: swapped positional uncertainty '
'major/minor axes', file=sys.stderr)
self.pos_u_maj, self.pos_u_min = self.pos_u_min, self.pos_u_maj
self.pos_u_pa += 0.5 * np.pi
if self.pos_epoch is None:
if complain:
print_('AstrometryInfo: assuming epoch of position is J2000.0', file=sys.stderr)
if self.promo_ra is None:
if complain:
print_ ('AstrometryInfo: assuming zero proper motion', file=sys.stderr)
elif self.promo_u_maj is None:
if complain:
print_ ('AstrometryInfo: no uncertainty on proper motion', file=sys.stderr)
elif self.promo_u_maj < self.promo_u_min:
# I've seen this: V* V374 Peg
if complain:
print_ ('AstrometryInfo: swapped proper motion uncertainty '
'major/minor axes', file=sys.stderr)
self.promo_u_maj, self.promo_u_min = self.promo_u_min, self.promo_u_maj
self.promo_u_pa += 0.5 * np.pi
if self.parallax is None:
if complain:
print_ ('AstrometryInfo: assuming zero parallax', file=sys.stderr)
else:
if self.parallax < 0.:
raise ValueError ('negative parallax in AstrometryInfo')
if self.u_parallax is None:
if complain:
print_ ('AstrometryInfo: no uncertainty on parallax', file=sys.stderr)
if self.vradial is None:
pass # not worth complaining
elif self.u_vradial is None:
if complain:
print_ ('AstrometryInfo: no uncertainty on v_radial', file=sys.stderr)
return self |
<SYSTEM_TASK:>
Fill in astrometric information using the Simbad web service.
<END_TASK>
<USER_TASK:>
Description:
def fill_from_simbad (self, ident, debug=False):
"""Fill in astrometric information using the Simbad web service.
This uses the CDS Simbad web service to look up astrometric
information for the source name *ident* and fills in attributes
appropriately. Values from Simbad are not always reliable.
Returns *self*.
""" |
info = get_simbad_astrometry_info (ident, debug=debug)
posref = 'unknown'
for k, v in six.iteritems (info):
if '~' in v:
continue # no info
if k == 'COO(d;A)':
self.ra = float (v) * D2R
elif k == 'COO(d;D)':
self.dec = float (v) * D2R
elif k == 'COO(E)':
a = v.split ()
self.pos_u_maj = float (a[0]) * A2R * 1e-3 # mas -> rad
self.pos_u_min = float (a[1]) * A2R * 1e-3
self.pos_u_pa = float (a[2]) * D2R
elif k == 'COO(B)':
posref = v
elif k == 'PM(A)':
self.promo_ra = float (v) # mas/yr
elif k == 'PM(D)':
self.promo_dec = float (v) # mas/yr
elif k == 'PM(E)':
a = v.split ()
self.promo_u_maj = float (a[0]) # mas/yr
self.promo_u_min = float (a[1])
self.promo_u_pa = float (a[2]) * D2R # rad!
elif k == 'PLX(V)':
self.parallax = float (v) # mas
elif k == 'PLX(E)':
self.u_parallax = float (v) # mas
elif k == 'RV(V)':
self.vradial = float (v) # km/s
elif k == 'RV(E)':
self.u_vradial = float (v) #km/s
if self.ra is None:
raise Exception ('no position returned by Simbad for "%s"' % ident)
if self.u_parallax == 0:
self.u_parallax = None
if self.u_vradial == 0:
self.u_vradial = None
# Get the right epoch of position for 2MASS positions
if posref == '2003yCat.2246....0C':
self.pos_epoch = get_2mass_epoch (self.ra, self.dec, debug)
return self |
<SYSTEM_TASK:>
Fill in astrometric information from the AllWISE catalog using Astroquery.
<END_TASK>
<USER_TASK:>
Description:
def fill_from_allwise (self, ident, catalog_ident='II/328/allwise'):
"""Fill in astrometric information from the AllWISE catalog using Astroquery.
This uses the :mod:`astroquery` module to query the AllWISE
(2013wise.rept....1C) source catalog through the Vizier
(2000A&AS..143...23O) web service. It then fills in the instance with
the relevant information. Arguments are:
ident
The AllWISE catalog identifier of the form ``"J112254.70+255021.9"``.
catalog_ident
The Vizier designation of the catalog to query. The default is
"II/328/allwise", the current version of the AllWISE catalog.
Raises :exc:`~pwkit.PKError` if something unexpected happens that
doesn't itself result in an exception within :mod:`astroquery`.
You should probably prefer :meth:`fill_from_simbad` for objects that
are known to the CDS Simbad service, but not all objects in the
AllWISE catalog are so known.
If you use this function, you should `acknowledge AllWISE
<http://irsadist.ipac.caltech.edu/wise-allwise/>`_ and `Vizier
<http://cds.u-strasbg.fr/vizier-org/licences_vizier.html>`_.
Returns *self*.
""" |
from astroquery.vizier import Vizier
import numpy.ma.core as ma_core
# We should match exactly one table and one row within that table, but
# for robustness we ignore additional results if they happen to
# appear. Strangely, querying for an invalid identifier yields a table
# with two rows that are filled with masked out data.
table_list = Vizier.query_constraints (catalog=catalog_ident, AllWISE=ident)
if not len (table_list):
raise PKError ('Vizier query returned no tables (catalog=%r AllWISE=%r)',
catalog_ident, ident)
table = table_list[0]
if not len (table):
raise PKError ('Vizier query returned empty %s table (catalog=%r AllWISE=%r)',
table.meta['name'], catalog_ident, ident)
row = table[0]
if isinstance (row['_RAJ2000'], ma_core.MaskedConstant):
raise PKError ('Vizier query returned flagged row in %s table; your AllWISE '
'identifier likely does not exist (it should be of the form '
'"J112254.70+255021.9"; catalog=%r AllWISE=%r)',
table.meta['name'], catalog_ident, ident)
# OK, we can actually do this.
self.ra = row['RA_pm'] * D2R
self.dec = row['DE_pm'] * D2R
if row['e_RA_pm'] > row['e_DE_pm']:
self.pos_u_maj = row['e_RA_pm'] * A2R
self.pos_u_min = row['e_DE_pm'] * A2R
self.pos_u_pa = halfpi
else:
self.pos_u_maj = row['e_DE_pm'] * A2R
self.pos_u_min = row['e_RA_pm'] * A2R
self.pos_u_pa = 0
self.pos_epoch = 55400. # hardcoded in the catalog
self.promo_ra = row['pmRA']
self.promo_dec = row['pmDE']
if row['e_pmRA'] > row['e_pmDE']:
self.promo_u_maj = row['e_pmRA'] * 1.
self.promo_u_min = row['e_pmDE'] * 1.
self.promo_u_pa = halfpi
else:
self.promo_u_maj = row['e_pmDE'] * 1.
self.promo_u_min = row['e_pmRA'] * 1.
self.promo_u_pa = 0.
return self |
<SYSTEM_TASK:>
Install a signal handler such that this program prints a Python traceback
<END_TASK>
<USER_TASK:>
Description:
def backtrace_on_usr1 ():
"""Install a signal handler such that this program prints a Python traceback
upon receipt of SIGUSR1. This could be useful for checking that
long-running programs are behaving properly, or for discovering where an
infinite loop is occurring.
Note, however, that the Python interpreter does not invoke Python signal
handlers exactly when the process is signaled. For instance, a signal
delivered in the midst of a time.sleep() call will only be seen by Python
code after that call completes. This means that this feature may not be as
helpful as one might like for debugging certain kinds of problems.
""" |
import signal
try:
signal.signal (signal.SIGUSR1, _print_backtrace_signal_handler)
except Exception as e:
warn ('failed to set up Python backtraces on SIGUSR1: %s', e) |
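A usage sketch; the handler only needs to be installed once, early in the program::
    backtrace_on_usr1()
    # ... long-running work ...
    # Then, from another terminal:
    #   kill -USR1 <pid-of-this-process>
    # and a Python traceback of the current stack is printed.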
<SYSTEM_TASK:>
Fork this process, creating a subprocess detached from the current context.
<END_TASK>
<USER_TASK:>
Description:
def fork_detached_process ():
"""Fork this process, creating a subprocess detached from the current context.
Returns a :class:`pwkit.Holder` instance with information about what
happened. Its fields are:
whoami
A string, either "original" or "forked" depending on which process we are.
pipe
An open binary file descriptor. It is readable by the original process
and writable by the forked one. This can be used to pass information
from the forked process to the one that launched it.
forkedpid
The PID of the forked process. Note that this process is *not* a child of
the original one, so waitpid() and friends may not be used on it.
Example::
from pwkit import cli
info = cli.fork_detached_process ()
if info.whoami == 'original':
message = info.pipe.readline ().decode ('utf-8')
if not len (message):
cli.die ('forked process (PID %d) appears to have died', info.forkedpid)
info.pipe.close ()
print ('forked process said:', message)
else:
info.pipe.write ('hello world'.encode ('utf-8'))
info.pipe.close ()
As always, the *vital* thing to understand is that immediately after a
call to this function, you have **two** nearly-identical but **entirely
independent** programs that are now both running simultaneously. Until you
execute some kind of ``if`` statement, the only difference between the two
processes is the value of the ``info.whoami`` field and whether
``info.pipe`` is readable or writable.
This function uses :func:`os.fork` twice and also calls :func:`os.setsid`
in between the two invocations, which creates new session and process
groups for the forked subprocess. It does *not* perform other operations
that you might want, such as changing the current directory, dropping
privileges, closing file descriptors, and so on. For more discussion of
best practices when it comes to “daemonizing” processes, see (stalled)
`PEP 3143`_.
.. _PEP 3143: https://www.python.org/dev/peps/pep-3143/
""" |
import os, struct
from .. import Holder
payload = struct.Struct ('L')
info = Holder ()
readfd, writefd = os.pipe ()
pid1 = os.fork ()
if pid1 > 0:
info.whoami = 'original'
info.pipe = os.fdopen (readfd, 'rb')
os.close (writefd)
retcode = os.waitpid (pid1, 0)[1]
if retcode:
raise Exception ('child process exited with error code %d' % retcode)
(info.forkedpid,) = payload.unpack (info.pipe.read (payload.size))
else:
# We're the intermediate child process. Start new session and process
# groups, detaching us from TTY signals and whatnot.
os.setsid ()
pid2 = os.fork ()
if pid2 > 0:
# We're the intermediate process; we're all done
os._exit (0)
# If we get here, we're the detached child process.
info.whoami = 'forked'
info.pipe = os.fdopen (writefd, 'wb')
os.close (readfd)
info.forkedpid = os.getpid ()
info.pipe.write (payload.pack (info.forkedpid))
return info |
<SYSTEM_TASK:>
A lame routine for grabbing command-line arguments. Returns a boolean
<END_TASK>
<USER_TASK:>
Description:
def pop_option (ident, argv=None):
"""A lame routine for grabbing command-line arguments. Returns a boolean
indicating whether the option was present. If it was, it's removed from
the argument string. Because of the lame behavior, options can't be
combined, and non-boolean options aren't supported. Operates on sys.argv
by default.
Note that this will proceed merrily if argv[0] matches your option.
""" |
if argv is None:
from sys import argv
if len (ident) == 1:
ident = '-' + ident
else:
ident = '--' + ident
found = ident in argv
if found:
argv.remove (ident)
return found |
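A small usage sketch with hypothetical option names::
    import sys
    # Suppose the program was invoked as:  myprog --verbose -n input.txt
    verbose = pop_option('verbose')  # True; '--verbose' is removed from sys.argv
    dry_run = pop_option('n')        # single-character idents map to '-n'
    files = sys.argv[1:]             # remaining positional arguments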
<SYSTEM_TASK:>
Print program usage information and exit.
<END_TASK>
<USER_TASK:>
Description:
def show_usage (docstring, short, stream, exitcode):
"""Print program usage information and exit.
:arg str docstring: the program help text
This function just prints *docstring* and exits. In most cases, the
function :func:`check_usage` should be used: it automatically checks
:data:`sys.argv` for a sole "-h" or "--help" argument and invokes this
function.
This function is provided in case there are instances where the user
should get a friendly usage message that :func:`check_usage` doesn't catch.
It can be contrasted with :func:`wrong_usage`, which prints a terser usage
message and exits with an error code.
""" |
if stream is None:
from sys import stdout as stream
if not short:
print ('Usage:', docstring.strip (), file=stream)
else:
intext = False
for l in docstring.splitlines ():
if intext:
if not len (l):
break
print (l, file=stream)
elif len (l):
intext = True
print ('Usage:', l, file=stream)
print ('\nRun with a sole argument --help for more detailed '
'usage information.', file=stream)
raise SystemExit (exitcode) |
<SYSTEM_TASK:>
Print a message indicating invalid command-line arguments and exit with an
<END_TASK>
<USER_TASK:>
Description:
def wrong_usage (docstring, *rest):
"""Print a message indicating invalid command-line arguments and exit with an
error code.
:arg str docstring: the program help text
:arg rest: an optional specific error message
This function is intended for small programs launched from the command
line. The intention is for the program help information to be written in
its docstring, and then for argument checking to look something like
this::
\"\"\"mytask <input> <output>
Do something to the input to create the output.
\"\"\"
...
import sys
... # other setup
check_usage (__doc__)
... # more setup
if len (sys.argv) != 3:
wrong_usage (__doc__, "expect exactly 2 arguments, not %d",
len (sys.argv))
When called, an error message is printed along with the *first stanza* of
*docstring*. The program then exits with an error code and a suggestion to
run the program with a --help argument to see more detailed usage
information. The "first stanza" of *docstring* is defined as everything up
until the first blank line, ignoring any leading blank lines.
The optional message in *rest* is treated as follows. If *rest* is empty,
the error message "invalid command-line arguments" is printed. If it is a
single item, the stringification of that item is printed. If it is more
than one item, the first item is treated as a format string, and it is
percent-formatted with the remaining values. See the above example.
See also :func:`check_usage` and :func:`show_usage`.
""" |
intext = False
if len (rest) == 0:
detail = 'invalid command-line arguments'
elif len (rest) == 1:
detail = rest[0]
else:
detail = rest[0] % tuple (rest[1:])
print ('error:', detail, '\n', file=sys.stderr) # extra NL
show_usage (docstring, True, sys.stderr, 1) |
<SYSTEM_TASK:>
Handle an uncaught exception. We always forward the exception on to
<END_TASK>
<USER_TASK:>
Description:
def excepthook (self, etype, evalue, etb):
"""Handle an uncaught exception. We always forward the exception on to
whatever `sys.excepthook` was present upon setup. However, if the
exception is a KeyboardInterrupt, we additionally kill ourselves with
an uncaught SIGINT, so that invoking programs know what happened.
""" |
self.inner_excepthook (etype, evalue, etb)
if issubclass (etype, KeyboardInterrupt):
# Don't try this at home, kids. On some systems os.kill (0, ...)
# signals our entire process group, which is not what we want,
# so we use os.getpid ().
signal.signal (signal.SIGINT, signal.SIG_DFL)
os.kill (os.getpid (), signal.SIGINT) |
<SYSTEM_TASK:>
Calculate the cyclotron frequency in Hz given a magnetic field strength in Gauss.
<END_TASK>
<USER_TASK:>
Description:
def calc_nu_b(b):
"""Calculate the cyclotron frequency in Hz given a magnetic field strength in Gauss.
This is in cycles per second not radians per second; i.e. there is a 2π in
the denominator: ν_B = e B / (2π m_e c)
""" |
return cgs.e * b / (2 * cgs.pi * cgs.me * cgs.c) |
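A quick numerical sketch: the electron cyclotron frequency is about 2.8 MHz per
gauss, so a kilogauss field corresponds to a few GHz::
    nu = calc_nu_b(1000.)  # b in gauss
    # nu is roughly 2.8e9 Hz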
<SYSTEM_TASK:>
Calculate a flux density from pure free-free emission.
<END_TASK>
<USER_TASK:>
Description:
def calc_freefree_snu_ujy(ne, t, width, elongation, dist, ghz):
"""Calculate a flux density from pure free-free emission.
""" |
hz = ghz * 1e9
eta = calc_freefree_eta(ne, t, hz)
kappa = calc_freefree_kappa(ne, t, hz)
snu = calc_snu(eta, kappa, width, elongation, dist)
ujy = snu * cgs.jypercgs * 1e6
return ujy |
<SYSTEM_TASK:>
Concatenate visibility measurement sets.
<END_TASK>
<USER_TASK:>
Description:
def concat(invises, outvis, timesort=False):
"""Concatenate visibility measurement sets.
invises (list of str)
Paths to the input measurement sets
outvis (str)
Path to the output measurement set.
timesort (boolean)
If true, sort the output in time after concatenation.
Example::
from pwkit.environments.casa import tasks
tasks.concat(['epoch1.ms', 'epoch2.ms'], 'combined.ms')
""" |
tb = util.tools.table()
ms = util.tools.ms()
if os.path.exists(outvis):
raise RuntimeError('output "%s" already exists' % outvis)
for invis in invises:
if not os.path.isdir(invis):
raise RuntimeError('input "%s" does not exist' % invis)
tb.open(b(invises[0]))
tb.copy(b(outvis), deep=True, valuecopy=True)
tb.close()
ms.open(b(outvis), nomodify=False)
for invis in invises[1:]:
ms.concatenate(msfile=b(invis), freqtol=b(concat_freqtol),
dirtol=b(concat_dirtol))
ms.writehistory(message=b'taskname=tasklib.concat', origin=b'tasklib.concat')
ms.writehistory(message=b('vis = ' + ', '.join(invises)), origin=b'tasklib.concat')
ms.writehistory(message=b('timesort = ' + 'FT'[int(timesort)]), origin=b'tasklib.concat')
if timesort:
ms.timesort()
ms.close() |
<SYSTEM_TASK:>
Delete the ``MODEL_DATA`` and ``CORRECTED_DATA`` columns from a measurement set.
<END_TASK>
<USER_TASK:>
Description:
def delcal(mspath):
"""Delete the ``MODEL_DATA`` and ``CORRECTED_DATA`` columns from a measurement set.
mspath (str)
The path to the MS to modify
Example::
from pwkit.environments.casa import tasks
tasks.delcal('dataset.ms')
""" |
wantremove = 'MODEL_DATA CORRECTED_DATA'.split()
tb = util.tools.table()
tb.open(b(mspath), nomodify=False)
cols = frozenset(tb.colnames())
toremove = [b(c) for c in wantremove if c in cols]
if len(toremove):
tb.removecols(toremove)
tb.close()
# We want to return a `str` type, which is what we already
# have in Python 2 but not in 3.
if six.PY2:
return toremove
else:
return [c.decode('utf8') for c in toremove] |
<SYSTEM_TASK:>
Command-line access to ``delmod`` functionality.
<END_TASK>
<USER_TASK:>
Description:
def delmod_cli(argv, alter_logger=True):
"""Command-line access to ``delmod`` functionality.
The ``delmod`` task deletes "on-the-fly" model information from a
Measurement Set. It is so easy to implement that a standalone
function is essentially unnecessary. Just write::
from pwkit.environments.casa import util
cb = util.tools.calibrater()
cb.open('dataset.ms', addcorr=False, addmodel=False)
cb.delmod(otf=True, scr=False)
cb.close()
If you want to delete the scratch columns, use :func:`delcal`. If you want
to clear the scratch columns, use :func:`clearcal`.
""" |
check_usage(delmod_doc, argv, usageifnoargs=True)
if alter_logger:
util.logger()
cb = util.tools.calibrater()
for mspath in argv[1:]:
cb.open(b(mspath), addcorr=False, addmodel=False)
cb.delmod(otf=True, scr=False)
cb.close() |
<SYSTEM_TASK:>
Make a flags file out of a bandpass calibration table
<END_TASK>
<USER_TASK:>
Description:
def extractbpflags(calpath, deststream):
"""Make a flags file out of a bandpass calibration table
calpath (str)
The path to the bandpass calibration table
deststream (stream-like object, e.g. an opened file)
Where to write the flags data
Below is documentation written for the command-line interface to this
functionality:
""" |
tb = util.tools.table()
tb.open(b(os.path.join(calpath, 'ANTENNA')))
antnames = tb.getcol(b'NAME')
tb.close()
tb.open(b(calpath))
try:
t = tb.getkeyword(b'VisCal')
except RuntimeError:
raise PKError('no "VisCal" keyword in %s; it doesn\'t seem to be a '
'bandpass calibration table', calpath)
if t != 'B Jones':
raise PKError('table %s doesn\'t seem to be a bandpass calibration '
'table; its type is "%s"', calpath, t)
def emit(antidx, spwidx, chanstart, chanend):
# Channel ranges are inclusive, unlike Python syntax.
print("antenna='%s&*' spw='%d:%d~%d' reason='BANDPASS_FLAGGED'" % \
(antnames[antidx], spwidx, chanstart, chanend), file=deststream)
for row in range(tb.nrows()):
ant = tb.getcell(b'ANTENNA1', row)
spw = tb.getcell(b'SPECTRAL_WINDOW_ID', row)
flag = tb.getcell(b'FLAG', row)
# This is the logical 'or' of the two polarizations: i.e., anything that
# is flagged in either poln is flagged in this.
sqflag = ~((~flag).prod(axis=0, dtype=bool))
runstart = None
for i in range(sqflag.size):
if sqflag[i]:
# This channel is flagged. Start a run if not already in one.
if runstart is None:
runstart = i
elif runstart is not None:
# The current run just ended.
emit(ant, spw, runstart, i - 1)
runstart = None
if runstart is not None:
emit(ant, spw, runstart, i)
tb.close() |
<SYSTEM_TASK:>
Command-line access to ``flagmanager`` functionality.
<END_TASK>
<USER_TASK:>
Description:
def flagmanager_cli(argv, alter_logger=True):
"""Command-line access to ``flagmanager`` functionality.
The ``flagmanager`` task manages tables of flags associated with
measurement sets. Its features are so easy to implement that a standalone
library function is essentially unnecessary. See the source code to this
function for the tool calls that implement different parts of the
``flagmanager`` functionality.
""" |
check_usage(flagmanager_doc, argv, usageifnoargs=True)
if len(argv) < 3:
wrong_usage(flagmanager_doc, 'expect at least a mode and an MS name')
mode = argv[1]
ms = argv[2]
if alter_logger:
if mode == 'list':
util.logger('info')
elif mode == 'delete':
# it WARNs 'deleting version xxx' ... yargh
util.logger('severe')
else:
util.logger()
try:
factory = util.tools.agentflagger
except AttributeError:
factory = util.tools.testflagger
af = factory()
af.open(b(ms))
if mode == 'list':
if len(argv) != 3:
wrong_usage(flagmanager_doc, 'expect exactly one argument in list mode')
af.getflagversionlist()
elif mode == 'save':
if len(argv) != 4:
wrong_usage(flagmanager_doc, 'expect exactly two arguments in save mode')
from time import strftime
name = argv[3]
af.saveflagversion(versionname=b(name), merge=b'replace',
comment=b('created %s(casatask flagmanager)'
% strftime('%Y-%m-%dT%H:%M:%SZ')))
elif mode == 'restore':
if len(argv) != 4:
wrong_usage(flagmanager_doc, 'expect exactly two arguments in restore mode')
name = argv[3]
af.restoreflagversion(versionname=b(name), merge=b'replace')
elif mode == 'delete':
if len(argv) != 4:
wrong_usage(flagmanager_doc, 'expect exactly two arguments in delete mode')
name = argv[3]
if not os.path.isdir(os.path.join(ms + '.flagversions', 'flags.' + name)):
# This condition only results in a WARN from deleteflagversion()!
raise RuntimeError('version "%s" doesn\'t exist in "%s.flagversions"'
% (name, ms))
af.deleteflagversion(versionname=b(name))
else:
wrong_usage(flagmanager_doc, 'unknown flagmanager mode "%s"' % mode)
af.done() |
<SYSTEM_TASK:>
Convert an image in MS format to FITS format.
<END_TASK>
<USER_TASK:>
Description:
def image2fits(mspath, fitspath, velocity=False, optical=False, bitpix=-32,
minpix=0, maxpix=-1, overwrite=False, dropstokes=False, stokeslast=True,
history=True, **kwargs):
"""Convert an image in MS format to FITS format.
mspath (str)
The path to the input MS.
fitspath (str)
The path to the output FITS file.
velocity (boolean)
(To be documented.)
optical (boolean)
(To be documented.)
bitpix (integer)
(To be documented.)
minpix (integer)
(To be documented.)
maxpix (integer)
(To be documented.)
overwrite (boolean)
Whether the task is allowed to overwrite an existing destination file.
dropstokes (boolean)
Whether the "Stokes" (polarization) axis of the image should be dropped.
stokeslast (boolean)
Whether the "Stokes" (polarization) axis of the image should be placed as the last
(innermost?) axis of the image cube.
history (boolean)
(To be documented.)
``**kwargs``
Forwarded on to the ``tofits`` function of the CASA ``image`` tool.
""" |
ia = util.tools.image()
ia.open(b(mspath))
ia.tofits(outfile=b(fitspath), velocity=velocity, optical=optical, bitpix=bitpix,
minpix=minpix, maxpix=maxpix, overwrite=overwrite, dropstokes=dropstokes,
stokeslast=stokeslast, history=history, **kwargs)
ia.close() |
<SYSTEM_TASK:>
Convert an ALMA low-level ASDM dataset to Measurement Set format.
<END_TASK>
<USER_TASK:>
Description:
def importalma(asdm, ms):
"""Convert an ALMA low-level ASDM dataset to Measurement Set format.
asdm (str)
The path to the input ASDM dataset.
ms (str)
The path to the output MS dataset.
This implementation automatically infers the value of the "tbuff"
parameter.
Example::
from pwkit.environments.casa import tasks
tasks.importalma('myalma.asdm', 'myalma.ms')
""" |
from .scripting import CasapyScript
script = os.path.join(os.path.dirname(__file__), 'cscript_importalma.py')
with CasapyScript(script, asdm=asdm, ms=ms) as cs:
pass |
<SYSTEM_TASK:>
Convert an EVLA low-level SDM dataset to Measurement Set format.
<END_TASK>
<USER_TASK:>
Description:
def importevla(asdm, ms):
"""Convert an EVLA low-level SDM dataset to Measurement Set format.
asdm (str)
The path to the input ASDM dataset.
ms (str)
The path to the output MS dataset.
This implementation automatically infers the value of the "tbuff"
parameter.
Example::
from pwkit.environments.casa import tasks
tasks.importevla('myvla.sdm', 'myvla.ms')
""" |
from .scripting import CasapyScript
# Here's the best way I can figure to find the recommended value of tbuff
#(= 1.5 * integration time). Obviously you might have different
# integration times in the dataset and such, and we're just going to
# ignore that possibility.
bdfstem = os.listdir(os.path.join(asdm, 'ASDMBinary'))[0]
bdf = os.path.join(asdm, 'ASDMBinary', bdfstem)
tbuff = None
with open(bdf, 'rb') as f:
for linenum, line in enumerate(f):
if linenum > 60:
raise PKError('cannot find integration time info in %s', bdf)
if not line.startswith(b'<sdmDataSubsetHeader'):
continue
try:
i1 = line.index(b'<interval>') + len(b'<interval>')
i2 = line.index(b'</interval>')
if i2 <= i1:
raise ValueError()
except ValueError:
raise PKError('cannot parse integration time info in %s', bdf)
tbuff = float(line[i1:i2]) * 1.5e-9 # nanosecs, and want 1.5x
break
if tbuff is None:
raise PKError('found no integration time info')
print('importevla: %s -> %s with tbuff=%.2f' % (asdm, ms, tbuff))
script = os.path.join(os.path.dirname(__file__), 'cscript_importevla.py')
with CasapyScript(script, asdm=asdm, ms=ms, tbuff=tbuff) as cs:
pass |
<SYSTEM_TASK:>
Textually describe the contents of a measurement set.
<END_TASK>
<USER_TASK:>
Description:
def listobs(vis):
"""Textually describe the contents of a measurement set.
vis (str)
The path to the dataset.
Returns
A generator of lines of human-readable output
Errors can only be detected by looking at the output. Example::
from pwkit.environments.casa import tasks
for line in tasks.listobs('mydataset.ms'):
print(line)
""" |
def inner_list(sink):
try:
ms = util.tools.ms()
ms.open(vis)
ms.summary(verbose=True)
ms.close()
except Exception as e:
sink.post(b('listobs failed: %s' % e), priority=b'SEVERE')
for line in util.forkandlog(inner_list):
info = line.rstrip().split('\t', 3) # date, priority, origin, message
if len(info) > 3:
yield info[3]
else:
yield '' |
<SYSTEM_TASK:>
Convert an MJD to a date string in the format used by CASA.
<END_TASK>
<USER_TASK:>
Description:
def mjd2date(mjd, precision=3):
"""Convert an MJD to a data string in the format used by CASA.
mjd (numeric)
An MJD value in the UTC timescale.
precision (integer, default 3)
The number of digits of decimal precision in the seconds portion of
the returned string
Returns
A string representing the input argument in CASA format:
``YYYY/MM/DD/HH:MM:SS.SSS``.
Example::
from pwkit.environment.casa import tasks
print(tasks.mjd2date(55555))
# yields '2010/12/25/00:00:00.000'
""" |
from astropy.time import Time
dt = Time(mjd, format='mjd', scale='utc').to_datetime()
fracsec = ('%.*f' % (precision, 1e-6 * dt.microsecond)).split('.')[1]
return '%04d/%02d/%02d/%02d:%02d:%02d.%s' % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, fracsec
) |
<SYSTEM_TASK:>
Plot the physical layout of the antennas described in the MS.
<END_TASK>
<USER_TASK:>
Description:
def plotants(vis, figfile):
"""Plot the physical layout of the antennas described in the MS.
vis (str)
Path to the input dataset
figfile (str)
Path to the output image file.
The output image format will be inferred from the extension of *figfile*.
Example::
from pwkit.environments.casa import tasks
tasks.plotants('dataset.ms', 'antennas.png')
""" |
from .scripting import CasapyScript
script = os.path.join(os.path.dirname(__file__), 'cscript_plotants.py')
with CasapyScript(script, vis=vis, figfile=figfile) as cs:
pass |
<SYSTEM_TASK:>
Render an object in LaTeX appropriately.
<END_TASK>
<USER_TASK:>
Description:
def latexify(obj, **kwargs):
"""Render an object in LaTeX appropriately.
""" |
if hasattr(obj, '__pk_latex__'):
return obj.__pk_latex__(**kwargs)
if isinstance(obj, text_type):
from .unicode_to_latex import unicode_to_latex
return unicode_to_latex(obj)
if isinstance(obj, bool):
# isinstance(True, int) = True, so gotta handle this first.
raise ValueError('no well-defined LaTeXification of bool %r' % obj)
if isinstance(obj, float):
nplaces = kwargs.get('nplaces')
if nplaces is None:
return '$%f$' % obj
return '$%.*f$' % (nplaces, obj)
if isinstance(obj, int):
return '$%d$' % obj
if isinstance(obj, binary_type):
if all(c in _printable_ascii for c in obj):
return obj.decode('ascii')
raise ValueError('no safe LaTeXification of binary string %r' % obj)
raise ValueError('can\'t LaTeXify %r' % obj) |
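A few usage sketches::
    latexify(3.14159, nplaces=2)  # -> '$3.14$'
    latexify(42)                  # -> '$42$'
    latexify(u'naïve')            # text is routed through unicode_to_latex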
<SYSTEM_TASK:>
Render a number into LaTeX in a 2-column format, where the columns split
<END_TASK>
<USER_TASK:>
Description:
def latexify_n2col(x, nplaces=None, **kwargs):
"""Render a number into LaTeX in a 2-column format, where the columns split
immediately to the left of the decimal point. This gives nice alignment of
numbers in a table.
""" |
if nplaces is not None:
t = '%.*f' % (nplaces, x)
else:
t = '%f' % x
if '.' not in t:
return '$%s$ &' % t
left, right = t.split('.')
return '$%s$ & $.%s$' % (left, right) |
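A sketch of the two-column output, intended for a LaTeX column pair such as
``r@{}l``::
    latexify_n2col(12.25, nplaces=2)  # -> '$12$ & $.25$'
    latexify_n2col(-3., nplaces=0)    # -> '$-3$ &'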
<SYSTEM_TASK:>
Convert an object to special LaTeX for uncertainty tables.
<END_TASK>
<USER_TASK:>
Description:
def latexify_u3col(obj, **kwargs):
"""Convert an object to special LaTeX for uncertainty tables.
This conversion is meant for uncertain values in a table. The return value
should span three columns. The first column ends just before the decimal
point in the main number value, if it has one. It has no separation from
the second column. The second column goes from the decimal point until
just before the "plus-or-minus" indicator. The third column goes from the
"plus-or-minus" until the end. If the item being formatted does not fit this
schema, it can be wrapped in something like '\multicolumn{3}{c}{...}'.
""" |
if hasattr(obj, '__pk_latex_u3col__'):
return obj.__pk_latex_u3col__(**kwargs)
# TODO: there are reasonable ways to format many basic types, but I'm not
# going to implement them until I need to.
raise ValueError('can\'t LaTeXify %r in 3-column uncertain format' % obj) |
<SYSTEM_TASK:>
Convert an object to special LaTeX for limit tables.
<END_TASK>
<USER_TASK:>
Description:
def latexify_l3col(obj, **kwargs):
"""Convert an object to special LaTeX for limit tables.
This conversion is meant for limit values in a table. The return value
should span three columns. The first column is the limit indicator: <, >,
~, etc. The second column is the whole part of the value, up until just
before the decimal point. The third column is the decimal point and the
fractional part of the value, if present. If the item being formatted does
not fit this schema, it can be wrapped in something like
'\multicolumn{3}{c}{...}'.
""" |
if hasattr(obj, '__pk_latex_l3col__'):
return obj.__pk_latex_l3col__(**kwargs)
if isinstance(obj, bool):
# isinstance(True, int) = True, so gotta handle this first.
raise ValueError('no well-defined l3col LaTeXification of bool %r' % obj)
if isinstance(obj, float):
return '&' + latexify_n2col(obj, **kwargs)
if isinstance(obj, int):
return '& $%d$ &' % obj
raise ValueError('can\'t LaTeXify %r in 3-column limit format' % obj) |
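Hand-traced examples for the built-in branches above (values are illustrative)::

    latexify_l3col(10)               # -> '& $10$ &'
    latexify_l3col(3.5, nplaces=1)   # -> '&$3$ & $.5$'  (via latexify_n2col)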
<SYSTEM_TASK:>
Read a typed tabular text file into a stream of Holders.
<END_TASK>
<USER_TASK:>
Description:
def read (path, tabwidth=8, **kwargs):
"""Read a typed tabular text file into a stream of Holders.
Arguments:
path
The path of the file to read.
tabwidth=8
The tab width to assume. Please don't monkey with it.
mode='rt'
The file open mode (passed to io.open()).
noexistok=False
If True and the file is missing, treat it as empty.
``**kwargs``
Passed to io.open ().
Returns a generator for a stream of `pwkit.Holder`s, each of which will
contain ints, strings, or some kind of measurement (cf `pwkit.msmt`).
""" |
datamode = False
fixedcols = {}
for text in _trimmedlines (path, **kwargs):
text = text.expandtabs (tabwidth)
if datamode:
# table row
h = Holder ()
h.set (**fixedcols)
for name, cslice, parser in info:
try:
v = parser (text[cslice].strip ())
except:
reraise_context ('while parsing "%s"', text[cslice].strip ())
h.set_one (name, v)
yield h
elif text[0] != '@':
# fixed column
padnamekind, padval = text.split ('=', 1)
name, parser = _getparser (padnamekind.strip ())
fixedcols[name] = parser (padval.strip ())
else:
# column specification
n = len (text)
assert n > 1
start = 0
info = []
while start < n:
end = start + 1
while end < n and (not text[end].isspace ()):
end += 1
if start == 0:
namekind = text[start+1:end] # eat leading @
else:
namekind = text[start:end]
while end < n and text[end].isspace ():
end += 1
name, parser = _getparser (namekind)
if parser is None: # allow columns to be ignored
skippedlast = True
else:
skippedlast = False
info.append ((name, slice (start, end), parser))
start = end
datamode = True
if not skippedlast:
# make our last column go as long as the line goes
# (e.g. for "comments" columns)
# but if the real last column is ":x"-type, then info[-1]
# doesn't run up to the end of the line, so do nothing in that case.
lname, lslice, lparser = info[-1]
info[-1] = lname, slice (lslice.start, None), lparser |
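A sketch of the on-disk format this parser expects, inferred from the branches above; the file name, column names, and the ``f`` (float) type tag are illustrative::

    # contents of a hypothetical "mytable.txt":
    #
    #   source = 3C286          <- fixed column, applied to every row
    #   @name   flux:f  notes   <- '@' header line defines the columns
    #   ptA     1.25    looks fine
    #   ptB     4.5     check me
    for h in read('mytable.txt'):
        print(h.source, h.name, h.flux, h.notes)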
<SYSTEM_TASK:>
Write a typed tabular text file to the specified stream.
<END_TASK>
<USER_TASK:>
Description:
def write (stream, items, fieldnames, tabwidth=8):
"""Write a typed tabular text file to the specified stream.
Arguments:
stream
The destination stream.
items
An iterable of items to write. Two passes have to
be made over the items (to discover the needed column widths),
so this will be saved into a list.
fieldnames
Either a list of field name strings, or a single string.
If the latter, it will be split into a list with .split().
tabwidth=8
The tab width to use. Please don't monkey with it.
Returns nothing.
""" |
if isinstance (fieldnames, six.string_types):
fieldnames = fieldnames.split ()
maxlens = [0] * len (fieldnames)
# We have to make two passes, so listify:
items = list (items)
# pass 1: get types and maximum lengths for each record. Pad by 1 to
# ensure there's at least one space between all columns.
coltypes = [None] * len (fieldnames)
for i in items:
for idx, fn in enumerate (fieldnames):
val = i.get (fn)
if val is None:
continue
typetag, text, inexact = msmt.fmtinfo (val)
maxlens[idx] = max (maxlens[idx], len (text) + 1)
if coltypes[idx] is None:
coltypes[idx] = typetag
continue
if coltypes[idx] == typetag:
continue
if coltypes[idx][-1] == 'f' and typetag[-1] == 'u':
# Can upcast floats to uvals
if coltypes[idx][:-1] == typetag[:-1]:
coltypes[idx] = coltypes[idx][:-1] + 'u'
continue
if coltypes[idx][-1] == 'u' and typetag[-1] == 'f':
if coltypes[idx][:-1] == typetag[:-1]:
continue
raise PKError ('irreconcilable column types: %s and %s', coltypes[idx], typetag)
# Compute column headers and their widths
headers = list (fieldnames)
headers[0] = '@' + headers[0]
for idx, fn in enumerate (fieldnames):
if coltypes[idx] != '':
headers[idx] += ':' + coltypes[idx]
maxlens[idx] = max (maxlens[idx], len (headers[idx]))
widths = [tabwidth * ((k + tabwidth - 1) // tabwidth) for k in maxlens]
# pass 2: write out
print (''.join (_tabpad (h, widths[idx], tabwidth)
for (idx, h) in enumerate (headers)), file=stream)
def ustr (i, f):
v = i.get (f)
if v is None:
return ''
return msmt.fmtinfo (v)[1]
for i in items:
print (''.join (_tabpad (ustr (i, fn), widths[idx], tabwidth)
for (idx, fn) in enumerate (fieldnames)), file=stream) |
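A usage sketch, assuming ``pwkit.Holder`` accepts keyword arguments in its constructor; the field names are illustrative::

    import sys
    from pwkit import Holder
    items = [Holder(name='ptA', flux=1.25), Holder(name='ptB', flux=3.5)]
    write(sys.stdout, items, 'name flux')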
<SYSTEM_TASK:>
Read a headerless tabular text file into a stream of Holders.
<END_TASK>
<USER_TASK:>
Description:
def vizread (descpath, descsection, tabpath, tabwidth=8, **kwargs):
"""Read a headerless tabular text file into a stream of Holders.
Arguments:
descpath
The path of the table description ini file.
descsection
The section in the description file to use.
tabpath
The path to the actual table data.
tabwidth=8
The tab width to assume. Please don't monkey with it.
mode='rt'
The table file open mode (passed to io.open()).
noexistok=False
If True and the file is missing, treat it as empty.
``**kwargs``
Passed to io.open ().
Returns a generator of a stream of `pwkit.Holder`s, each of which will
contain ints, strings, or some kind of measurement (cf `pwkit.msmt`). In
this version, the table file does not contain a header, as seen in Vizier
data files. The corresponding section in the description ini file has keys
of the form "colname = <start> <end> [type]", where <start> and <end> are
the **1-based** character numbers defining the column, and [type] is an
    optional specifier of the measurement type of the column (one of the usual
b, i, f, u, Lu, Pu).
""" |
from .inifile import read as iniread
cols = []
for i in iniread (descpath):
if i.section != descsection:
continue
for field, desc in six.iteritems (i.__dict__):
if field == 'section':
continue
a = desc.split ()
idx0 = int (a[0]) - 1
if len (a) == 1:
cols.append ((field, slice (idx0, idx0 + 1), msmt.parsers['s']))
continue
if len (a) == 2:
parser = msmt.parsers['s']
else:
parser = msmt.parsers[a[2]]
cols.append ((field, slice (idx0, int (a[1])), parser))
for text in _trimmedlines (tabpath, **kwargs):
text = text.expandtabs (tabwidth)
h = Holder ()
for name, cslice, parser in cols:
try:
v = parser (text[cslice].strip ())
except:
reraise_context ('while parsing "%s"', text[cslice].strip ())
h.set_one (name, v)
yield h |
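A sketch of a matching description file, assuming ``pwkit.inifile`` parses the usual ``[section]`` / ``key = value`` layout; the file names, section name, and columns are made up::

    # hypothetical "desc.ini":
    #
    #   [mycatalog]
    #   name = 1 10        <- characters 1-10, parsed as a string
    #   flux = 12 18 f     <- characters 12-18, parsed as a float
    for h in vizread('desc.ini', 'mycatalog', 'catalog.dat'):
        print(h.name, h.flux)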
<SYSTEM_TASK:>
Given array shapes `s1` and `s2`, compute the shape of the array that would
<END_TASK>
<USER_TASK:>
Description:
def _broadcast_shapes(s1, s2):
"""Given array shapes `s1` and `s2`, compute the shape of the array that would
result from broadcasting them together.""" |
n1 = len(s1)
n2 = len(s2)
n = max(n1, n2)
res = [1] * n
for i in range(n):
if i >= n1:
c1 = 1
else:
c1 = s1[n1-1-i]
if i >= n2:
c2 = 1
else:
c2 = s2[n2-1-i]
if c1 == 1:
rc = c2
elif c2 == 1 or c1 == c2:
rc = c1
else:
raise ValueError('array shapes %r and %r are not compatible' % (s1, s2))
res[n-1-i] = rc
return tuple(res) |
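Hand-checked examples of the broadcasting rule implemented above::

    _broadcast_shapes((3, 1), (4,))        # -> (3, 4)
    _broadcast_shapes((2, 1, 5), (3, 1))   # -> (2, 3, 5)
    # _broadcast_shapes((3,), (4,)) raises ValueError: shapes are incompatible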
<SYSTEM_TASK:>
Print information about the model solution.
<END_TASK>
<USER_TASK:>
Description:
def print_soln(self):
"""Print information about the model solution.""" |
        lmax = reduce(max, (len(x) for x in self.pnames), len('r chi sq'))
if self.puncerts is None:
for pn, val in zip(self.pnames, self.params):
print('%s: %14g' % (pn.rjust(lmax), val))
else:
for pn, val, err in zip(self.pnames, self.params, self.puncerts):
frac = abs(100. * err / val)
print('%s: %14g +/- %14g (%.2f%%)' % (pn.rjust(lmax), val, err, frac))
if self.rchisq is not None:
print('%s: %14g' % ('r chi sq'.rjust(lmax), self.rchisq))
elif self.chisq is not None:
print('%s: %14g' % ('chi sq'.rjust(lmax), self.chisq))
else:
print('%s: unknown/undefined' % ('r chi sq'.rjust(lmax)))
return self |
<SYSTEM_TASK:>
Set the model function to use an efficient but tedious calling convention.
<END_TASK>
<USER_TASK:>
Description:
def set_func(self, func, pnames, args=()):
"""Set the model function to use an efficient but tedious calling convention.
The function should obey the following convention::
def func(param_vec, *args):
modeled_data = { do something using param_vec }
return modeled_data
This function creates the :class:`pwkit.lmmin.Problem` so that the
caller can futz with it before calling :meth:`solve`, if so desired.
Returns *self*.
""" |
from .lmmin import Problem
self.func = func
self._args = args
self.pnames = list(pnames)
self.lm_prob = Problem(len(self.pnames))
return self |
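A sketch of a model function obeying this calling convention; the function name, parameter names, and the ``mdl`` instance are hypothetical::

    import numpy as np
    def powerlaw_model(params, x):
        # params[0] is a normalization, params[1] an index; x arrives via `args`
        return params[0] * x**params[1]
    # mdl.set_func(powerlaw_model, ['norm', 'index'], args=(x,))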
<SYSTEM_TASK:>
Set the model function to use a simple but somewhat inefficient calling
<END_TASK>
<USER_TASK:>
Description:
def set_simple_func(self, func, args=()):
"""Set the model function to use a simple but somewhat inefficient calling
convention.
The function should obey the following convention::
def func(param0, param1, ..., paramN, *args):
modeled_data = { do something using the parameters }
return modeled_data
Returns *self*.
""" |
code = get_function_code(func)
npar = code.co_argcount - len(args)
pnames = code.co_varnames[:npar]
def wrapper(params, *args):
return func(*(tuple(params) + args))
return self.set_func(wrapper, pnames, args) |
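A sketch of the simple convention; with ``args=(x,)`` the wrapper above infers the parameter names ``('slope', 'intercept')`` from the function signature (the model function and the ``mdl`` instance are hypothetical)::

    def line(slope, intercept, x):
        return slope * x + intercept
    # mdl.set_simple_func(line, args=(x,))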