text_prompt | code_prompt
---|---
<SYSTEM_TASK:>
Get the broadcasts associated with a set of tables.
<END_TASK>
<USER_TASK:>
Description:
def _get_broadcasts(tables):
"""
Get the broadcasts associated with a set of tables.
Parameters
----------
tables : sequence of str
Table names for which broadcasts have been registered.
Returns
-------
casts : dict of `Broadcast`
Keys are tuples of strings like (cast_name, onto_name).
""" |
tables = set(tables)
casts = tz.keyfilter(
lambda x: x[0] in tables and x[1] in tables, _BROADCASTS)
if tables - set(tz.concat(casts.keys())):
raise ValueError('Not enough links to merge all tables.')
return casts |
<SYSTEM_TASK:>
Get a single broadcast.
<END_TASK>
<USER_TASK:>
Description:
def get_broadcast(cast_name, onto_name):
"""
Get a single broadcast.
Broadcasts are stored data about how to do a Pandas join.
A Broadcast object is a namedtuple with these attributes:
- cast: the name of the table being broadcast
- onto: the name of the table onto which "cast" is broadcast
- cast_on: The optional name of a column on which to join.
None if the table index will be used instead.
- onto_on: The optional name of a column on which to join.
None if the table index will be used instead.
- cast_index: True if the table index should be used for the join.
- onto_index: True if the table index should be used for the join.
Parameters
----------
cast_name : str
The name of the table being broadcast.
onto_name : str
The name of the table onto which `cast_name` is broadcast.
Returns
-------
broadcast : Broadcast
""" |
if is_broadcast(cast_name, onto_name):
return _BROADCASTS[(cast_name, onto_name)]
else:
raise KeyError(
'no rule found for broadcasting {!r} onto {!r}'.format(
cast_name, onto_name)) |
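A minimal usage sketch of the registration/lookup pair above (hedged: it assumes these helpers are exposed through an `orca`-style module and that a `broadcast()` registration call with this signature exists; the table and column names are hypothetical):
import orca
# Register how 'zones' is broadcast onto 'buildings': join the zones index
# against the buildings.zone_id column.
orca.broadcast('zones', 'buildings', cast_index=True, onto_on='zone_id')
b = orca.get_broadcast('zones', 'buildings')
print(b.cast, b.onto, b.cast_index, b.onto_on)   # zones buildings True zone_id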
<SYSTEM_TASK:>
A generator that provides all the names of tables that can be
<END_TASK>
<USER_TASK:>
Description:
def _all_reachable_tables(t):
"""
A generator that provides all the names of tables that can be
reached via merges starting at the given target table.
""" |
for k, v in t.items():
for tname in _all_reachable_tables(v):
yield tname
yield k |
<SYSTEM_TASK:>
Descend into a dict of dicts to return the one that contains
<END_TASK>
<USER_TASK:>
Description:
def _recursive_getitem(d, key):
"""
Descend into a dict of dicts to return the one that contains
a given key. Every value in the dict must be another dict.
""" |
if key in d:
return d
else:
for v in d.values():
return _recursive_getitem(v, key)
else:
raise KeyError('Key not found: {}'.format(key)) |
<SYSTEM_TASK:>
Gets a node that has only leaf nodes below it. This table and
<END_TASK>
<USER_TASK:>
Description:
def _next_merge(merge_node):
"""
Gets a node that has only leaf nodes below it. This table and
the ones below are ready to be merged to make a new leaf node.
""" |
if all(_is_leaf_node(d) for d in _dict_value_to_pairs(merge_node)):
return merge_node
else:
for d in tz.remove(_is_leaf_node, _dict_value_to_pairs(merge_node)):
return _next_merge(d)
else:
raise OrcaError('No node found for next merge.') |
<SYSTEM_TASK:>
Returns a list of table names injected into the provided steps.
<END_TASK>
<USER_TASK:>
Description:
def get_step_table_names(steps):
"""
Returns a list of table names injected into the provided steps.
Parameters
----------
steps: list of str
Steps to gather table inputs from.
Returns
-------
list of str
""" |
table_names = set()
for s in steps:
table_names |= get_step(s)._tables_used()
return list(table_names) |
<SYSTEM_TASK:>
Writes tables to a pandas.HDFStore file.
<END_TASK>
<USER_TASK:>
Description:
def write_tables(fname, table_names=None, prefix=None, compress=False, local=False):
"""
Writes tables to a pandas.HDFStore file.
Parameters
----------
fname : str
File name for HDFStore. Will be opened in append mode and closed
at the end of this function.
table_names: list of str, optional, default None
List of tables to write. If None, all registered tables will
be written.
prefix: str
If not None, used to prefix the output table names so that
multiple iterations can go in the same file.
compress: boolean
Whether to compress output file using standard HDF5-readable
zlib compression, default False.
""" |
if table_names is None:
table_names = list_tables()
tables = (get_table(t) for t in table_names)
key_template = '{}/{{}}'.format(prefix) if prefix is not None else '{}'
# set compression options to zlib level-1 if compress arg is True
complib = compress and 'zlib' or None
complevel = compress and 1 or 0
with pd.HDFStore(fname, mode='a', complib=complib, complevel=complevel) as store:
for t in tables:
# if local arg is True, store only local columns
columns = None
if local is True:
columns = t.local_columns
store[key_template.format(t.name)] = t.to_frame(columns=columns) |
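A usage sketch for the writer above (hedged: assumes the function is importable from an `orca`-style module and that the named tables are registered; file and table names are hypothetical):
import orca
# Write two registered tables, compressed, keeping only their local columns.
# With prefix='base' the store keys become 'base/households' and 'base/buildings'.
orca.write_tables('simulation.h5', table_names=['households', 'buildings'],
                  prefix='base', compress=True, local=True)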
<SYSTEM_TASK:>
Run steps in series, optionally repeatedly over some sequence.
<END_TASK>
<USER_TASK:>
Description:
def run(steps, iter_vars=None, data_out=None, out_interval=1,
out_base_tables=None, out_run_tables=None, compress=False,
out_base_local=True, out_run_local=True):
"""
Run steps in series, optionally repeatedly over some sequence.
The current iteration variable is set as a global injectable
called ``iter_var``.
Parameters
----------
steps : list of str
List of steps to run identified by their name.
iter_vars : iterable, optional
The values of `iter_vars` will be made available as an injectable
called ``iter_var`` when repeatedly running `steps`.
data_out : str, optional
An optional filename to which all tables injected into any step
in `steps` will be saved every `out_interval` iterations.
File will be a pandas HDF data store.
out_interval : int, optional
Iteration interval on which to save data to `data_out`. For example,
2 will save out every 2 iterations, 5 every 5 iterations.
Default is every iteration.
The results of the first and last iterations are always included.
The input (base) tables are also included and prefixed with `base/`;
these represent the state of the system before any steps have been
executed.
The interval is defined relative to the first iteration. For example,
a run beginning in 2015 with an out_interval of 2 will write out
results for 2015, 2017, etc.
out_base_tables: list of str, optional, default None
List of base tables to write. If not provided, tables injected
into 'steps' will be written.
out_run_tables: list of str, optional, default None
List of run tables to write. If not provided, tables injected
into 'steps' will be written.
compress: boolean, optional, default False
Whether to compress output file using standard HDF5 zlib compression.
Compression yields much smaller files using slightly more CPU.
out_base_local: boolean, optional, default True
For tables in out_base_tables, whether to store only local columns (True)
or both, local and computed columns (False).
out_run_local: boolean, optional, default True
For tables in out_run_tables, whether to store only local columns (True)
or both, local and computed columns (False).
""" |
iter_vars = iter_vars or [None]
max_i = len(iter_vars)
# get the tables to write out
if out_base_tables is None or out_run_tables is None:
step_tables = get_step_table_names(steps)
if out_base_tables is None:
out_base_tables = step_tables
if out_run_tables is None:
out_run_tables = step_tables
# write out the base (inputs)
if data_out:
add_injectable('iter_var', iter_vars[0])
write_tables(data_out, out_base_tables, 'base', compress=compress, local=out_base_local)
# run the steps
for i, var in enumerate(iter_vars, start=1):
add_injectable('iter_var', var)
if var is not None:
print('Running iteration {} with iteration value {!r}'.format(
i, var))
logger.debug(
'running iteration {} with iteration value {!r}'.format(
i, var))
t1 = time.time()
for j, step_name in enumerate(steps):
add_injectable('iter_step', iter_step(j, step_name))
print('Running step {!r}'.format(step_name))
with log_start_finish(
'run step {!r}'.format(step_name), logger,
logging.INFO):
step = get_step(step_name)
t2 = time.time()
step()
print("Time to execute step '{}': {:.2f} s".format(
step_name, time.time() - t2))
clear_cache(scope=_CS_STEP)
print(
('Total time to execute iteration {} '
'with iteration value {!r}: '
'{:.2f} s').format(i, var, time.time() - t1))
# write out the results for the current iteration
if data_out:
if (i - 1) % out_interval == 0 or i == max_i:
write_tables(data_out, out_run_tables, var, compress=compress, local=out_run_local)
clear_cache(scope=_CS_ITER) |
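A sketch of a typical call (hedged: assumes an `orca`-style module; the step names are hypothetical):
import orca
# Run two steps once per year from 2015 through 2020, writing results to an HDF
# store every other iteration (the first and last iterations are always written).
orca.run(
    ['build_households', 'update_prices'],
    iter_vars=range(2015, 2021),
    data_out='run.h5',
    out_interval=2,
    compress=True)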
<SYSTEM_TASK:>
Temporarily add injectables to the pipeline environment.
<END_TASK>
<USER_TASK:>
Description:
def injectables(**kwargs):
"""
Temporarily add injectables to the pipeline environment.
Takes only keyword arguments.
Injectables will be returned to their original state when the context
manager exits.
""" |
global _INJECTABLES
original = _INJECTABLES.copy()
_INJECTABLES.update(kwargs)
yield
_INJECTABLES = original |
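A sketch of the context manager in use (hedged: assumes `add_injectable`/`get_injectable` companions exist in the same `orca`-style module; the injectable name is hypothetical):
import orca
orca.add_injectable('multiplier', 2)
with orca.injectables(multiplier=10):
    print(orca.get_injectable('multiplier'))   # 10 inside the block
print(orca.get_injectable('multiplier'))       # back to 2 afterwards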
<SYSTEM_TASK:>
Temporarily set DataFrames as registered tables.
<END_TASK>
<USER_TASK:>
Description:
def temporary_tables(**kwargs):
"""
Temporarily set DataFrames as registered tables.
Tables will be returned to their original state when the context
manager exits. Caching is not enabled for tables registered via
this function.
""" |
global _TABLES
original = _TABLES.copy()
for k, v in kwargs.items():
if not isinstance(v, pd.DataFrame):
raise ValueError('tables only accepts DataFrames')
add_table(k, v)
yield
_TABLES = original |
<SYSTEM_TASK:>
Execute a single variable function registered with Orca
<END_TASK>
<USER_TASK:>
Description:
def eval_variable(name, **kwargs):
"""
Execute a single variable function registered with Orca
and return the result. Any keyword arguments are temporarily set
as injectables. This gives the value as it would be injected into a function.
Parameters
----------
name : str
Name of variable to evaluate.
Use variable expressions to specify columns.
Returns
-------
object
For injectables and columns this directly returns whatever
object is returned by the registered function.
For tables this returns a DataFrameWrapper as if the table
had been injected into a function.
""" |
with injectables(**kwargs):
vars = _collect_variables([name], [name])
return vars[name] |
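A sketch of evaluating a registered variable outside a step (hedged: assumes `orca`-style decorators are available; the names are hypothetical):
import orca
@orca.injectable()
def doubled(base_value):
    return base_value * 2
# Temporarily inject base_value=21 while evaluating 'doubled'.
print(orca.eval_variable('doubled', base_value=21))   # 42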
<SYSTEM_TASK:>
Make a DataFrame with the given columns.
<END_TASK>
<USER_TASK:>
Description:
def to_frame(self, columns=None):
"""
Make a DataFrame with the given columns.
Will always return a copy of the underlying table.
Parameters
----------
columns : sequence or string, optional
Sequence of the column names desired in the DataFrame. A string
can also be passed if only one column is desired.
If None all columns are returned, including registered columns.
Returns
-------
frame : pandas.DataFrame
""" |
extra_cols = _columns_for_table(self.name)
if columns is not None:
columns = [columns] if isinstance(columns, str) else columns
columns = set(columns)
set_extra_cols = set(extra_cols)
local_cols = set(self.local.columns) & columns - set_extra_cols
df = self.local[list(local_cols)].copy()
extra_cols = {k: extra_cols[k] for k in (columns & set_extra_cols)}
else:
df = self.local.copy()
with log_start_finish(
'computing {!r} columns for table {!r}'.format(
len(extra_cols), self.name),
logger):
for name, col in extra_cols.items():
with log_start_finish(
'computing column {!r} for table {!r}'.format(
name, self.name),
logger):
df[name] = col()
return df |
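A sketch of pulling a registered table into a plain DataFrame (hedged: assumes `get_table` returns the wrapper defining to_frame above; table and column names are hypothetical):
import orca
households = orca.get_table('households')
full = households.to_frame()                        # local plus computed columns
income_only = households.to_frame(columns='income') # a single column given as a str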
<SYSTEM_TASK:>
Add or replace a column in the underlying DataFrame.
<END_TASK>
<USER_TASK:>
Description:
def update_col(self, column_name, series):
"""
Add or replace a column in the underlying DataFrame.
Parameters
----------
column_name : str
Column to add or replace.
series : pandas.Series or sequence
Column data.
""" |
logger.debug('updating column {!r} in table {!r}'.format(
column_name, self.name))
self.local[column_name] = series |
<SYSTEM_TASK:>
Report column type as one of 'local', 'series', or 'function'.
<END_TASK>
<USER_TASK:>
Description:
def column_type(self, column_name):
"""
Report column type as one of 'local', 'series', or 'function'.
Parameters
----------
column_name : str
Returns
-------
col_type : {'local', 'series', 'function'}
'local' means that the column is part of the registered table,
'series' means the column is a registered Pandas Series,
and 'function' means the column is a registered function providing
a Pandas Series.
""" |
extra_cols = list_columns_for_table(self.name)
if column_name in extra_cols:
col = _COLUMNS[(self.name, column_name)]
if isinstance(col, _SeriesWrapper):
return 'series'
elif isinstance(col, _ColumnFuncWrapper):
return 'function'
elif column_name in self.local_columns:
return 'local'
raise KeyError('column {!r} not found'.format(column_name)) |
<SYSTEM_TASK:>
Update existing values in a column from another series.
<END_TASK>
<USER_TASK:>
Description:
def update_col_from_series(self, column_name, series, cast=False):
"""
Update existing values in a column from another series.
Index values must match in both column and series. Optionally
casts data type to match the existing column.
Parameters
---------------
column_name : str
series : panas.Series
cast: bool, optional, default False
""" |
logger.debug('updating column {!r} in table {!r}'.format(
column_name, self.name))
col_dtype = self.local[column_name].dtype
if series.dtype != col_dtype:
if cast:
series = series.astype(col_dtype)
else:
err_msg = "Data type mismatch, existing:{}, update:{}"
err_msg = err_msg.format(col_dtype, series.dtype)
raise ValueError(err_msg)
self.local.loc[series.index, column_name] = series |
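A sketch of updating part of a column (hedged: assumes the wrapper is obtained via `get_table`; the table, column, and index values are hypothetical):
import pandas as pd
import orca
households = orca.get_table('households')
# Only the rows at index 3 and 7 are touched; cast=True coerces the new values
# to the existing column's dtype instead of raising on a mismatch.
new_income = pd.Series([52000.0, 61000.0], index=[3, 7])
households.update_col_from_series('income', new_income, cast=True)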
<SYSTEM_TASK:>
Remove cached results from this table's computed columns.
<END_TASK>
<USER_TASK:>
Description:
def clear_cached(self):
"""
Remove cached results from this table's computed columns.
""" |
_TABLE_CACHE.pop(self.name, None)
for col in _columns_for_table(self.name).values():
col.clear_cached()
logger.debug('cleared cached columns for table {!r}'.format(self.name)) |
<SYSTEM_TASK:>
Call the wrapped function and return the result wrapped by
<END_TASK>
<USER_TASK:>
Description:
def _call_func(self):
"""
Call the wrapped function and return the result wrapped by
DataFrameWrapper.
Also updates attributes like columns, index, and length.
""" |
if _CACHING and self.cache and self.name in _TABLE_CACHE:
logger.debug('returning table {!r} from cache'.format(self.name))
return _TABLE_CACHE[self.name].value
with log_start_finish(
'call function to get frame for table {!r}'.format(
self.name),
logger):
kwargs = _collect_variables(names=self._argspec.args,
expressions=self._argspec.defaults)
frame = self._func(**kwargs)
self._columns = list(frame.columns)
self._index = frame.index
self._len = len(frame)
wrapped = DataFrameWrapper(self.name, frame, copy_col=self.copy_col)
if self.cache:
_TABLE_CACHE[self.name] = CacheItem(
self.name, wrapped, self.cache_scope)
return wrapped |
<SYSTEM_TASK:>
Remove any cached result of this column.
<END_TASK>
<USER_TASK:>
Description:
def clear_cached(self):
"""
Remove any cached result of this column.
""" |
x = _COLUMN_CACHE.pop((self.table_name, self.name), None)
if x is not None:
logger.debug(
'cleared cached value for column {!r} in table {!r}'.format(
self.name, self.table_name)) |
<SYSTEM_TASK:>
Clear a cached result for this injectable.
<END_TASK>
<USER_TASK:>
Description:
def clear_cached(self):
"""
Clear a cached result for this injectable.
""" |
x = _INJECTABLE_CACHE.pop(self.name, None)
if x:
logger.debug(
'injectable {!r} removed from cache'.format(self.name)) |
<SYSTEM_TASK:>
Tables injected into the step.
<END_TASK>
<USER_TASK:>
Description:
def _tables_used(self):
"""
Tables injected into the step.
Returns
-------
tables : set of str
""" |
args = list(self._argspec.args)
if self._argspec.defaults:
default_args = list(self._argspec.defaults)
else:
default_args = []
# Combine names from argument names and argument default values.
names = args[:len(args) - len(default_args)] + default_args
tables = set()
for name in names:
parent_name = name.split('.')[0]
if is_table(parent_name):
tables.add(parent_name)
return tables |
<SYSTEM_TASK:>
Given a graph, nodes to explore and an optional root, do a breadth-first
<END_TASK>
<USER_TASK:>
Description:
def qbe_tree(graph, nodes, root=None):
"""
Given a graph, nodes to explore and an optional root, do a breadth-first
search in order to return the tree.
""" |
if root:
start = root
else:
index = random.randint(0, len(nodes) - 1)
start = nodes[index]
# A queue for BFS instead of DFS
to_visit = deque()
cnodes = copy(nodes)
visited = set()
# Format is (parent, parent_edge, neighbor, neighbor_field)
to_visit.append((None, None, start, None))
tree = {}
while len(to_visit) != 0 and nodes:
parent, parent_edge, v, v_edge = to_visit.pop()
# Prune
if v in nodes:
nodes.remove(v)
node = graph[v]
if v not in visited and len(node) > 1:
visited.add(v)
# Preorder process
if all((parent, parent_edge, v, v_edge)):
if parent not in tree:
tree[parent] = []
if (parent_edge, v, v_edge) not in tree[parent]:
tree[parent].append((parent_edge, v, v_edge))
if v not in tree:
tree[v] = []
if (v_edge, parent, parent_edge) not in tree[v]:
tree[v].append((v_edge, parent, parent_edge))
# Iteration
for node_edge, neighbor, neighbor_edge in node:
value = (v, node_edge, neighbor, neighbor_edge)
to_visit.append(value)
remove_leafs(tree, cnodes)
return tree, (len(nodes) == 0) |
<SYSTEM_TASK:>
Create a matrix in which each row is a tuple containing one of the solutions or
<END_TASK>
<USER_TASK:>
Description:
def combine(items, k=None):
"""
Create a matrix in which each row is a tuple containing one of the solutions,
or return only the k-th solution.
""" |
length_items = len(items)
lengths = [len(i) for i in items]
length = reduce(lambda x, y: x * y, lengths)
repeats = [reduce(lambda x, y: x * y, lengths[i:])
for i in range(1, length_items)] + [1]
if k is not None:
k = k % length
# Python division by default is integer division (~ floor(a/b))
indices = [old_div((k % (lengths[i] * repeats[i])), repeats[i])
for i in range(length_items)]
return [items[i][indices[i]] for i in range(length_items)]
else:
matrix = []
for i, item in enumerate(items):
row = []
for subset in item:
row.extend([subset] * repeats[i])
times = old_div(length, len(row))
matrix.append(row * times)
# Transpose the matrix so each row is one combination (columns become rows)
return list(zip(*matrix)) |
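A worked sketch of the behavior (hedged: assumes combine() above is importable; the inputs are made up):
items = [['a', 'b'], [1, 2, 3]]
combine(items)
# -> [('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2), ('b', 3)]
combine(items, k=4)
# -> ['b', 2]   (the combination at row index 4 of the full matrix)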
<SYSTEM_TASK:>
Return data about a function source, including file name,
<END_TASK>
<USER_TASK:>
Description:
def func_source_data(func):
"""
Return data about a function source, including file name,
line number, and source code.
Parameters
----------
func : object
May be anything supported by the inspect module, such as a function,
method, or class.
Returns
-------
filename : str
lineno : int
The line number on which the function starts.
source : str
""" |
filename = inspect.getsourcefile(func)
lineno = inspect.getsourcelines(func)[1]
source = inspect.getsource(func)
return filename, lineno, source |
<SYSTEM_TASK:>
Checks that there is at least one field to select
<END_TASK>
<USER_TASK:>
Description:
def clean(self):
"""
Checks that there is at least one field to select
""" |
if any(self.errors):
# Don't bother validating the formset unless each form is valid on
# its own
return
(selects, aliases, froms, wheres, sorts, groups_by,
params) = self.get_query_parts()
if not selects:
validation_message = _(u"At least you must check a row to get.")
raise forms.ValidationError(validation_message)
self._selects = selects
self._aliases = aliases
self._froms = froms
self._wheres = wheres
self._sorts = sorts
self._groups_by = groups_by
self._params = params |
<SYSTEM_TASK:>
Fetch all results after performing the SQL query.
<END_TASK>
<USER_TASK:>
Description:
def get_results(self, limit=None, offset=None, query=None, admin_name=None,
row_number=False):
"""
Fetch all results after performing the SQL query.
""" |
add_extra_ids = (admin_name is not None)
if not query:
sql = self.get_raw_query(limit=limit, offset=offset,
add_extra_ids=add_extra_ids)
else:
sql = query
if settings.DEBUG:
print(sql)
cursor = self._db_connection.cursor()
cursor.execute(sql, tuple(self._params))
query_results = cursor.fetchall()
if admin_name and not self._groups_by:
selects = self._get_selects_with_extra_ids()
results = []
try:
offset = int(offset)
except ValueError:
offset = 0
for r, row in enumerate(query_results):
i = 0
l = len(row)
if row_number:
result = [(r + offset + 1, u"#row%s" % (r + offset + 1))]
else:
result = []
while i < l:
appmodel, field = selects[i].split(".")
appmodel = self._unquote_name(appmodel)
field = self._unquote_name(field)
try:
if appmodel in self._models:
_model = self._models[appmodel]
_appmodel = u"%s_%s" % (_model._meta.app_label,
_model._meta.model_name)
else:
_appmodel = appmodel
admin_url = reverse("%s:%s_change" % (
admin_name,
_appmodel),
args=[row[i + 1]]
)
except NoReverseMatch:
admin_url = None
result.append((row[i], admin_url))
i += 2
results.append(result)
return results
else:
if row_number:
results = []
for r, row in enumerate(query_results):
result = [r + 1]
for cell in row:
result.append(cell)
results.append(result)
return results
else:
return query_results |
<SYSTEM_TASK:>
Return a tuple of content type and charset.
<END_TASK>
<USER_TASK:>
Description:
def parse_content_type(content_type):
"""
Return a tuple of content type and charset.
:param content_type: A string describing a content type.
""" |
if '; charset=' in content_type:
return tuple(content_type.split('; charset='))
else:
if 'text' in content_type:
encoding = 'ISO-8859-1'
else:
try:
format = formats.find_by_content_type(content_type)
except formats.UnknownFormat:
encoding = 'ISO-8859-1'
else:
encoding = format.default_encoding or 'ISO-8859-1'
return (content_type, encoding) |
<SYSTEM_TASK:>
Return a list of content types listed in the HTTP Accept header
<END_TASK>
<USER_TASK:>
Description:
def parse_http_accept_header(header):
"""
Return a list of content types listed in the HTTP Accept header
ordered by quality.
:param header: A string describing the contents of the HTTP Accept header.
""" |
components = [item.strip() for item in header.split(',')]
l = []
for component in components:
if ';' in component:
subcomponents = [item.strip() for item in component.split(';')]
l.append(
(
subcomponents[0], # eg. 'text/html'
subcomponents[1][2:] # eg. 'q=0.9'
)
)
else:
l.append((component, '1'))
l.sort(
key = lambda i: i[1],
reverse = True
)
content_types = []
for i in l:
content_types.append(i[0])
return content_types |
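A worked sketch of the parsing above:
parse_http_accept_header('text/html,application/json;q=0.9,*/*;q=0.8')
# -> ['text/html', 'application/json', '*/*']   (ordered by descending quality)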
<SYSTEM_TASK:>
Parse a request with multipart data.
<END_TASK>
<USER_TASK:>
Description:
def parse_multipart_data(request):
"""
Parse a request with multipart data.
:param request: A HttpRequest instance.
""" |
return MultiPartParser(
META=request.META,
input_data=StringIO(request.body),
upload_handlers=request.upload_handlers,
encoding=request.encoding
).parse() |
<SYSTEM_TASK:>
Override the views class' supported formats for the decorated function.
<END_TASK>
<USER_TASK:>
Description:
def override_supported_formats(formats):
"""
Override the views class' supported formats for the decorated function.
Arguments:
formats -- A list of strings describing formats, e.g. ``['html', 'json']``.
""" |
def decorator(function):
@wraps(function)
def wrapper(self, *args, **kwargs):
self.supported_formats = formats
return function(self, *args, **kwargs)
return wrapper
return decorator |
<SYSTEM_TASK:>
Route the decorated view.
<END_TASK>
<USER_TASK:>
Description:
def route(regex, method, name):
"""
Route the decorated view.
:param regex: A string describing a regular expression to which the request path will be matched.
:param method: A string describing the HTTP method that this view accepts.
:param name: A string describing the name of the URL pattern.
``regex`` may also be a lambda that accepts the parent resource's ``prefix`` argument and returns
a string describing a regular expression to which the request path will be matched.
``name`` may also be a lambda that accepts the parent resource's ``views`` argument and returns
a string describing the name of the URL pattern.
""" |
def decorator(function):
function.route = routes.route(
regex = regex,
view = function.__name__,
method = method,
name = name
)
@wraps(function)
def wrapper(self, *args, **kwargs):
return function(self, *args, **kwargs)
return wrapper
return decorator |
<SYSTEM_TASK:>
Run the given method prior to the decorated view.
<END_TASK>
<USER_TASK:>
Description:
def before(method_name):
"""
Run the given method prior to the decorated view.
If you return anything besides ``None`` from the given method,
its return values will replace the arguments of the decorated
view.
If you return an instance of ``HttpResponse`` from the given method,
Respite will return it immediately without delegating the request to the
decorated view.
Example usage::
class ArticleViews(Views):
@before('_load')
def show(self, request, article):
return self._render(
request = request,
template = 'show',
context = {
'article': article
}
)
def _load(self, request, id):
try:
return request, Article.objects.get(id=id)
except Article.DoesNotExist:
return self._error(request, 404, message='The article could not be found.')
:param method_name: A string describing a class method.
""" |
def decorator(function):
@wraps(function)
def wrapper(self, *args, **kwargs):
returns = getattr(self, method_name)(*args, **kwargs)
if returns is None:
return function(self, *args, **kwargs)
else:
if isinstance(returns, HttpResponse):
return returns
else:
return function(self, *returns)
return wrapper
return decorator |
<SYSTEM_TASK:>
Render a list of objects.
<END_TASK>
<USER_TASK:>
Description:
def index(self, request):
"""Render a list of objects.""" |
objects = self.model.objects.all()
return self._render(
request = request,
template = 'index',
context = {
cc2us(pluralize(self.model.__name__)): objects,
},
status = 200
) |
<SYSTEM_TASK:>
Render a form to create a new object.
<END_TASK>
<USER_TASK:>
Description:
def new(self, request):
"""Render a form to create a new object.""" |
form = (self.form or generate_form(self.model))()
return self._render(
request = request,
template = 'new',
context = {
'form': form
},
status = 200
) |
<SYSTEM_TASK:>
Render a form to edit an object.
<END_TASK>
<USER_TASK:>
Description:
def edit(self, request, id):
"""Render a form to edit an object.""" |
try:
object = self.model.objects.get(id=id)
except self.model.DoesNotExist:
return self._render(
request = request,
template = '404',
context = {
'error': 'The %s could not be found.' % self.model.__name__.lower()
},
status = 404,
prefix_template_path = False
)
form = (self.form or generate_form(self.model))(instance=object)
# Add "_method" field to override request method to PUT
form.fields['_method'] = CharField(required=True, initial='PUT', widget=HiddenInput)
return self._render(
request = request,
template = 'edit',
context = {
cc2us(self.model.__name__): object,
'form': form
},
status = 200
) |
<SYSTEM_TASK:>
Returns a Q object from filters config and actual parameters.
<END_TASK>
<USER_TASK:>
Description:
def build_q(fields_dict, params_dict, request=None):
"""
Returns a Q object from filters config and actual parameters.
""" |
# Building search query
# queries generated by different search_fields are ANDed
# if a search field is defined for more than one field, are put together with OR
and_query = Q()
for fieldname in fields_dict:
search_field = fields_dict[fieldname]
if fieldname in params_dict and params_dict[fieldname] != '' and params_dict[fieldname] != []:
or_query = None
if type(search_field) == type(list()):
field_list = search_field
search_operator = "__icontains"
fixed_filters = None
multiple_values = False
custom_query_method = None
value_mapper = None
else: # dictionary of field definitions
if search_field.get('ignore', False):
continue
field_list = search_field['fields']
search_operator = search_field.get('operator', None)
fixed_filters = search_field.get('fixed_filters', None)
multiple_values = search_field.get('multiple', False)
custom_query_method = search_field.get('custom_query', None)
value_mapper = search_field.get('value_mapper', None)
for model_field in field_list:
if multiple_values:
if hasattr(params_dict, "getlist"):
request_field_value = params_dict.getlist(fieldname)
elif type(params_dict[fieldname]) == list:
request_field_value = params_dict[fieldname]
else:
request_field_value = [params_dict[fieldname]]
if value_mapper:
request_field_value = [value_mapper(value) for value in request_field_value]
else:
request_field_value = params_dict[fieldname] if not value_mapper else value_mapper(params_dict[fieldname])
if not custom_query_method:
fieldname_key = model_field + search_operator
filter_dict = { fieldname_key : request_field_value}
if not or_query:
or_query = Q(**filter_dict)
else:
or_query = or_query | Q(**filter_dict)
else:
#TODO: this is a hack for using request data in custom_query
#it would be better to pass ALSO the request to custom_query_method
if not request:
cf = custom_query_method(model_field, request_field_value, params_dict)
else:
cf = custom_query_method(model_field, request_field_value, request)
if not or_query:
or_query = cf
else:
or_query = or_query | cf
#fixed_filters
fixed_filters_q = Q()
#fixed_filters must return a Q object or None
if fixed_filters:
if callable(fixed_filters):
fixed_filters_q = fixed_filters(params_dict)
elif type(fixed_filters) is dict:
fixed_filters_q = Q(**fixed_filters)
and_query = and_query & or_query
and_query = and_query & fixed_filters_q
return and_query |
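A configuration sketch for the filter builder above (hedged: the model fields and request parameters are hypothetical, and the resulting Q is only roughly spelled out):
fields_dict = {
    'q': ['title', 'body'],                                  # plain list: icontains on either field
    'status': {'fields': ['status'], 'operator': '__exact'},
}
params_dict = {'q': 'django', 'status': 'published'}
query = build_q(fields_dict, params_dict)
# Roughly: (Q(title__icontains='django') | Q(body__icontains='django'))
#          & Q(status__exact='published')
# MyModel.objects.filter(query)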
<SYSTEM_TASK:>
Find and return a format by name, acronym or extension.
<END_TASK>
<USER_TASK:>
Description:
def find(identifier):
"""
Find and return a format by name, acronym or extension.
:param identifier: A string describing the format.
""" |
for format in FORMATS:
if identifier in [format.name, format.acronym, format.extension]:
return format
raise UnknownFormat('No format found with name, acronym or extension "%s"' % identifier) |
<SYSTEM_TASK:>
Find and return a format by name.
<END_TASK>
<USER_TASK:>
Description:
def find_by_name(name):
"""
Find and return a format by name.
:param name: A string describing the name of the format.
""" |
for format in FORMATS:
if name == format.name:
return format
raise UnknownFormat('No format found with name "%s"' % name) |
<SYSTEM_TASK:>
Find and return a format by extension.
<END_TASK>
<USER_TASK:>
Description:
def find_by_extension(extension):
"""
Find and return a format by extension.
:param extension: A string describing the extension of the format.
""" |
for format in FORMATS:
if extension in format.extensions:
return format
raise UnknownFormat('No format found with extension "%s"' % extension) |
<SYSTEM_TASK:>
Find and return a format by content type.
<END_TASK>
<USER_TASK:>
Description:
def find_by_content_type(content_type):
"""
Find and return a format by content type.
:param content_type: A string describing the internet media type of the format.
""" |
for format in FORMATS:
if content_type in format.content_types:
return format
raise UnknownFormat('No format found with content type "%s"' % content_type) |
<SYSTEM_TASK:>
List communication options.
<END_TASK>
<USER_TASK:>
Description:
def options(self, request, map, *args, **kwargs):
"""List communication options.""" |
options = {}
for method, function in map.items():
options[method] = function.__doc__
return self._render(
request = request,
template = 'options',
context = {
'options': options
},
status = 200,
headers = {
'Allow': ', '.join(options.keys())
}
) |
<SYSTEM_TASK:>
Determine and return a 'formats.Format' instance describing the most desired response format
<END_TASK>
<USER_TASK:>
Description:
def _get_format(self, request):
"""
Determine and return a 'formats.Format' instance describing the most desired response format
that is supported by these views.
:param request: A django.http.HttpRequest instance.
Formats specified by extension (e.g. '/articles/index.html') take precedence over formats
given in the HTTP Accept header, even if it's a format that isn't known by Respite.
If the request doesn't specify a format by extension (e.g. '/articles/' or '/articles/new')
and none of the formats in the HTTP Accept header are supported, Respite will fall back
on the format given in DEFAULT_FORMAT.
""" |
# Derive a list of 'formats.Format' instances from the list of formats these views support.
supported_formats = [formats.find(format) for format in self.supported_formats]
# Determine format by extension...
if '.' in request.path:
extension = request.path.split('.')[-1]
try:
format = formats.find_by_extension(extension)
except formats.UnknownFormat:
return None
if format in supported_formats:
return format
else:
return None
# Determine format by HTTP Accept header...
if 'HTTP_ACCEPT' in request.META:
content_types = parse_http_accept_header(request.META['HTTP_ACCEPT'])
# Only consider 'accept' headers with a single format in an attempt to play nice
# with browsers that ask for formats they really should not want.
if len(content_types) == 1:
content_type = content_types[0]
# If the request has no preference as to the format of its response, prefer the
# first of the view's supported formats.
if content_type == '*/*':
return supported_formats[0]
try:
format = formats.find_by_content_type(content_type)
except formats.UnknownFormat:
return None
if format in supported_formats:
return format
else:
return None
# If no format is given by either extension or header, default to the format given in
# RESPITE_DEFAULT_FORMAT (given, of course, that it's supported by the view).
if DEFAULT_FORMAT:
format = formats.find(DEFAULT_FORMAT)
if format in supported_formats:
return format
else:
return None |
<SYSTEM_TASK:>
Render an HTTP response.
<END_TASK>
<USER_TASK:>
Description:
def _render(self, request, template=None, status=200, context={}, headers={}, prefix_template_path=True):
"""
Render an HTTP response.
:param request: A django.http.HttpRequest instance.
:param template: A string describing the path to a template.
:param status: An integer describing the HTTP status code to respond with.
:param context: A dictionary describing variables to populate the template with.
:param headers: A dictionary describing HTTP headers.
:param prefix_template_path: A boolean describing whether to prefix the template with the view's template path.
Please note that ``template`` must not specify an extension, as one will be appended
according to the request format. For example, a value of ``blog/posts/index``
would populate ``blog/posts/index.html`` for requests that query the resource's
HTML representation.
If no template that matches the request format exists at the given location, or if ``template`` is ``None``,
Respite will attempt to serialize the template context automatically. You can change the way your models
are serialized by defining ``serialize`` methods that return a dictionary::
class NuclearMissile(models.Model):
serial_number = models.IntegerField()
is_armed = models.BooleanField()
launch_code = models.IntegerField()
def serialize(self):
return {
'serial_number': self.serial_number,
'is_armed': self.is_armed
}
If the request format is not supported by the view (as determined by the ``supported_formats``
property or a specific view's ``override_supported_formats`` decorator), this function will
yield HTTP 406 Not Acceptable.
""" |
format = self._get_format(request)
# Render 406 Not Acceptable if the requested format isn't supported.
if not format:
return HttpResponse(status=406)
if template:
if prefix_template_path:
template_path = '%s.%s' % (self.template_path + template, format.extension)
else:
template_path = '%s.%s' % (template, format.extension)
try:
response = render(
request = request,
template_name = template_path,
dictionary = context,
status = status,
content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET)
)
except TemplateDoesNotExist:
try:
response = HttpResponse(
content = serializers.find(format)(context).serialize(request),
content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET),
status = status
)
except serializers.UnknownSerializer:
raise self.Error(
'No template exists at %(template_path)s, and no serializer found for %(format)s' % {
'template_path': template_path,
'format': format
}
)
else:
response = HttpResponse(
content = serializers.find(format)(context).serialize(request),
content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET),
status = status
)
for header, value in headers.items():
response[header] = value
return response |
<SYSTEM_TASK:>
Convenience method to render an error response. The template is inferred from the status code.
<END_TASK>
<USER_TASK:>
Description:
def _error(self, request, status, headers={}, prefix_template_path=False, **kwargs):
"""
Convenience method to render an error response. The template is inferred from the status code.
:param request: A django.http.HttpRequest instance.
:param status: An integer describing the HTTP status code to respond with.
:param headers: A dictionary describing HTTP headers.
:param prefix_template_path: A boolean describing whether to prefix the template with the view's template path.
:param kwargs: Any additional keyword arguments to inject. These are wrapped under ``error`` for convenience.
For implementation details, see ``render``
""" |
return self._render(
request = request,
template = str(status),
status = status,
context = {
'error': kwargs
},
headers = headers,
prefix_template_path = prefix_template_path
) |
<SYSTEM_TASK:>
Find and return a serializer for the given format.
<END_TASK>
<USER_TASK:>
Description:
def find(format):
"""
Find and return a serializer for the given format.
Arguments:
format -- A Format instance.
""" |
try:
serializer = SERIALIZERS[format]
except KeyError:
raise UnknownSerializer('No serializer found for %s' % format.acronym)
return serializer |
<SYSTEM_TASK:>
Returns the keyword arguments for instantiating the search form.
<END_TASK>
<USER_TASK:>
Description:
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the search form.
""" |
update_data ={}
sfdict = self.filter_class.get_search_fields()
for fieldname in sfdict:
try:
has_multiple = sfdict[fieldname].get('multiple', False)
except:
has_multiple = False
if has_multiple:
value = self.request.GET.getlist(fieldname, [])
else:
value = self.request.GET.get(fieldname, None)
update_data[fieldname] = value
if self.order_field:
update_data[self.order_field] = self.request.GET.get(self.order_field, None)
initial = self.get_initial()
initial.update(update_data)
kwargs = {'initial': initial }
if self.groups_for_userlist != None:
pot_users = User.objects.exclude(id=self.request.user.id)
if len(self.groups_for_userlist):
pot_users = pot_users.filter(groups__name__in = self.groups_for_userlist)
pot_users = pot_users.distinct().order_by('username')
user_choices = tuple([(user.id, str(user)) for user in pot_users])
kwargs['user_choices'] = user_choices
return kwargs |
<SYSTEM_TASK:>
Pluralize an English noun.
<END_TASK>
<USER_TASK:>
Description:
def pluralize(word) :
"""Pluralize an English noun.""" |
rules = [
['(?i)(quiz)$' , '\\1zes'],
['^(?i)(ox)$' , '\\1en'],
['(?i)([m|l])ouse$' , '\\1ice'],
['(?i)(matr|vert|ind)ix|ex$' , '\\1ices'],
['(?i)(x|ch|ss|sh)$' , '\\1es'],
['(?i)([^aeiouy]|qu)ies$' , '\\1y'],
['(?i)([^aeiouy]|qu)y$' , '\\1ies'],
['(?i)(hive)$' , '\\1s'],
['(?i)(?:([^f])fe|([lr])f)$' , '\\1\\2ves'],
['(?i)sis$' , 'ses'],
['(?i)([ti])um$' , '\\1a'],
['(?i)(buffal|tomat)o$' , '\\1oes'],
['(?i)(bu)s$' , '\\1ses'],
['(?i)(alias|status)' , '\\1es'],
['(?i)(octop|vir)us$' , '\\1i'],
['(?i)(ax|test)is$' , '\\1es'],
['(?i)s$' , 's'],
['(?i)$' , 's']
]
uncountable_words = ['equipment', 'information', 'rice', 'money', 'species', 'series', 'fish', 'sheep']
irregular_words = {
'person' : 'people',
'man' : 'men',
'child' : 'children',
'sex' : 'sexes',
'move' : 'moves'
}
lower_cased_word = word.lower();
for uncountable_word in uncountable_words:
if lower_cased_word[-1*len(uncountable_word):] == uncountable_word :
return word
for irregular in irregular_words.keys():
match = re.search('('+irregular+')$',word, re.IGNORECASE)
if match:
return re.sub('(?i)'+irregular+'$', match.expand('\\1')[0]+irregular_words[irregular][1:], word)
for rule in range(len(rules)):
match = re.search(rules[rule][0], word, re.IGNORECASE)
if match :
groups = match.groups()
for k in range(0,len(groups)) :
if groups[k] == None :
rules[rule][1] = rules[rule][1].replace('\\'+str(k+1), '')
return re.sub(rules[rule][0], rules[rule][1], word)
return word |
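A few worked outputs of the rules above:
print(pluralize('quiz'))     # quizzes
print(pluralize('person'))   # people   (irregular)
print(pluralize('sheep'))    # sheep    (uncountable)
print(pluralize('article'))  # articles (default rule: append 's')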
<SYSTEM_TASK:>
Generate a form from a model.
<END_TASK>
<USER_TASK:>
Description:
def generate_form(model, form=None, fields=False, exclude=False):
"""
Generate a form from a model.
:param model: A Django model.
:param form: A Django form.
:param fields: A list of fields to include in this form.
:param exclude: A list of fields to exclude in this form.
""" |
_model, _fields, _exclude = model, fields, exclude
class Form(form or forms.ModelForm):
class Meta:
model = _model
if _fields is not False:
fields = _fields
if _exclude is not False:
exclude = _exclude
return Form |
<SYSTEM_TASK:>
Note that this function requires Scipy.
<END_TASK>
<USER_TASK:>
Description:
def sample_double_norm(mean, std_upper, std_lower, size):
"""Note that this function requires Scipy.""" |
from scipy.special import erfinv
# There's probably a better way to do this. We first draw percentiles
# uniformly between 0 and 1. We want the peak of the distribution to occur
# at `mean`. However, if we assign 50% of the samples to the lower half
# and 50% to the upper half, the side with the smaller variance will be
# overrepresented because of the 1/sigma normalization of the Gaussian
# PDF. Therefore we need to divide points between the two halves with a
# fraction `cutoff` (defined below) going to the lower half. Having
# partitioned them this way, we can then use the standard Gaussian
# quantile function to go from percentiles to sample values -- except that
# we must remap from [0, cutoff] to [0, 0.5] and from [cutoff, 1] to [0.5,
# 1].
samples = np.empty(size)
percentiles = np.random.uniform(0., 1., size)
cutoff = std_lower / (std_lower + std_upper)
w = (percentiles < cutoff)
percentiles[w] *= 0.5 / cutoff
samples[w] = mean + np.sqrt(2) * std_lower * erfinv(2 * percentiles[w] - 1)
w = ~w
percentiles[w] = 1 - (1 - percentiles[w]) * 0.5 / (1 - cutoff)
samples[w] = mean + np.sqrt(2) * std_upper * erfinv(2 * percentiles[w] - 1)
return samples |
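A usage sketch (hedged: requires numpy and scipy; the numbers are made up):
import numpy as np
# Peak at 10, 1-sigma width of 1 below the peak and 3 above it.
samples = sample_double_norm(mean=10., std_upper=3., std_lower=1., size=100000)
# The histogram peaks near 10 with a longer tail toward larger values.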
<SYSTEM_TASK:>
Given a modal value and a standard deviation, compute corresponding
<END_TASK>
<USER_TASK:>
Description:
def find_gamma_params(mode, std):
"""Given a modal value and a standard deviation, compute corresponding
parameters for the gamma distribution.
Intended to be used to replace normal distributions when the value must be
positive and the uncertainty is comparable to the best value. Conversion
equations determined from the relations given in the sample_gamma()
docs.
""" |
if mode < 0:
raise ValueError('input mode must be positive for gamma; got %e' % mode)
var = std**2
beta = (mode + np.sqrt(mode**2 + 4 * var)) / (2 * var)
j = 2 * var / mode**2
alpha = (j + 1 + np.sqrt(2 * j + 1)) / j
if alpha <= 1:
raise ValueError('couldn\'t compute self-consistent gamma parameters: '
'mode=%e std=%e alpha=%e beta=%e' % (mode, std, alpha, beta))
return alpha, beta |
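A quick check of the conversion (hedged: numeric values are approximate):
alpha, beta = find_gamma_params(4.0, 2.0)
print((alpha - 1) / beta)   # ~4.0: the gamma mode matches the requested mode
print(alpha / beta**2)      # ~4.0: the gamma variance matches std**2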
<SYSTEM_TASK:>
Compute the appropriate Lval "kind" for the limit of value `x` towards
<END_TASK>
<USER_TASK:>
Description:
def _lval_add_towards_polarity(x, polarity):
"""Compute the appropriate Lval "kind" for the limit of value `x` towards
`polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and
the infinity direction of polarity.
""" |
if x < 0:
if polarity < 0:
return Lval('toinf', x)
return Lval('pastzero', x)
elif polarity > 0:
return Lval('toinf', x)
return Lval('pastzero', x) |
<SYSTEM_TASK:>
Return -1 if this value is some kind of upper limit, 1 if this value
<END_TASK>
<USER_TASK:>
Description:
def limtype(msmt):
"""Return -1 if this value is some kind of upper limit, 1 if this value
is some kind of lower limit, 0 otherwise.""" |
if np.isscalar(msmt):
return 0
if isinstance(msmt, Uval):
return 0
if isinstance(msmt, Lval):
if msmt.kind == 'undef':
raise ValueError('no simple limit type for Lval %r' % msmt)
# Quasi-hack here: limits of ('tozero', [positive number]) are
# reported as upper limits. In a plot full of fluxes this would be
# what makes sense, but note that this would be misleading if the
# quantity in question was something that could go negative.
p = msmt._polarity()
if p == -2 or p == 1:
return -1
if p == 2 or p == -1:
return 1
return 0
if isinstance(msmt, Textual):
return msmt.limtype()
raise ValueError('don\'t know how to treat %r as a measurement' % msmt) |
<SYSTEM_TASK:>
We assume a Poisson process. nevents is the number of events in
<END_TASK>
<USER_TASK:>
Description:
def from_pcount(nevents):
"""We assume a Poisson process. nevents is the number of events in
some interval. The distribution of values is the distribution of the
Poisson rate parameter given this observed number of events, where the
"rate" is in units of events per interval of the same duration. The
max-likelihood value is nevents, but the mean value is nevents + 1.
The gamma distribution is obtained by assuming an improper, uniform
prior for the rate between 0 and infinity.""" |
if nevents < 0:
raise ValueError('Poisson parameter `nevents` must be nonnegative')
return Uval(np.random.gamma(nevents + 1, size=uval_nsamples)) |
<SYSTEM_TASK:>
Compute representative statistical values for this Uval. `method`
<END_TASK>
<USER_TASK:>
Description:
def repvals(self, method):
"""Compute representative statistical values for this Uval. `method`
may be either 'pct' or 'gauss'.
Returns (best, plus_one_sigma, minus_one_sigma), where `best` is the
"best" value in some sense, and the others correspond to values at
the ~84 and 16 percentile limits, respectively. Because of the
sampled nature of the Uval system, there is no single method to
compute these numbers.
The "pct" method returns the 50th, 15.866th, and 84.134th percentile
values.
The "gauss" method computes the mean μ and standard deviation σ of the
samples and returns [μ, μ+σ, μ-σ].
""" |
if method == 'pct':
return pk_scoreatpercentile(self.d, [50., 84.134, 15.866])
if method == 'gauss':
m, s = self.d.mean(), self.d.std()
return np.asarray([m, m + s, m - s])
raise ValueError('unknown representative-value method "%s"' % method) |
<SYSTEM_TASK:>
Get a best-effort representative value as a float. This can be
<END_TASK>
<USER_TASK:>
Description:
def repval(self, limitsok=False):
"""Get a best-effort representative value as a float. This can be
DANGEROUS because it discards limit information, which is rarely wise.""" |
if not limitsok and self.dkind in ('lower', 'upper'):
raise LimitError()
if self.dkind == 'unif':
lower, upper = map(float, self.data)
v = 0.5 * (lower + upper)
elif self.dkind in _noextra_dkinds:
v = float(self.data)
elif self.dkind in _yesextra_dkinds:
v = float(self.data[0])
else:
raise RuntimeError('can\'t happen')
if self.tkind == 'log10':
return 10**v
return v |
<SYSTEM_TASK:>
Moreland's AdjustHue procedure to adjust the hue value of an Msh color
<END_TASK>
<USER_TASK:>
Description:
def moreland_adjusthue (msh, m_unsat):
"""Moreland's AdjustHue procedure to adjust the hue value of an Msh color
based on ... some criterion.
*msh* should be of of shape (3, ). *m_unsat* is a scalar.
Return value is the adjusted h (hue) value.
""" |
if msh[M] >= m_unsat:
return msh[H] # "Best we can do"
hspin = (msh[S] * np.sqrt (m_unsat**2 - msh[M]**2) /
(msh[M] * np.sin (msh[S])))
if msh[H] > -np.pi / 3: # "Spin away from purple"
return msh[H] + hspin
return msh[H] - hspin |
<SYSTEM_TASK:>
Get the node associated with each dataset. Some datasets
<END_TASK>
<USER_TASK:>
Description:
def get_datasets_in_nodes():
"""
Get the node associated with each dataset. Some datasets
will have an ambiguous node since they exist in more than
one node.
""" |
data_dir = os.path.join(scriptdir, "..", "usgs", "data")
cwic = map(lambda d: d["datasetName"], api.datasets(None, CWIC_LSI_EXPLORER_CATALOG_NODE)['data'])
ee = map(lambda d: d["datasetName"], api.datasets(None, EARTH_EXPLORER_CATALOG_NODE)['data'])
hdds = map(lambda d: d["datasetName"], api.datasets(None, HDDS_EXPLORER_CATALOG_NODE)['data'])
lpcs = map(lambda d: d["datasetName"], api.datasets(None, LPCS_EXPLORER_CATALOG_NODE)['data'])
# Create mapping from dataset to node
datasets = {}
datasets.update( { ds : "CWIC" for ds in cwic } )
datasets.update( { ds : "EE" for ds in ee } )
datasets.update( { ds : "HDDS" for ds in hdds } )
datasets.update( { ds : "LPCS" for ds in lpcs } )
datasets_path = os.path.join(data_dir, "datasets.json")
with open(datasets_path, "w") as f:
f.write(json.dumps(datasets))
# Find the datasets with ambiguous nodes
cwic_ee = [ds for ds in cwic if ds in ee]
cwic_hdds = [ds for ds in cwic if ds in hdds]
cwic_lpcs = [ds for ds in cwic if ds in lpcs]
ee_hdds = [ds for ds in ee if ds in hdds]
ee_lpcs = [ds for ds in ee if ds in lpcs]
hdds_lpcs = [ds for ds in hdds if ds in lpcs] |
<SYSTEM_TASK:>
Compute pivot wavelength assuming equal-energy convention.
<END_TASK>
<USER_TASK:>
Description:
def pivot_wavelength_ee(bpass):
"""Compute pivot wavelength assuming equal-energy convention.
`bpass` should have two properties, `resp` and `wlen`. The units of `wlen`
can be anything, and `resp` need not be normalized in any particular way.
""" |
from scipy.integrate import simps
return np.sqrt(simps(bpass.resp, bpass.wlen) /
simps(bpass.resp / bpass.wlen**2, bpass.wlen)) |
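A worked sketch for a flat box filter, where the pivot wavelength reduces to the geometric mean of the band edges (hedged: the pandas container is only a convenient way to get .wlen/.resp attributes; any object with those attributes works):
import numpy as np
import pandas as pd
wlen = np.linspace(4000., 6000., 201)                        # Angstrom
bpass = pd.DataFrame({'wlen': wlen, 'resp': np.ones_like(wlen)})
print(pivot_wavelength_ee(bpass))   # ~4899, i.e. sqrt(4000 * 6000)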
<SYSTEM_TASK:>
Get a Registry object pre-filled with information for standard
<END_TASK>
<USER_TASK:>
Description:
def get_std_registry():
"""Get a Registry object pre-filled with information for standard
telescopes.
""" |
from six import itervalues
reg = Registry()
for fn in itervalues(builtin_registrars):
fn(reg)
return reg |
<SYSTEM_TASK:>
Get the bandpass' pivot wavelength.
<END_TASK>
<USER_TASK:>
Description:
def pivot_wavelength(self):
"""Get the bandpass' pivot wavelength.
Unlike calc_pivot_wavelength(), this function will use a cached
value if available.
""" |
wl = self.registry._pivot_wavelengths.get((self.telescope, self.band))
if wl is not None:
return wl
wl = self.calc_pivot_wavelength()
self.registry.register_pivot_wavelength(self.telescope, self.band, wl)
return wl |
<SYSTEM_TASK:>
Calculate the wavelengths of the filter half-maximum values.
<END_TASK>
<USER_TASK:>
Description:
def calc_halfmax_points(self):
"""Calculate the wavelengths of the filter half-maximum values.
""" |
d = self._ensure_data()
return interpolated_halfmax_points(d.wlen, d.resp) |
<SYSTEM_TASK:>
Get the bandpass' half-maximum wavelengths. These can be used to
<END_TASK>
<USER_TASK:>
Description:
def halfmax_points(self):
"""Get the bandpass' half-maximum wavelengths. These can be used to
compute a representative bandwidth, or for display purposes.
Unlike calc_halfmax_points(), this function will use a cached value if
available.
""" |
t = self.registry._halfmaxes.get((self.telescope, self.band))
if t is not None:
return t
t = self.calc_halfmax_points()
self.registry.register_halfmaxes(self.telescope, self.band, t[0], t[1])
return t |
<SYSTEM_TASK:>
Return a list of bands associated with the specified telescope.
<END_TASK>
<USER_TASK:>
Description:
def bands(self, telescope):
"""Return a list of bands associated with the specified telescope.""" |
q = self._seen_bands.get(telescope)
if q is None:
return []
return list(q) |
<SYSTEM_TASK:>
Register precomputed pivot wavelengths.
<END_TASK>
<USER_TASK:>
Description:
def register_pivot_wavelength(self, telescope, band, wlen):
"""Register precomputed pivot wavelengths.""" |
if (telescope, band) in self._pivot_wavelengths:
raise AlreadyDefinedError('pivot wavelength for %s/%s already '
'defined', telescope, band)
self._note(telescope, band)
self._pivot_wavelengths[telescope,band] = wlen
return self |
<SYSTEM_TASK:>
Register precomputed half-max points.
<END_TASK>
<USER_TASK:>
Description:
def register_halfmaxes(self, telescope, band, lower, upper):
"""Register precomputed half-max points.""" |
if (telescope, band) in self._halfmaxes:
raise AlreadyDefinedError('half-max points for %s/%s already '
'defined', telescope, band)
self._note(telescope, band)
self._halfmaxes[telescope,band] = (lower, upper)
return self |
<SYSTEM_TASK:>
Get a Bandpass object for a known telescope and filter.
<END_TASK>
<USER_TASK:>
Description:
def get(self, telescope, band):
"""Get a Bandpass object for a known telescope and filter.""" |
klass = self._bpass_classes.get(telescope)
if klass is None:
raise NotDefinedError('bandpass data for %s not defined', telescope)
bp = klass()
bp.registry = self
bp.telescope = telescope
bp.band = band
return bp |
<SYSTEM_TASK:>
From the WISE All-Sky Explanatory Supplement, IV.4.h.i.1, and Jarrett+
<END_TASK>
<USER_TASK:>
Description:
def _load_data(self, band):
"""From the WISE All-Sky Explanatory Supplement, IV.4.h.i.1, and Jarrett+
2011. These are relative response per erg and so can be integrated
directly against F_nu spectra. Wavelengths are in micron,
uncertainties are in parts per thousand.
""" |
# `band` should be 1, 2, 3, or 4.
df = bandpass_data_frame('filter_wise_' + str(band) + '.dat', 'wlen resp uncert')
df.wlen *= 1e4 # micron to Angstrom
df.uncert *= df.resp / 1000. # parts per thou. to absolute values.
lo, hi = self._filter_subsets[band]
df = df[lo:hi] # clip zero parts of response.
return df |
<SYSTEM_TASK:>
Returns given comment HTML as plaintext.
<END_TASK>
<USER_TASK:>
Description:
def clean_comment_body(body):
"""Returns given comment HTML as plaintext.
Converts all HTML tags and entities within 4chan comments
into human-readable text equivalents.
""" |
body = _parser.unescape(body)
body = re.sub(r'<a [^>]+>(.+?)</a>', r'\1', body)
body = body.replace('<br>', '\n')
body = re.sub(r'<.+?>', '', body)
return body |
<SYSTEM_TASK:>
For compatibility between astropy and pywcs.
<END_TASK>
<USER_TASK:>
Description:
def _create_wcs (fitsheader):
"""For compatibility between astropy and pywcs.""" |
wcsmodule = _load_wcs_module ()
is_pywcs = hasattr (wcsmodule, 'UnitConverter')
wcs = wcsmodule.WCS (fitsheader)
wcs.wcs.set ()
wcs.wcs.fix () # I'm interested in MJD computation via datfix()
if hasattr (wcs, 'wcs_pix2sky'):
wcs.wcs_pix2world = wcs.wcs_pix2sky
wcs.wcs_world2pix = wcs.wcs_sky2pix
return wcs |
<SYSTEM_TASK:>
Safely pass string values to the CASA tools.
<END_TASK>
<USER_TASK:>
Description:
def sanitize_unicode(item):
"""Safely pass string values to the CASA tools.
item
A value to be passed to a CASA tool.
In Python 2, the bindings to CASA tasks expect to receive all string values
as binary data (:class:`str`) and not Unicode. But :mod:`pwkit` often uses
the ``from __future__ import unicode_literals`` statement to prepare for
Python 3 compatibility, and other Python modules are getting better about
using Unicode consistently, so more and more module code ends up using
Unicode strings in cases where they might get exposed to CASA. Doing so
will lead to errors.
This helper function converts Unicode into UTF-8 encoded bytes for
arguments that you might pass to a CASA tool. It will leave non-strings
unchanged and recursively transform collections, so you can safely use it
just about anywhere.
I usually import this as just ``b`` and write ``tool.method(b(arg))``, in
analogy with the ``b''`` byte string syntax. This leads to code such as::
from pwkit.environments.casa.util import tools, sanitize_unicode as b
tb = tools.table()
path = u'data.ms'
tb.open(path) # => raises exception
tb.open(b(path)) # => works
""" |
if isinstance(item, text_type):
return item.encode('utf8')
if isinstance(item, dict):
return dict((sanitize_unicode(k), sanitize_unicode(v)) for k, v in six.iteritems(item))
if isinstance(item,(list, tuple)):
return item.__class__(sanitize_unicode(x) for x in item)
from ...io import Path
if isinstance(item, Path):
return str(item)
return item |
<SYSTEM_TASK:>
Get a path within the CASA data directory.
<END_TASK>
<USER_TASK:>
Description:
def datadir(*subdirs):
"""Get a path within the CASA data directory.
subdirs
Extra elements to append to the returned path.
This function locates the directory where CASA resource data files (tables
of time offsets, calibrator models, etc.) are stored. If called with no
arguments, it simply returns that path. If arguments are provided, they are
appended to the returned path using :func:`os.path.join`, making it easy to
construct the names of specific data files. For instance::
from pwkit.environments.casa import util
cal_image_path = util.datadir('nrao', 'VLA', 'CalModels', '3C286_C.im')
tb = util.tools.image()
tb.open(cal_image_path)
""" |
import os.path
data = None
if 'CASAPATH' in os.environ:
data = os.path.join(os.environ['CASAPATH'].split()[0], 'data')
if data is None:
# The Conda CASA directory layout:
try:
import casadef
except ImportError:
pass
else:
data = os.path.join(os.path.dirname(casadef.task_directory), 'data')
if not os.path.isdir(data):
# Sigh, hack for CASA 4.7 + Conda; should be straightened out:
dn = os.path.dirname
data = os.path.join(dn(dn(dn(casadef.task_directory))), 'lib', 'casa', 'data')
if not os.path.isdir(data):
data = None
if data is None:
import casac
prevp = None
p = os.path.dirname(casac.__file__)
while len(p) and p != prevp:
data = os.path.join(p, 'data')
if os.path.isdir(data):
break
prevp = p
p = os.path.dirname(p)
if not os.path.isdir(data):
raise RuntimeError('cannot identify CASA data directory')
return os.path.join(data, *subdirs) |
<SYSTEM_TASK:>
Set up CASA to write log messages to standard output.
<END_TASK>
<USER_TASK:>
Description:
def logger(filter='WARN'):
"""Set up CASA to write log messages to standard output.
filter
The log level filter: less urgent messages will not be shown. Valid values
are strings: "DEBUG1", "INFO5", ... "INFO1", "INFO", "WARN", "SEVERE".
This function creates and returns a CASA “log sink” object that is
configured to write to standard output. The default CASA implementation
would *always* create a file named ``casapy.log`` in the current
directory; this function safely prevents such a file from being left
around. This is particularly important if you don’t have write permissions
to the current directory.
""" |
import os, shutil, tempfile
cwd = os.getcwd()
tempdir = None
try:
tempdir = tempfile.mkdtemp(prefix='casautil')
try:
os.chdir(tempdir)
sink = tools.logsink()
sink.setlogfile(sanitize_unicode(os.devnull))
try:
os.unlink('casapy.log')
except OSError as e:
if e.errno != 2:
raise
# otherwise, it's a ENOENT, in which case, no worries.
finally:
os.chdir(cwd)
finally:
if tempdir is not None:
shutil.rmtree(tempdir, onerror=_rmtree_error)
sink.showconsole(True)
sink.setglobal(True)
sink.filter(sanitize_unicode(filter.upper()))
return sink |
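Usage sketch: the `post` method is assumed from the standard CASA logsink tool interface rather than guaranteed by this wrapper.
sink = logger(filter='INFO')
sink.post(sanitize_unicode('hello from pwkit'), 'INFO')  # hypothetical test message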
<SYSTEM_TASK:>
Fork a child process and read its CASA log output.
<END_TASK>
<USER_TASK:>
Description:
def forkandlog(function, filter='INFO5', debug=False):
"""Fork a child process and read its CASA log output.
function
A function to run in the child process
filter
The CASA log level filter to apply in the child process: less urgent
messages will not be shown. Valid values are strings: "DEBUG1", "INFO5",
... "INFO1", "INFO", "WARN", "SEVERE".
debug
If true, the standard output and error of the child process are *not*
redirected to /dev/null.
Some CASA tools produce important results that are *only* provided via log
messages. This is a problem for automation, since there’s no way for
Python code to intercept those log messages and extract the results of
interest. This function provides a framework for working around this
limitation: by forking a child process and sending its log output to a
pipe, the parent process can capture the log messages.
This function is a generator. It yields lines from the child process’ CASA
log output.
Because the child process is a fork of the parent, it inherits a complete
clone of the parent’s state at the time of forking. That means that the
*function* argument you pass it can do just about anything you’d do in a
regular program.
The child process’ standard output and error streams are redirected to
``/dev/null`` unless the *debug* argument is true. Note that the CASA log
output is redirected to a pipe that is neither of these streams. So, if
the function raises an unhandled Python exception, the Python traceback
will not pollute the CASA log output. But, by the same token, the calling
program will not be able to detect that the exception occurred except by
its impact on the expected log output.
""" |
import sys, os
readfd, writefd = os.pipe()
pid = os.fork()
if pid == 0:
# Child process. We never leave this branch.
#
# Log messages of priority >WARN are sent to stderr regardless of the
# status of log.showconsole(). The idea is for this subprocess to be
# something super lightweight and constrained, so it seems best to
# nullify stderr, and stdout, to not pollute the output of the calling
# process.
#
# I thought of using the default logger() setup and dup2'ing stderr to
# the pipe fd, but then if anything else gets printed to stderr (e.g.
# Python exception info), it'll get sent along the pipe too. The
# caller would have to be much more complex to be able to detect and
# handle such output.
os.close(readfd)
if not debug:
f = open(os.devnull, 'w')
os.dup2(f.fileno(), 1)
os.dup2(f.fileno(), 2)
sink = logger(filter=filter)
sink.setlogfile(b'/dev/fd/%d' % writefd)
function(sink)
sys.exit(0)
# Original process.
os.close(writefd)
with os.fdopen(readfd) as readhandle:
for line in readhandle:
yield line
info = os.waitpid(pid, 0)
if info[1]:
# Because we're a generator, this is the only way for us to signal if
# the process died. We could be rewritten as a context manager.
e = RuntimeError('logging child process PID %d exited '
'with error code %d' % tuple(info))
e.pid, e.exitcode = info
raise e |
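A minimal usage sketch; the child body and the log pattern being scraped are placeholders.
def child(sink):
    # run whatever CASA operation produces the log output of interest;
    # `sink` is the preconfigured log sink passed in by forkandlog
    pass
for line in forkandlog(child):
    if 'Beam' in line:  # hypothetical pattern of interest
        print(line.strip())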
<SYSTEM_TASK:>
Parse metadata returned from the metadataUrl of a USGS scene.
<END_TASK>
<USER_TASK:>
Description:
def _get_extended(scene, resp):
"""
Parse metadata returned from the metadataUrl of a USGS scene.
:param scene:
Dictionary representation of a USGS scene
:param resp:
Response object from requests/grequests
""" |
root = ElementTree.fromstring(resp.text)
items = root.findall("eemetadata:metadataFields/eemetadata:metadataField", NAMESPACES)
scene['extended'] = {item.attrib.get('name').strip(): xsi.get(item[0]) for item in items}
return scene |
<SYSTEM_TASK:>
Sends multiple non-blocking requests. Returns
<END_TASK>
<USER_TASK:>
Description:
def _async_requests(urls):
"""
Sends multiple non-blocking requests. Returns
a list of responses.
:param urls:
List of urls
""" |
session = FuturesSession(max_workers=30)
futures = [
session.get(url)
for url in urls
]
return [ future.result() for future in futures ] |
<SYSTEM_TASK:>
Request metadata for a given scene in a USGS dataset.
<END_TASK>
<USER_TASK:>
Description:
def metadata(dataset, node, entityids, extended=False, api_key=None):
"""
Request metadata for a given scene in a USGS dataset.
:param dataset:
:param node:
:param entityids:
:param extended:
Send a second request to the metadata url to get extended metadata on the scene.
:param api_key:
""" |
api_key = _get_api_key(api_key)
url = '{}/metadata'.format(USGS_API)
payload = {
"jsonRequest": payloads.metadata(dataset, node, entityids, api_key=api_key)
}
r = requests.post(url, payload)
response = r.json()
_check_for_usgs_error(response)
if extended:
metadata_urls = [_get_metadata_url(scene) for scene in response['data']]
results = _async_requests(metadata_urls)
# _get_extended mutates each scene dict in place; iterate explicitly so the
# work is not silently skipped under Python 3, where a bare map() is lazy.
for scene, result in zip(response['data'], results):
    _get_extended(scene, result)
return response |
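Hypothetical usage sketch; the dataset, node, scene ID, and the 'entityId' field name are assumptions, and an API key must already have been obtained.
response = metadata('LANDSAT_8_C1', 'EE', ['LC80130292014100LGN00'], extended=True)
for scene in response['data']:
    print(scene.get('entityId'), sorted(scene.get('extended', {}).keys()))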
<SYSTEM_TASK:>
Reraise an exception with its message modified to specify additional
<END_TASK>
<USER_TASK:>
Description:
def reraise_context(fmt, *args):
"""Reraise an exception with its message modified to specify additional
context.
This function tries to help provide context when a piece of code
encounters an exception while trying to get something done, and it wishes
to propagate contextual information farther up the call stack. It only
makes sense in Python 2, which does not provide Python 3’s `exception
chaining <https://www.python.org/dev/peps/pep-3134/>`_ functionality.
Instead of that more sophisticated infrastructure, this function just
modifies the textual message associated with the exception being raised.
If only a single argument is supplied, the exception text is prepended with
the stringification of that argument. If multiple arguments are supplied,
the first argument is treated as an old-fashioned ``printf``-type
(``%``-based) format string, and the remaining arguments are the formatted
values.
Example usage::
from pwkit import reraise_context
from pwkit.io import Path
filename = 'my-filename.txt'
try:
f = Path(filename).open('rt')
for line in f.readlines():
# do stuff ...
except Exception as e:
reraise_context('while reading "%s"', filename)
# The exception is reraised and so control leaves this function.
If an exception with text ``"bad value"`` were to be raised inside the
``try`` block in the above example, its text would be modified to read
``"while reading \"my-filename.txt\": bad value"``.
""" |
import sys
if len(args):
cstr = fmt % args
else:
cstr = text_type(fmt)
ex = sys.exc_info()[1]
if isinstance(ex, EnvironmentError):
ex.strerror = '%s: %s' % (cstr, ex.strerror)
ex.args = (ex.errno, ex.strerror)
else:
if len(ex.args):
cstr = '%s: %s' % (cstr, ex.args[0])
ex.args = (cstr, ) + ex.args[1:]
raise |
<SYSTEM_TASK:>
Return a shallow copy of this object.
<END_TASK>
<USER_TASK:>
Description:
def copy(self):
"""Return a shallow copy of this object.
""" |
new = self.__class__()
new.__dict__ = dict(self.__dict__)
return new |
<SYSTEM_TASK:>
Get a thread from 4chan via 4chan API.
<END_TASK>
<USER_TASK:>
Description:
def get_thread(self, thread_id, update_if_cached=True, raise_404=False):
"""Get a thread from 4chan via 4chan API.
Args:
thread_id (int): Thread ID
update_if_cached (bool): Whether the thread should be updated if it's already in our cache
raise_404 (bool): Raise an Exception if thread has 404'd
Returns:
:class:`basc_py4chan.Thread`: Thread object
""" |
# see if already cached
cached_thread = self._thread_cache.get(thread_id)
if cached_thread:
if update_if_cached:
cached_thread.update()
return cached_thread
res = self._requests_session.get(
self._url.thread_api_url(
thread_id = thread_id
)
)
# check if thread exists
if raise_404:
res.raise_for_status()
elif not res.ok:
return None
thread = Thread._from_request(self, res, thread_id)
self._thread_cache[thread_id] = thread
return thread |
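Usage sketch with a hypothetical board name and thread ID; the `topic` and `text_comment` attributes are assumed from the library's Thread and Post objects.
import basc_py4chan
board = basc_py4chan.Board('ck')
thread = board.get_thread(16299710)
if thread is not None:
    print(thread.topic.text_comment)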
<SYSTEM_TASK:>
Check if a thread exists or has 404'd.
<END_TASK>
<USER_TASK:>
Description:
def thread_exists(self, thread_id):
"""Check if a thread exists or has 404'd.
Args:
thread_id (int): Thread ID
Returns:
bool: Whether the given thread exists on this board.
""" |
return self._requests_session.head(
self._url.thread_api_url(
thread_id=thread_id
)
).ok |
<SYSTEM_TASK:>
Returns all threads on a certain page.
<END_TASK>
<USER_TASK:>
Description:
def get_threads(self, page=1):
"""Returns all threads on a certain page.
Gets a list of Thread objects for every thread on the given page. If a thread is
already in our cache, the cached version is returned and thread.want_update is
set to True on the specific thread object.
Pages on 4chan are indexed from 1 onwards.
Args:
page (int): Page to request threads for. Defaults to the first page.
Returns:
list of :mod:`basc_py4chan.Thread`: List of Thread objects representing the threads on the given page.
""" |
url = self._url.page_url(page)
return self._request_threads(url) |
<SYSTEM_TASK:>
Return the ID of every thread on this board.
<END_TASK>
<USER_TASK:>
Description:
def get_all_thread_ids(self):
"""Return the ID of every thread on this board.
Returns:
list of ints: List of IDs of every thread on this board.
""" |
json = self._get_json(self._url.thread_list())
return [thread['no'] for page in json for thread in page['threads']] |
<SYSTEM_TASK:>
Return every thread on this board.
<END_TASK>
<USER_TASK:>
Description:
def get_all_threads(self, expand=False):
"""Return every thread on this board.
If not expanded, result is same as get_threads run across all board pages,
with last 3-5 replies included.
Uses the catalog when not expanding, and uses the flat thread ID listing
at /{board}/threads.json when expanding for more efficient resource usage.
If expanded, all data of all threads is returned with no omitted posts.
Args:
expand (bool): Whether to download every single post of every thread.
If enabled, this option can be very slow and bandwidth-intensive.
Returns:
list of :mod:`basc_py4chan.Thread`: List of Thread objects representing every thread on this board.
""" |
if not expand:
return self._request_threads(self._url.catalog())
thread_ids = self.get_all_thread_ids()
threads = [self.get_thread(id, raise_404=False) for id in thread_ids]
return [thread for thread in threads if thread is not None]  # drop 404'd threads; keep the documented list return type
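Usage sketch iterating over every thread on a hypothetical board; the `id` and `posts` attributes are assumed from the library's Thread objects.
import basc_py4chan
board = basc_py4chan.Board('wg')
for thread in board.get_all_threads():
    print(thread.id, len(thread.posts))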
<SYSTEM_TASK:>
Update all threads currently stored in our cache.
<END_TASK>
<USER_TASK:>
Description:
def refresh_cache(self, if_want_update=False):
"""Update all threads currently stored in our cache.""" |
for thread in tuple(self._thread_cache.values()):
if if_want_update:
if not thread.want_update:
continue
thread.update() |
<SYSTEM_TASK:>
Maintaining compatibility with different CASA versions is a pain.
<END_TASK>
<USER_TASK:>
Description:
def modify_environment(self, env):
"""Maintaining compatibility with different CASA versions is a pain.""" |
# Ugh. I don't see any way out of special-casing the RPM-based
# installations ... which only exist on NRAO computers, AFAICT.
# Hardcoding 64-bitness, hopefully that won't come back to bite me.
is_rpm_install = self._rootdir.startswith('/usr/lib64/casapy/release/')
def path(*args):
return os.path.join(self._rootdir, *args)
env['CASAROOT'] = path()
env['CASAPATH'] = ' '.join([path(),
os.uname()[0].lower(),
'local',
os.uname()[1]])
if is_rpm_install:
env['CASA_INSTALLATION_TYPE'] = 'rpm-installation'
prepend_environ_path(env, 'PATH', '/usr/lib64/casa/01/bin')
prepend_environ_path(env, 'PATH', path('bin'))
else:
env['CASA_INSTALLATION_TYPE'] = 'tar-installation'
lib = 'lib64' if os.path.isdir(path('lib64')) else 'lib'
# 4.3.1 comes with both python2.6 and python2.7???
pydir = sorted(glob.glob(path(lib, 'python2*')))[-1]
tcldir = path('share', 'tcl')
if os.path.isdir(tcldir):
env['TCL_LIBRARY'] = tcldir
else:
tcl_versioned_dirs = glob.glob(path('share', 'tcl*'))
if len(tcl_versioned_dirs):
env['TCL_LIBRARY'] = tcl_versioned_dirs[-1]
bindir = path(lib, 'casa', 'bin')
if not os.path.isdir(bindir):
bindir = path(lib, 'casapy', 'bin')
prepend_environ_path(env, 'PATH', bindir)
env['CASA_INSTALLATION_DIRECTORY'] = env['CASAROOT']
env['__CASAPY_PYTHONDIR'] = pydir
env['MATPLOTLIBRC'] = path('share', 'matplotlib')
env['PYTHONHOME'] = env['CASAROOT']
env['TK_LIBRARY'] = path('share', 'tk')
env['QT_PLUGIN_PATH'] = path(lib, 'qt4', 'plugins')
prepend_environ_path(env, 'LD_LIBRARY_PATH', path(lib))
# should we overwrite PYTHONPATH instead?
prepend_environ_path(env, 'PYTHONPATH', os.path.join(pydir, 'site-packages'))
prepend_environ_path(env, 'PYTHONPATH', os.path.join(pydir, 'heuristics'))
prepend_environ_path(env, 'PYTHONPATH', pydir)
return env |
<SYSTEM_TASK:>
Compute background information for a source in one or more energy bands.
<END_TASK>
<USER_TASK:>
Description:
def compute_bgband (evtpath, srcreg, bkgreg, ebins, env=None):
"""Compute background information for a source in one or more energy bands.
evtpath
Path to a CIAO events file
srcreg
String specifying the source region to consider; use 'region(path.reg)' if you
have the region saved in a file.
bkgreg
String specifying the background region to consider; same format as srcreg
ebins
Iterable of 2-tuples giving low and high bounds of the energy bins to
consider, measured in eV.
env
An optional CiaoEnvironment instance; default settings are used if unspecified.
Returns a DataFrame containing at least the following columns:
elo
The low bound of this energy bin, in eV.
ehi
The high bound of this energy bin, in eV.
ewidth
The width of the bin in eV; simply `abs(ehi - elo)`.
nsrc
The number of events within the specified source region and energy range.
nbkg
The number of events within the specified background region and energy range.
nbkg_scaled
The number of background events scaled to the source area; not an integer.
nsrc_subbed
The estimated number of non-background events in the source region; simply
`nsrc - nbkg_scaled`.
log_prob_bkg
The logarithm of the probability that all counts in the source region are due
to background events.
src_sigma
The confidence of source detection in sigma inferred from log_prob_bkg.
The probability of backgrounditude is computed as:
b^s * exp (-b) / s!
where `b` is `nbkg_scaled` and `s` is `nsrc`. The confidence of source detection is
computed as:
sqrt(2) * erfcinv (prob_bkg)
where `erfcinv` is the inverse complementary error function.
""" |
import numpy as np
import pandas as pd
from scipy.special import erfcinv, gammaln
if env is None:
from . import CiaoEnvironment
env = CiaoEnvironment ()
srcarea = get_region_area (env, evtpath, srcreg)
bkgarea = get_region_area (env, evtpath, bkgreg)
srccounts = [count_events (env, evtpath, '[sky=%s][energy=%d:%d]' % (srcreg, elo, ehi))
for elo, ehi in ebins]
bkgcounts = [count_events (env, evtpath, '[sky=%s][energy=%d:%d]' % (bkgreg, elo, ehi))
for elo, ehi in ebins]
df = pd.DataFrame ({
'elo': [t[0] for t in ebins],
'ehi': [t[1] for t in ebins],
'nsrc': srccounts,
'nbkg': bkgcounts
})
df['ewidth'] = np.abs (df['ehi'] - df['elo'])
df['nbkg_scaled'] = df['nbkg'] * srcarea / bkgarea
df['log_prob_bkg'] = df['nsrc'] * np.log (df['nbkg_scaled']) - df['nbkg_scaled'] - gammaln (df['nsrc'] + 1)
df['src_sigma'] = np.sqrt (2) * erfcinv (np.exp (df['log_prob_bkg']))
df['nsrc_subbed'] = df['nsrc'] - df['nbkg_scaled']
return df |
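Usage sketch; the events file and region strings are hypothetical.
ebins = [(500, 2000), (2000, 7000)]  # soft and hard bands, in eV
df = compute_bgband('acis_evt2.fits', 'region(src.reg)', 'region(bkg.reg)', ebins)
print(df[['elo', 'ehi', 'nsrc', 'nbkg_scaled', 'src_sigma']])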
<SYSTEM_TASK:>
Run the CIAO "srcflux" script and retrieve its results.
<END_TASK>
<USER_TASK:>
Description:
def simple_srcflux(env, infile=None, psfmethod='arfcorr', conf=0.68,
verbose=0, **kwargs):
"""Run the CIAO "srcflux" script and retrieve its results.
*infile*
The input events file; must be specified. The computation is done
in a temporary directory, so this path — and all others passed in
as arguments — **must be made absolute**.
*psfmethod* = "arfcorr"
The PSF modeling method to be used; see the "srcflux" documentation.
*conf* = 0.68
The confidence limit to detect. We default to 1 sigma, instead of
the 90% mark, which is the srcflux default.
*verbose* = 0
The level of verbosity to be used by the tool.
*kwargs*
Remaining keyword arguments are passed to the tool as command-line
keyword arguments, with values stringified.
Returns:
A :class:`pandas.DataFrame` extracted from the results table generated
by the tool. There is one row for each source analyzed; in common usage,
this means that there will be one row.
""" |
from ...io import Path
import shutil, signal, tempfile
if infile is None:
raise ValueError('must specify infile')
kwargs.update(dict(
infile = infile,
psfmethod = psfmethod,
conf = conf,
verbose = verbose,
clobber = 'yes',
outroot = 'sf',
))
argv = ['srcflux'] + ['%s=%s' % t for t in kwargs.items()]
argstr = ' '.join(argv)
tempdir = None
try:
tempdir = tempfile.mkdtemp(prefix='srcflux')
proc = env.launch(argv, cwd=tempdir, shell=False)
retcode = proc.wait()
if retcode > 0:
raise RuntimeError('command "%s" failed with exit code %d' % (argstr, retcode))
elif retcode == -signal.SIGINT:
raise KeyboardInterrupt()
elif retcode < 0:
raise RuntimeError('command "%s" killed by signal %d' % (argstr, -retcode))
tables = list(Path(tempdir).glob('*.flux'))
if len(tables) != 1:
raise RuntimeError('expected exactly one flux table from srcflux; got %d' % len(tables))
return tables[0].read_fits_bintable(hdu=1)
finally:
if tempdir is not None:
shutil.rmtree(tempdir, onerror=_rmtree_error) |
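Usage sketch; the paths are hypothetical (and absolute, as required), and the `pos` keyword is assumed to be a valid srcflux parameter passed through via kwargs.
from pwkit.environments.ciao import CiaoEnvironment
env = CiaoEnvironment()
fluxes = simple_srcflux(env, infile='/data/obs123/repro/acis_evt2.fits',
                        pos='/data/obs123/src.pos')
print(fluxes)  # one row per analyzed source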
<SYSTEM_TASK:>
Create a calculator initialized to reproduce Figure 9 from FK10.
<END_TASK>
<USER_TASK:>
Description:
def new_for_fk10_fig9(cls, shlib_path):
"""Create a calculator initialized to reproduce Figure 9 from FK10.
This is mostly to provide a handy way to create a new
:class:`Calculator` instance that is initialized with reasonable
values for all of its parameters.
""" |
inst = (cls(shlib_path)
.set_thermal_background(2.1e7, 3e9)
.set_bfield(48)
.set_edist_powerlaw(0.016, 4.0, 3.7, 5e9/3)
.set_freqs(100, 0.5, 50)
.set_hybrid_parameters(12, 12)
.set_ignore_q_terms(False)
.set_obs_angle(50 * np.pi / 180)
.set_padist_gaussian_loss_cone(0.5 * np.pi, 0.4)
.set_trapezoidal_integration(15))
# haven't yet figured out how to deal with this part:
inst.in_vals[0] = 1.33e18
inst.in_vals[1] = 6e8
return inst |
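Sketch: the shared-library path is hypothetical; it should point at the compiled FK10 transfer code that Calculator wraps.
import numpy as np
calc = Calculator.new_for_fk10_fig9('/path/to/fk10_shlib.so')
calc.set_obs_angle(60 * np.pi / 180)  # tweak a parameter before computing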
<SYSTEM_TASK:>
Set the strength of the local magnetic field.
<END_TASK>
<USER_TASK:>
Description:
def set_bfield(self, B_G):
"""Set the strength of the local magnetic field.
**Call signature**
*B_G*
The magnetic field strength, in Gauss
Returns
*self* for convenience in chaining.
""" |
if not (B_G > 0):
raise ValueError('must have B_G > 0; got %r' % (B_G,))
self.in_vals[IN_VAL_B] = B_G
return self |
<SYSTEM_TASK:>
Set B to probe a certain harmonic number.
<END_TASK>
<USER_TASK:>
Description:
def set_bfield_for_s0(self, s0):
"""Set B to probe a certain harmonic number.
**Call signature**
*s0*
The harmonic number to probe at the lowest frequency
Returns
*self* for convenience in chaining.
This just proceeds from the relation ``nu = s nu_c = s e B / 2 pi m_e
c``. Since *s* and *nu* scale with each other, if multiple frequencies
are being probed, the harmonic numbers being probed will scale in the
same way.
""" |
if not (s0 > 0):
raise ValueError('must have s0 > 0; got %r' % (s0,))
B0 = 2 * np.pi * cgs.me * cgs.c * self.in_vals[IN_VAL_FREQ0] / (cgs.e * s0)
self.in_vals[IN_VAL_B] = B0
return self |
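Worked sketch of the relation used above, with the same pwkit cgs constants: for f0 = 1 GHz and s0 = 10, B0 = 2*pi*m_e*c*f0/(e*s0) is roughly 36 G, consistent with nu_c of about 2.8 MHz per Gauss.
import numpy as np
from pwkit import cgs
f0_hz, s0 = 1e9, 10.0
B0 = 2 * np.pi * cgs.me * cgs.c * f0_hz / (cgs.e * s0)  # ~35.7 Gauss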
<SYSTEM_TASK:>
Set the energy distribution function to a power law.
<END_TASK>
<USER_TASK:>
Description:
def set_edist_powerlaw(self, emin_mev, emax_mev, delta, ne_cc):
"""Set the energy distribution function to a power law.
**Call signature**
*emin_mev*
The minimum energy of the distribution, in MeV
*emax_mev*
The maximum energy of the distribution, in MeV
*delta*
The power-law index of the distribution
*ne_cc*
The number density of energetic electrons, in cm^-3.
Returns
*self* for convenience in chaining.
""" |
if not (emin_mev >= 0):
raise ValueError('must have emin_mev >= 0; got %r' % (emin_mev,))
if not (emax_mev >= emin_mev):
raise ValueError('must have emax_mev >= emin_mev; got %r, %r' % (emax_mev, emin_mev))
if not (delta >= 0):
raise ValueError('must have delta >= 0; got %r' % (delta,))
if not (ne_cc >= 0):
raise ValueError('must have ne_cc >= 0; got %r' % (ne_cc,))
self.in_vals[IN_VAL_EDIST] = EDIST_PLW
self.in_vals[IN_VAL_EMIN] = emin_mev
self.in_vals[IN_VAL_EMAX] = emax_mev
self.in_vals[IN_VAL_DELTA1] = delta
self.in_vals[IN_VAL_NB] = ne_cc
return self |
<SYSTEM_TASK:>
Set the energy distribution function to a power law in the Lorentz factor
<END_TASK>
<USER_TASK:>
Description:
def set_edist_powerlaw_gamma(self, gmin, gmax, delta, ne_cc):
"""Set the energy distribution function to a power law in the Lorentz factor
**Call signature**
*gmin*
The minimum Lorentz factor of the distribution
*gmax*
The maximum Lorentz factor of the distribution
*delta*
The power-law index of the distribution
*ne_cc*
The number density of energetic electrons, in cm^-3.
Returns
*self* for convenience in chaining.
""" |
if not (gmin >= 1):
raise ValueError('must have gmin >= 1; got %r' % (gmin,))
if not (gmax >= gmin):
raise ValueError('must have gmax >= gmin; got %r, %r' % (gmax, gmin))
if not (delta >= 0):
raise ValueError('must have delta >= 0; got %r' % (delta,))
if not (ne_cc >= 0):
raise ValueError('must have ne_cc >= 0; got %r' % (ne_cc,))
self.in_vals[IN_VAL_EDIST] = EDIST_PLG
self.in_vals[IN_VAL_EMIN] = (gmin - 1) * E0_MEV
self.in_vals[IN_VAL_EMAX] = (gmax - 1) * E0_MEV
self.in_vals[IN_VAL_DELTA1] = delta
self.in_vals[IN_VAL_NB] = ne_cc
return self |
<SYSTEM_TASK:>
Set the frequency grid on which to perform the calculations.
<END_TASK>
<USER_TASK:>
Description:
def set_freqs(self, n, f_lo_ghz, f_hi_ghz):
"""Set the frequency grid on which to perform the calculations.
**Call signature**
*n*
The number of frequency points to sample.
*f_lo_ghz*
The lowest frequency to sample, in GHz.
*f_hi_ghz*
The highest frequency to sample, in GHz.
Returns
*self* for convenience in chaining.
""" |
if not (f_lo_ghz >= 0):
raise ValueError('must have f_lo_ghz >= 0; got %r' % (f_lo_ghz,))
if not (f_hi_ghz >= f_lo_ghz):
raise ValueError('must have f_hi_ghz >= f_lo_ghz; got %r, %r' % (f_hi_ghz, f_lo_ghz))
if not n >= 1:
raise ValueError('must have n >= 1; got %r' % (n,))
self.in_vals[IN_VAL_NFREQ] = n
self.in_vals[IN_VAL_FREQ0] = f_lo_ghz * 1e9 # GHz => Hz
self.in_vals[IN_VAL_LOGDFREQ] = np.log10(f_hi_ghz / f_lo_ghz) / n
return self |
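Sketch of the implied sampling grid, assuming the library evaluates f_lo * 10**(i * logdfreq) for i = 0 .. n-1 (an assumption about the underlying FK10 code).
import numpy as np
n, f_lo_ghz, f_hi_ghz = 100, 0.5, 50.0
logdf = np.log10(f_hi_ghz / f_lo_ghz) / n
freqs_ghz = f_lo_ghz * 10 ** (np.arange(n) * logdf)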
<SYSTEM_TASK:>
Set the observer angle relative to the field.
<END_TASK>
<USER_TASK:>
Description:
def set_obs_angle(self, theta_rad):
"""Set the observer angle relative to the field.
**Call signature**
*theta_rad*
The angle between the ray path and the local magnetic field,
in radians.
Returns
*self* for convenience in chaining.
""" |
self.in_vals[IN_VAL_THETA] = theta_rad * 180 / np.pi # rad => deg
return self |
<SYSTEM_TASK:>
Set the code to calculate results at just one frequency.
<END_TASK>
<USER_TASK:>
Description:
def set_one_freq(self, f_ghz):
"""Set the code to calculate results at just one frequency.
**Call signature**
*f_ghz*
The frequency to sample, in GHz.
Returns
*self* for convenience in chaining.
""" |
if not (f_ghz >= 0):
raise ValueError('must have f_ghz >= 0; got %r' % (f_ghz,))
self.in_vals[IN_VAL_NFREQ] = 1
self.in_vals[IN_VAL_FREQ0] = f_ghz * 1e9 # GHz -> Hz
self.in_vals[IN_VAL_LOGDFREQ] = 1.0
return self |
<SYSTEM_TASK:>
Set the pitch-angle distribution to a Gaussian loss cone.
<END_TASK>
<USER_TASK:>
Description:
def set_padist_gaussian_loss_cone(self, boundary_rad, expwidth):
"""Set the pitch-angle distribution to a Gaussian loss cone.
**Call signature**
*boundary_rad*
The angle inside which there are no losses, in radians.
*expwidth*
The characteristic width of the Gaussian loss profile
*in direction-cosine units*.
Returns
*self* for convenience in chaining.
See ``OnlineI.pdf`` in the Supplementary Data for a precise
definition. (And note the distinction between α_c and μ_c since not
everything is direction cosines.)
""" |
self.in_vals[IN_VAL_PADIST] = PADIST_GLC
self.in_vals[IN_VAL_LCBDY] = boundary_rad * 180 / np.pi # rad => deg
self.in_vals[IN_VAL_DELTAMU] = expwidth
return self |
<SYSTEM_TASK:>
Set the properties of the background thermal plasma.
<END_TASK>
<USER_TASK:>
Description:
def set_thermal_background(self, T_K, nth_cc):
"""Set the properties of the background thermal plasma.
**Call signature**
*T_K*
The temperature of the background plasma, in Kelvin.
*nth_cc*
The number density of thermal electrons, in cm^-3.
Returns
*self* for convenience in chaining.
Note that the parameters set here are the same as the ones that
describe the thermal electron distribution, if you choose one of the
electron energy distributions that explicitly models a thermal
component ("thm", "tnt", "tnp", "tng", "kappa" in the code's
terminology). For the power-law-y electron distributions, these
parameters are used to calculate dispersion parameters (e.g.
refractive indices) and a free-free contribution, but their
synchrotron contribution is ignored.
""" |
if not (T_K >= 0):
raise ValueError('must have T_K >= 0; got %r' % (T_K,))
if not (nth_cc >= 0):
raise ValueError('must have nth_cc >= 0; got %r' % (nth_cc,))
self.in_vals[IN_VAL_T0] = T_K
self.in_vals[IN_VAL_N0] = nth_cc
return self |
<SYSTEM_TASK:>
Set the code to use trapezoidal integration.
<END_TASK>
<USER_TASK:>
Description:
def set_trapezoidal_integration(self, n):
"""Set the code to use trapezoidal integration.
**Call signature**
*n*
Use this many nodes
Returns
*self* for convenience in chaining.
""" |
if not (n >= 2):
raise ValueError('must have n >= 2; got %r' % (n,))
self.in_vals[IN_VAL_INTEG_METH] = n + 1
return self |
<SYSTEM_TASK:>
Figure out emission and absorption coefficients for the current parameters.
<END_TASK>
<USER_TASK:>
Description:
def find_rt_coefficients(self, depth0=None):
"""Figure out emission and absorption coefficients for the current parameters.
**Argument**
*depth0* (default None)
A first guess to use for a good integration depth, in cm. If None,
the most recent value is used.
**Return value**
A tuple ``(j_O, alpha_O, j_X, alpha_X)``, where:
*j_O*
The O-mode emission coefficient, in erg/s/cm^3/Hz/sr.
*alpha_O*
The O-mode absorption coefficient, in cm^-1.
*j_X*
The X-mode emission coefficient, in erg/s/cm^3/Hz/sr.
*alpha_X*
The X-mode absorption coefficient, in cm^-1.
The main outputs of the FK10 code are intensities and "damping
factors" describing a radiative transfer integration of the emission
from a homogeneous source. But there are times when we'd rather just
know what the actual emission and absorption coefficients are. These
can be backed out from the FK10 outputs, but only if the "damping
factor" takes on an intermediate value not extremely close to either 0
or 1. Unfortunately, there's no way for us to know a priori what
choice of the "depth" parameter will yield a nice value for the
damping factor. This routine automatically figures one out, by
repeatedly running the calculation.
To keep things simple, this routine requires that you only be solving
for coefficients for one frequency at a time (e.g.,
:meth:`set_one_freq`).
""" |
if self.in_vals[IN_VAL_NFREQ] != 1:
raise Exception('must have nfreq=1 to run Calculator.find_rt_coefficients()')
if depth0 is not None:
depth = depth0
self.in_vals[IN_VAL_DEPTH] = depth0
else:
depth = self.in_vals[IN_VAL_DEPTH]
scale_factor = 100
buf = np.empty((1, 5), dtype=np.float32)
def classify(damping_factor):
if damping_factor >= 0.99:
return 1
if damping_factor <= 0.01:
return -1
return 0
DONE, SHRINK, GROW, ABORT = 0, 1, 2, 3
actions = {
(-1, -1): SHRINK,
(-1, 0): SHRINK,
(-1, 1): ABORT,
( 0, -1): SHRINK,
( 0, 0): DONE,
( 0, 1): GROW,
( 1, -1): ABORT,
( 1, 0): GROW,
( 1, 1): GROW,
}
last_change = DONE # our first change will be treated as a change in direction
for attempt_number in range(20):
self.compute_lowlevel(out_values=buf)
co = classify(buf[0,OUT_VAL_ODAMP])
cx = classify(buf[0,OUT_VAL_XDAMP])
action = actions[co, cx]
###print('Z', attempt_number, self.in_vals[IN_VAL_DEPTH], last_change, buf, co, cx, action)
if action == DONE:
break
elif action == ABORT:
raise Exception('depths of X and O modes are seriously incompatible')
elif action == GROW:
if last_change != GROW:
scale_factor *= 0.3
depth *= scale_factor
last_change = GROW
elif action == SHRINK:
if last_change != SHRINK:
scale_factor *= 0.3
depth /= scale_factor
last_change = SHRINK
self.in_vals[IN_VAL_DEPTH] = depth
else:
# If we get here, we never explicitly quit the loop
raise Exception('depth-finding algorithm did not converge!')
# OK, we found some good depths! Now calculate the RT coefficients. I believe that
# I'm doing this right ...
sfu_to_specintens = 1e4 * cgs.cgsperjy * cgs.cmperau**2 / self.in_vals[IN_VAL_AREA]
damp_X = buf[0,OUT_VAL_XDAMP]
alpha_X = -np.log(damp_X) / depth
si_X = buf[0,OUT_VAL_XINT] * sfu_to_specintens
j_X = si_X * alpha_X / (1 - damp_X)
damp_O = buf[0,OUT_VAL_ODAMP]
alpha_O = -np.log(damp_O) / depth
si_O = buf[0,OUT_VAL_OINT] * sfu_to_specintens
j_O = si_O * alpha_O / (1 - damp_O)
return (j_O, alpha_O, j_X, alpha_X) |
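Usage sketch, assuming `calc` is an already-configured Calculator; the ratio j/alpha is the source function reached in the optically-thick limit.
j_O, alpha_O, j_X, alpha_X = calc.set_one_freq(15.0).find_rt_coefficients()
S_X = j_X / alpha_X  # X-mode source function, erg/s/cm^2/Hz/sr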
<SYSTEM_TASK:>
Figure out total-intensity emission and absorption coefficients for the
<END_TASK>
<USER_TASK:>
Description:
def find_rt_coefficients_tot_intens(self, depth0=None):
"""Figure out total-intensity emission and absorption coefficients for the
current parameters.
**Argument**
*depth0* (default None)
A first guess to use for a good integration depth, in cm. If None,
the most recent value is used.
**Return value**
A tuple ``(j_I, alpha_I)``, where:
*j_I*
The total intensity emission coefficient, in erg/s/cm^3/Hz/sr.
*alpha_I*
The total intensity absorption coefficient, in cm^-1.
See :meth:`find_rt_coefficients` for an explanation how this routine
works. This version merely postprocesses the results from that method
to convert the coefficients to refer to total intensity.
""" |
j_O, alpha_O, j_X, alpha_X = self.find_rt_coefficients(depth0=depth0)
j_I = j_O + j_X
alpha_I = 0.5 * (alpha_O + alpha_X) # uhh... right?
return (j_I, alpha_I) |
<SYSTEM_TASK:>
Return a function that joins paths onto some base directory.
<END_TASK>
<USER_TASK:>
Description:
def make_path_func (*baseparts):
"""Return a function that joins paths onto some base directory.""" |
from os.path import join
base = join (*baseparts)
def path_func (*args):
return join (base, *args)
return path_func |
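Example with hypothetical paths:
datapath = make_path_func('/data', 'run1')
datapath('logs', 'out.txt')  # -> '/data/run1/logs/out.txt'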