code | docs |
---|---|
def clear_all():
_TABLES.clear()
_COLUMNS.clear()
_STEPS.clear()
_BROADCASTS.clear()
_INJECTABLES.clear()
_TABLE_CACHE.clear()
_COLUMN_CACHE.clear()
_INJECTABLE_CACHE.clear()
for m in _MEMOIZED.values():
m.value.clear_cached()
_MEMOIZED.clear()
logger.debug('pipeline state cleared') | Clear any and all stored state from Orca. |
def clear_cache(scope=None):
if not scope:
_TABLE_CACHE.clear()
_COLUMN_CACHE.clear()
_INJECTABLE_CACHE.clear()
for m in _MEMOIZED.values():
m.value.clear_cached()
logger.debug('pipeline cache cleared')
else:
for d in (_TABLE_CACHE, _COLUMN_CACHE, _INJECTABLE_CACHE):
items = tz.valfilter(lambda x: x.scope == scope, d)
for k in items:
del d[k]
for m in tz.filter(lambda x: x.scope == scope, _MEMOIZED.values()):
m.value.clear_cached()
logger.debug('cleared cached values with scope {!r}'.format(scope)) | Clear all cached data.
Parameters
----------
scope : {None, 'step', 'iteration', 'forever'}, optional
Clear cached values with a given scope.
By default all cached values are removed. |
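A minimal usage sketch of the function above, assuming these functions are exposed at the top level of an orca-style package:

import orca

# Drop only values cached with 'step' scope, e.g. between pipeline steps.
orca.clear_cache(scope='step')

# Drop every cached value regardless of scope.
orca.clear_cache()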
def _collect_variables(names, expressions=None):
# Map registered variable labels to expressions.
if not expressions:
expressions = []
offset = len(names) - len(expressions)
labels_map = dict(tz.concatv(
tz.compatibility.zip(names[:offset], names[:offset]),
tz.compatibility.zip(names[offset:], expressions)))
all_variables = tz.merge(_INJECTABLES, _TABLES)
variables = {}
for label, expression in labels_map.items():
# In the future, more registered variable expressions could be
# supported. Currently supports names of registered variables
# and references to table columns.
if '.' in expression:
# Registered variable expression refers to column.
table_name, column_name = expression.split('.')
table = get_table(table_name)
variables[label] = table.get_column(column_name)
else:
thing = all_variables[expression]
if isinstance(thing, (_InjectableFuncWrapper, TableFuncWrapper)):
# Registered variable object is function.
variables[label] = thing()
else:
variables[label] = thing
return variables | Map labels and expressions to registered variables.
Handles argument matching.
Example:
_collect_variables(names=['zones', 'zone_id'],
expressions=['parcels.zone_id'])
Would return a dict representing:
{'zones': <DataFrameWrapper for zones>,
'zone_id': <pandas.Series for parcels.zone_id>}
Parameters
----------
names : list of str
List of registered variable names and/or labels.
If mixing names and labels, labels must come at the end.
expressions : list of str, optional
List of registered variable expressions for labels defined
at end of `names`. Length must match the number of labels.
Returns
-------
variables : dict
Keys match `names`. Values correspond to registered variables,
which may be wrappers or evaluated functions if appropriate. |
def add_table(
table_name, table, cache=False, cache_scope=_CS_FOREVER,
copy_col=True):
if isinstance(table, Callable):
table = TableFuncWrapper(table_name, table, cache=cache,
cache_scope=cache_scope, copy_col=copy_col)
else:
table = DataFrameWrapper(table_name, table, copy_col=copy_col)
# clear any cached data from a previously registered table
table.clear_cached()
logger.debug('registering table {!r}'.format(table_name))
_TABLES[table_name] = table
return table | Register a table with Orca.
Parameters
----------
table_name : str
Should be globally unique to this table.
table : pandas.DataFrame or function
If a function, the function should return a DataFrame.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
cache : bool, optional
Whether to cache the results of a provided callable. Does not
apply if `table` is a DataFrame.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
copy_col : bool, optional
Whether to return copies when evaluating columns.
Returns
-------
wrapped : `DataFrameWrapper` or `TableFuncWrapper` |
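A short, hedged example of registering both a concrete DataFrame and a lazily evaluated table function; the table names and data are illustrative only:

import pandas as pd
import orca

households = pd.DataFrame(
    {'building_id': [1, 2, 2], 'income': [45000, 72000, 30000]},
    index=pd.Index([10, 11, 12], name='household_id'))

# Registering a DataFrame returns a DataFrameWrapper.
orca.add_table('households', households)

# Registering a callable returns a TableFuncWrapper that is evaluated
# lazily, with optional caching.
def jobs():
    return pd.DataFrame({'sector': ['retail', 'office']})

orca.add_table('jobs', jobs, cache=True, cache_scope='iteration')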
def table(
table_name=None, cache=False, cache_scope=_CS_FOREVER, copy_col=True):
def decorator(func):
if table_name:
name = table_name
else:
name = func.__name__
add_table(
name, func, cache=cache, cache_scope=cache_scope,
copy_col=copy_col)
return func
return decorator | Decorates functions that return DataFrames.
Decorator version of `add_table`. Table name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected. |
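The decorator form, sketched with a hypothetical 'buildings' table that receives the 'households' table registered in the earlier sketch by argument-name injection:

@orca.table(cache=True)
def buildings(households):
    # 'households' is injected as a DataFrameWrapper.
    df = households.to_frame(columns=['building_id'])
    return df.groupby('building_id').size().to_frame('n_households')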
def get_table(table_name):
table = get_raw_table(table_name)
if isinstance(table, TableFuncWrapper):
table = table()
return table | Get a registered table.
Decorated functions will be converted to `DataFrameWrapper`.
Parameters
----------
table_name : str
Returns
-------
table : `DataFrameWrapper` |
def table_type(table_name):
table = get_raw_table(table_name)
if isinstance(table, DataFrameWrapper):
return 'dataframe'
elif isinstance(table, TableFuncWrapper):
return 'function' | Returns the type of a registered table.
The type can be either "dataframe" or "function".
Parameters
----------
table_name : str
Returns
-------
table_type : {'dataframe', 'function'} |
def add_column(
table_name, column_name, column, cache=False, cache_scope=_CS_FOREVER):
if isinstance(column, Callable):
column = \
_ColumnFuncWrapper(
table_name, column_name, column,
cache=cache, cache_scope=cache_scope)
else:
column = _SeriesWrapper(table_name, column_name, column)
# clear any cached data from a previously registered column
column.clear_cached()
logger.debug('registering column {!r} on table {!r}'.format(
column_name, table_name))
_COLUMNS[(table_name, column_name)] = column
return column | Add a new column to a table from a Series or callable.
Parameters
----------
table_name : str
Table with which the column will be associated.
column_name : str
Name for the column.
column : pandas.Series or callable
Series should have an index matching the table to which it
is being added. If a callable, the function's argument
names and keyword argument values will be matched to
registered variables when the function needs to be
evaluated by Orca. The function should return a Series.
cache : bool, optional
Whether to cache the results of a provided callable. Does not
apply if `column` is a Series.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline. |
def column(table_name, column_name=None, cache=False, cache_scope=_CS_FOREVER):
def decorator(func):
if column_name:
name = column_name
else:
name = func.__name__
add_column(
table_name, name, func, cache=cache, cache_scope=cache_scope)
return func
return decorator | Decorates functions that return a Series.
Decorator version of `add_column`. Series index must match
the named table. Column name defaults to name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
The index of the returned Series must match the named table. |
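A hedged sketch of the decorator, adding a derived column to the hypothetical 'households' table registered above:

@orca.column('households', 'income_thousands')
def income_in_thousands(households):
    # Return a Series aligned to the table's index.
    return households.to_frame(columns=['income'])['income'] / 1000.0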
def _columns_for_table(table_name):
return {cname: col
for (tname, cname), col in _COLUMNS.items()
if tname == table_name} | Return all of the columns registered for a given table.
Parameters
----------
table_name : str
Returns
-------
columns : dict of column wrappers
Keys will be column names. |
def get_raw_column(table_name, column_name):
try:
return _COLUMNS[(table_name, column_name)]
except KeyError:
raise KeyError('column {!r} not found for table {!r}'.format(
column_name, table_name)) | Get a wrapped, registered column.
This function cannot return columns that are part of wrapped
DataFrames, it's only for columns registered directly through Orca.
Parameters
----------
table_name : str
column_name : str
Returns
-------
wrapped : _SeriesWrapper or _ColumnFuncWrapper |
def _memoize_function(f, name, cache_scope=_CS_FOREVER):
cache = {}
@wraps(f)
def wrapper(*args, **kwargs):
try:
cache_key = (
args or None, frozenset(kwargs.items()) if kwargs else None)
in_cache = cache_key in cache
except TypeError:
raise TypeError(
'function arguments must be hashable for memoization')
if _CACHING and in_cache:
return cache[cache_key]
else:
result = f(*args, **kwargs)
cache[cache_key] = result
return result
wrapper.__wrapped__ = f
wrapper.cache = cache
wrapper.clear_cached = lambda: cache.clear()
_MEMOIZED[name] = CacheItem(name, wrapper, cache_scope)
return wrapper | Wraps a function for memoization and ties its cache into the
Orca caching system.
Parameters
----------
f : function
name : str
Name of injectable.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline. |
def add_injectable(
name, value, autocall=True, cache=False, cache_scope=_CS_FOREVER,
memoize=False):
if isinstance(value, Callable):
if autocall:
value = _InjectableFuncWrapper(
name, value, cache=cache, cache_scope=cache_scope)
# clear any cached data from a previously registered value
value.clear_cached()
elif not autocall and memoize:
value = _memoize_function(value, name, cache_scope=cache_scope)
logger.debug('registering injectable {!r}'.format(name))
_INJECTABLES[name] = value | Add a value that will be injected into other functions.
Parameters
----------
name : str
value
If a callable and `autocall` is True then the function's
argument names and keyword argument values will be matched
to registered variables when the function needs to be
evaluated by Orca. The return value will
be passed to any functions using this injectable. In all other
cases, `value` will be passed through untouched.
autocall : bool, optional
Set to True to have injectable functions automatically called
(with argument matching) and the result injected instead of
the function itself.
cache : bool, optional
Whether to cache the return value of an injectable function.
Only applies when `value` is a callable and `autocall` is True.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
memoize : bool, optional
If autocall is False it is still possible to cache function results
by setting this flag to True. Cached values are stored in a dictionary
keyed by argument values, so the argument values must be hashable.
Memoized functions have their caches cleared according to the same
rules as universal caching. |
def injectable(
name=None, autocall=True, cache=False, cache_scope=_CS_FOREVER,
memoize=False):
def decorator(func):
if name:
n = name
else:
n = func.__name__
add_injectable(
n, func, autocall=autocall, cache=cache, cache_scope=cache_scope,
memoize=memoize)
return func
return decorator | Decorates functions that will be injected into other functions.
Decorator version of `add_injectable`. Name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected. |
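Illustrative usage of both the autocall and memoize paths; the names here are invented for the example:

@orca.injectable(cache=True)
def settings():
    return {'growth_rate': 0.02}

# With autocall=False the function itself is injected; memoize=True still
# caches its results, keyed by (hashable) call arguments.
@orca.injectable(autocall=False, memoize=True)
def discount(value, rate=0.05):
    return value / (1.0 + rate)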
def get_injectable(name):
i = get_raw_injectable(name)
return i() if isinstance(i, _InjectableFuncWrapper) else i | Get an injectable by name. *Does not* evaluate wrapped functions.
Parameters
----------
name : str
Returns
-------
injectable
Original value or evaluated value of an _InjectableFuncWrapper. |
def get_injectable_func_source_data(name):
if injectable_type(name) != 'function':
raise ValueError('injectable {!r} is not a function'.format(name))
inj = get_raw_injectable(name)
if isinstance(inj, _InjectableFuncWrapper):
return utils.func_source_data(inj._func)
elif hasattr(inj, '__wrapped__'):
return utils.func_source_data(inj.__wrapped__)
else:
return utils.func_source_data(inj) | Return data about an injectable function's source, including file name,
line number, and source code.
Parameters
----------
name : str
Returns
-------
filename : str
lineno : int
The line number on which the function starts.
source : str |
def add_step(step_name, func):
if isinstance(func, Callable):
logger.debug('registering step {!r}'.format(step_name))
_STEPS[step_name] = _StepFuncWrapper(step_name, func)
else:
raise TypeError('func must be a callable') | Add a step function to Orca.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
Parameters
----------
step_name : str
func : callable |
def step(step_name=None):
def decorator(func):
if step_name:
name = step_name
else:
name = func.__name__
add_step(name, func)
return func
return decorator | Decorates functions that will be called by the `run` function.
Decorator version of `add_step`. step name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected. |
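A sketch of a step that consumes the hypothetical tables and injectables above; it assumes the package exposes a run() entry point accepting iter_vars, as Orca does:

@orca.step()
def grow_income(households, settings, iter_var):
    df = households.to_frame(columns=['income'])
    rate = settings['growth_rate']
    # iter_var holds the current iteration value (e.g. a year).
    households.update_col('income', df['income'] * (1.0 + rate))

orca.run(['grow_income'], iter_vars=[2024, 2025])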
def broadcast(cast, onto, cast_on=None, onto_on=None,
cast_index=False, onto_index=False):
logger.debug(
'registering broadcast of table {!r} onto {!r}'.format(cast, onto))
_BROADCASTS[(cast, onto)] = \
Broadcast(cast, onto, cast_on, onto_on, cast_index, onto_index) | Register a rule for merging two tables by broadcasting one onto
the other.
Parameters
----------
cast, onto : str
Names of registered tables.
cast_on, onto_on : str, optional
Column names used for merge, equivalent of ``left_on``/``right_on``
parameters of pandas.merge.
cast_index, onto_index : bool, optional
Whether to use table indexes for merge. Equivalent of
``left_index``/``right_index`` parameters of pandas.merge. |
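For example, with a 'buildings' table indexed by building_id and a 'households' table carrying a building_id column, a broadcast plus a merge might look like the sketch below (merge_tables belongs to the same API but is not shown in this excerpt):

# Rows of 'buildings' are repeated onto 'households' by matching the
# buildings index against households.building_id.
orca.broadcast('buildings', 'households',
               cast_index=True, onto_on='building_id')

merged = orca.merge_tables(target='households',
                           tables=['households', 'buildings'])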
def _get_broadcasts(tables):
tables = set(tables)
casts = tz.keyfilter(
lambda x: x[0] in tables and x[1] in tables, _BROADCASTS)
if tables - set(tz.concat(casts.keys())):
raise ValueError('Not enough links to merge all tables.')
return casts | Get the broadcasts associated with a set of tables.
Parameters
----------
tables : sequence of str
Table names for which broadcasts have been registered.
Returns
-------
casts : dict of `Broadcast`
Keys are tuples of strings like (cast_name, onto_name). |
def get_broadcast(cast_name, onto_name):
if is_broadcast(cast_name, onto_name):
return _BROADCASTS[(cast_name, onto_name)]
else:
raise KeyError(
'no rule found for broadcasting {!r} onto {!r}'.format(
cast_name, onto_name)) | Get a single broadcast.
Broadcasts are stored data about how to do a Pandas join.
A Broadcast object is a namedtuple with these attributes:
- cast: the name of the table being broadcast
- onto: the name of the table onto which "cast" is broadcast
- cast_on: The optional name of a column on which to join.
None if the table index will be used instead.
- onto_on: The optional name of a column on which to join.
None if the table index will be used instead.
- cast_index: True if the table index should be used for the join.
- onto_index: True if the table index should be used for the join.
Parameters
----------
cast_name : str
The name of the table being broadcast.
onto_name : str
The name of the table onto which `cast_name` is broadcast.
Returns
-------
broadcast : Broadcast |
def _all_reachable_tables(t):
for k, v in t.items():
for tname in _all_reachable_tables(v):
yield tname
yield k | A generator that provides all the names of tables that can be
reached via merges starting at the given target table. |
def _recursive_getitem(d, key):
if key in d:
return d
else:
for v in d.values():
return _recursive_getitem(v, key)
else:
raise KeyError('Key not found: {}'.format(key)) | Descend into a dict of dicts to return the one that contains
a given key. Every value in the dict must be another dict. |
def _dict_value_to_pairs(d):
d = d[tz.first(d)]
for k, v in d.items():
yield {k: v} | Takes the first value of a dictionary (which itself should be
a dictionary) and turns it into a series of {key: value} dicts.
For example, _dict_value_to_pairs({'c': {'a': 1, 'b': 2}}) will yield
{'a': 1} and {'b': 2}. |
def _next_merge(merge_node):
if all(_is_leaf_node(d) for d in _dict_value_to_pairs(merge_node)):
return merge_node
else:
for d in tz.remove(_is_leaf_node, _dict_value_to_pairs(merge_node)):
return _next_merge(d)
else:
raise OrcaError('No node found for next merge.') | Gets a node that has only leaf nodes below it. This table and
the ones below are ready to be merged to make a new leaf node. |
def get_step_table_names(steps):
table_names = set()
for s in steps:
table_names |= get_step(s)._tables_used()
return list(table_names) | Returns a list of table names injected into the provided steps.
Parameters
----------
steps: list of str
Steps to gather table inputs from.
Returns
-------
list of str |
def write_tables(fname, table_names=None, prefix=None, compress=False, local=False):
if table_names is None:
table_names = list_tables()
tables = (get_table(t) for t in table_names)
key_template = '{}/{{}}'.format(prefix) if prefix is not None else '{}'
# set compression options to zlib level-1 if compress arg is True
complib = compress and 'zlib' or None
complevel = compress and 1 or 0
with pd.HDFStore(fname, mode='a', complib=complib, complevel=complevel) as store:
for t in tables:
# if local arg is True, store only local columns
columns = None
if local is True:
columns = t.local_columns
store[key_template.format(t.name)] = t.to_frame(columns=columns) | Writes tables to a pandas.HDFStore file.
Parameters
----------
fname : str
File name for HDFStore. Will be opened in append mode and closed
at the end of this function.
table_names: list of str, optional, default None
List of tables to write. If None, all registered tables will
be written.
prefix: str
If not None, used to prefix the output table names so that
multiple iterations can go in the same file.
compress: boolean
Whether to compress the output file using standard HDF5-readable
zlib compression, default False.
local: boolean
Whether to write only each table's local columns, skipping registered
(computed) columns, default False. |
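Hedged usage examples; the file name and prefix are illustrative:

# Write every registered table, compressed, keeping only local columns
# and prefixing keys so several iterations can share one file.
orca.write_tables('results.h5', prefix=2024, compress=True, local=True)

# Write a subset of tables with no prefix.
orca.write_tables('results.h5', table_names=['households'])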
def injectables(**kwargs):
global _INJECTABLES
original = _INJECTABLES.copy()
_INJECTABLES.update(kwargs)
yield
_INJECTABLES = original | Temporarily add injectables to the pipeline environment.
Takes only keyword arguments.
Injectables will be returned to their original state when the context
manager exits. |
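Example of temporarily overriding injectables inside a with-block, using the hypothetical names from the earlier sketches:

with orca.injectables(settings={'growth_rate': 0.05}, iter_var=2030):
    # Overrides are visible to any function evaluated in this block.
    print(orca.eval_variable('settings'))
# On exit the original injectables are restored.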
def temporary_tables(**kwargs):
global _TABLES
original = _TABLES.copy()
for k, v in kwargs.items():
if not isinstance(v, pd.DataFrame):
raise ValueError('tables only accepts DataFrames')
add_table(k, v)
yield
_TABLES = original | Temporarily set DataFrames as registered tables.
Tables will be returned to their original state when the context
manager exits. Caching is not enabled for tables registered via
this function. |
def eval_variable(name, **kwargs):
with injectables(**kwargs):
vars = _collect_variables([name], [name])
return vars[name] | Execute a single variable function registered with Orca
and return the result. Any keyword arguments are temporarily set
as injectables. This gives the value as would be injected into a function.
Parameters
----------
name : str
Name of variable to evaluate.
Use variable expressions to specify columns.
Returns
-------
object
For injectables and columns this directly returns whatever
object is returned by the registered function.
For tables this returns a DataFrameWrapper as if the table
had been injected into a function. |
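Two illustrative calls, using the hypothetical registrations from the earlier sketches:

# A table.column expression returns that column as it would be injected.
income_k = orca.eval_variable('households.income_thousands')

# A bare name returns the injectable or table value; keyword arguments
# are registered as temporary injectables while evaluating.
settings_value = orca.eval_variable('settings')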
def to_frame(self, columns=None):
extra_cols = _columns_for_table(self.name)
if columns is not None:
columns = [columns] if isinstance(columns, str) else columns
columns = set(columns)
set_extra_cols = set(extra_cols)
local_cols = set(self.local.columns) & columns - set_extra_cols
df = self.local[list(local_cols)].copy()
extra_cols = {k: extra_cols[k] for k in (columns & set_extra_cols)}
else:
df = self.local.copy()
with log_start_finish(
'computing {!r} columns for table {!r}'.format(
len(extra_cols), self.name),
logger):
for name, col in extra_cols.items():
with log_start_finish(
'computing column {!r} for table {!r}'.format(
name, self.name),
logger):
df[name] = col()
return df | Make a DataFrame with the given columns.
Will always return a copy of the underlying table.
Parameters
----------
columns : sequence or string, optional
Sequence of the column names desired in the DataFrame. A string
can also be passed if only one column is desired.
If None all columns are returned, including registered columns.
Returns
-------
frame : pandas.DataFrame |
def update_col(self, column_name, series):
logger.debug('updating column {!r} in table {!r}'.format(
column_name, self.name))
self.local[column_name] = series | Add or replace a column in the underlying DataFrame.
Parameters
----------
column_name : str
Column to add or replace.
series : pandas.Series or sequence
Column data. |
def get_column(self, column_name):
with log_start_finish(
'getting single column {!r} from table {!r}'.format(
column_name, self.name),
logger):
extra_cols = _columns_for_table(self.name)
if column_name in extra_cols:
with log_start_finish(
'computing column {!r} for table {!r}'.format(
column_name, self.name),
logger):
column = extra_cols[column_name]()
else:
column = self.local[column_name]
if self.copy_col:
return column.copy()
else:
return column | Returns a column as a Series.
Parameters
----------
column_name : str
Returns
-------
column : pandas.Series |
def column_type(self, column_name):
extra_cols = list_columns_for_table(self.name)
if column_name in extra_cols:
col = _COLUMNS[(self.name, column_name)]
if isinstance(col, _SeriesWrapper):
return 'series'
elif isinstance(col, _ColumnFuncWrapper):
return 'function'
elif column_name in self.local_columns:
return 'local'
raise KeyError('column {!r} not found'.format(column_name)) | Report column type as one of 'local', 'series', or 'function'.
Parameters
----------
column_name : str
Returns
-------
col_type : {'local', 'series', 'function'}
'local' means that the column is part of the registered table,
'series' means the column is a registered Pandas Series,
and 'function' means the column is a registered function providing
a Pandas Series. |
def update_col_from_series(self, column_name, series, cast=False):
logger.debug('updating column {!r} in table {!r}'.format(
column_name, self.name))
col_dtype = self.local[column_name].dtype
if series.dtype != col_dtype:
if cast:
series = series.astype(col_dtype)
else:
err_msg = "Data type mismatch, existing:{}, update:{}"
err_msg = err_msg.format(col_dtype, series.dtype)
raise ValueError(err_msg)
self.local.loc[series.index, column_name] = series | Update existing values in a column from another series.
Index values must match in both column and series. Optionally
casts data type to match the existing column.
Parameters
----------
column_name : str
series : pandas.Series
cast : bool, optional, default False |
def clear_cached(self):
_TABLE_CACHE.pop(self.name, None)
for col in _columns_for_table(self.name).values():
col.clear_cached()
logger.debug('cleared cached columns for table {!r}'.format(self.name)) | Remove cached results from this table's computed columns. |
def local_columns(self):
if self._columns:
return self._columns
else:
self._call_func()
return self._columns | Only the columns contained in the DataFrame returned by the
wrapped function. (No registered columns included.) |
def _call_func(self):
if _CACHING and self.cache and self.name in _TABLE_CACHE:
logger.debug('returning table {!r} from cache'.format(self.name))
return _TABLE_CACHE[self.name].value
with log_start_finish(
'call function to get frame for table {!r}'.format(
self.name),
logger):
kwargs = _collect_variables(names=self._argspec.args,
expressions=self._argspec.defaults)
frame = self._func(**kwargs)
self._columns = list(frame.columns)
self._index = frame.index
self._len = len(frame)
wrapped = DataFrameWrapper(self.name, frame, copy_col=self.copy_col)
if self.cache:
_TABLE_CACHE[self.name] = CacheItem(
self.name, wrapped, self.cache_scope)
return wrapped | Call the wrapped function and return the result wrapped by
DataFrameWrapper.
Also updates attributes like columns, index, and length. |
def get_column(self, column_name):
frame = self._call_func()
return DataFrameWrapper(self.name, frame,
copy_col=self.copy_col).get_column(column_name) | Returns a column as a Series.
Parameters
----------
column_name : str
Returns
-------
column : pandas.Series |
def clear_cached(self):
x = _COLUMN_CACHE.pop((self.table_name, self.name), None)
if x is not None:
logger.debug(
'cleared cached value for column {!r} in table {!r}'.format(
self.name, self.table_name)) | Remove any cached result of this column. |
def clear_cached(self):
x = _INJECTABLE_CACHE.pop(self.name, None)
if x:
logger.debug(
'injectable {!r} removed from cache'.format(self.name)) | Clear a cached result for this injectable. |
def _tables_used(self):
args = list(self._argspec.args)
if self._argspec.defaults:
default_args = list(self._argspec.defaults)
else:
default_args = []
# Combine names from argument names and argument default values.
names = args[:len(args) - len(default_args)] + default_args
tables = set()
for name in names:
parent_name = name.split('.')[0]
if is_table(parent_name):
tables.add(parent_name)
return tables | Tables injected into the step.
Returns
-------
tables : set of str |
def qbe_tree(graph, nodes, root=None):
if root:
start = root
else:
index = random.randint(0, len(nodes) - 1)
start = nodes[index]
# A queue for BFS instead of DFS
to_visit = deque()
cnodes = copy(nodes)
visited = set()
# Format is (parent, parent_edge, neighbor, neighbor_field)
to_visit.append((None, None, start, None))
tree = {}
while len(to_visit) != 0 and nodes:
parent, parent_edge, v, v_edge = to_visit.pop()
# Prune
if v in nodes:
nodes.remove(v)
node = graph[v]
if v not in visited and len(node) > 1:
visited.add(v)
# Preorder process
if all((parent, parent_edge, v, v_edge)):
if parent not in tree:
tree[parent] = []
if (parent_edge, v, v_edge) not in tree[parent]:
tree[parent].append((parent_edge, v, v_edge))
if v not in tree:
tree[v] = []
if (v_edge, parent, parent_edge) not in tree[v]:
tree[v].append((v_edge, parent, parent_edge))
# Iteration
for node_edge, neighbor, neighbor_edge in node:
value = (v, node_edge, neighbor, neighbor_edge)
to_visit.append(value)
remove_leafs(tree, cnodes)
return tree, (len(nodes) == 0) | Given a graph, nodes to explore and an optional root, do a breadth-first
search and return the resulting tree. |
def combine(items, k=None):
length_items = len(items)
lengths = [len(i) for i in items]
length = reduce(lambda x, y: x * y, lengths)
repeats = [reduce(lambda x, y: x * y, lengths[i:])
for i in range(1, length_items)] + [1]
if k is not None:
k = k % length
# Python division by default is integer division (~ floor(a/b))
indices = [old_div((k % (lengths[i] * repeats[i])), repeats[i])
for i in range(length_items)]
return [items[i][indices[i]] for i in range(length_items)]
else:
matrix = []
for i, item in enumerate(items):
row = []
for subset in item:
row.extend([subset] * repeats[i])
times = old_div(length, len(row))
matrix.append(row * times)
# Transpose the matrix or return the columns instead rows
return list(zip(*matrix)) | Create a matrix in which each row is a tuple containing one of the
solutions, or return only the k-th solution when `k` is given. |
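A worked example of the behaviour described above; the first call enumerates the full Cartesian product, the second returns only the combination at index k:

>>> combine([[1, 2], ['a', 'b', 'c']])
[(1, 'a'), (1, 'b'), (1, 'c'), (2, 'a'), (2, 'b'), (2, 'c')]
>>> combine([[1, 2], ['a', 'b', 'c']], k=4)
[2, 'b']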
def pickle_encode(session_dict):
"Returns the given session dictionary pickled and encoded as a string."
pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
return base64.encodestring(pickled + get_query_hash(pickled).encode()) | Returns the given session dictionary pickled and encoded as a string. |
def func_source_data(func):
filename = inspect.getsourcefile(func)
lineno = inspect.getsourcelines(func)[1]
source = inspect.getsource(func)
return filename, lineno, source | Return data about a function source, including file name,
line number, and source code.
Parameters
----------
func : object
May be anything support by the inspect module, such as a function,
method, or class.
Returns
-------
filename : str
lineno : int
The line number on which the function starts.
source : str |
def clean(self):
if any(self.errors):
# Don't bother validating the formset unless each form is valid on
# its own
return
(selects, aliases, froms, wheres, sorts, groups_by,
params) = self.get_query_parts()
if not selects:
validation_message = _(u"At least you must check a row to get.")
raise forms.ValidationError(validation_message)
self._selects = selects
self._aliases = aliases
self._froms = froms
self._wheres = wheres
self._sorts = sorts
self._groups_by = groups_by
self._params = params | Checks that at least one field is selected. |
def get_results(self, limit=None, offset=None, query=None, admin_name=None,
row_number=False):
add_extra_ids = (admin_name is not None)
if not query:
sql = self.get_raw_query(limit=limit, offset=offset,
add_extra_ids=add_extra_ids)
else:
sql = query
if settings.DEBUG:
print(sql)
cursor = self._db_connection.cursor()
cursor.execute(sql, tuple(self._params))
query_results = cursor.fetchall()
if admin_name and not self._groups_by:
selects = self._get_selects_with_extra_ids()
results = []
try:
offset = int(offset)
except ValueError:
offset = 0
for r, row in enumerate(query_results):
i = 0
l = len(row)
if row_number:
result = [(r + offset + 1, u"#row%s" % (r + offset + 1))]
else:
result = []
while i < l:
appmodel, field = selects[i].split(".")
appmodel = self._unquote_name(appmodel)
field = self._unquote_name(field)
try:
if appmodel in self._models:
_model = self._models[appmodel]
_appmodel = u"%s_%s" % (_model._meta.app_label,
_model._meta.model_name)
else:
_appmodel = appmodel
admin_url = reverse("%s:%s_change" % (
admin_name,
_appmodel),
args=[row[i + 1]]
)
except NoReverseMatch:
admin_url = None
result.append((row[i], admin_url))
i += 2
results.append(result)
return results
else:
if row_number:
results = []
for r, row in enumerate(query_results):
result = [r + 1]
for cell in row:
result.append(cell)
results.append(result)
return results
else:
return query_results | Fetch all results after performing the SQL query. |
def parse_content_type(content_type):
if '; charset=' in content_type:
return tuple(content_type.split('; charset='))
else:
if 'text' in content_type:
encoding = 'ISO-8859-1'
else:
try:
format = formats.find_by_content_type(content_type)
except formats.UnknownFormat:
encoding = 'ISO-8859-1'
else:
encoding = format.default_encoding or 'ISO-8859-1'
return (content_type, encoding) | Return a tuple of content type and charset.
:param content_type: A string describing a content type. |
def parse_http_accept_header(header):
components = [item.strip() for item in header.split(',')]
l = []
for component in components:
if ';' in component:
subcomponents = [item.strip() for item in component.split(';')]
l.append(
(
subcomponents[0], # eg. 'text/html'
subcomponents[1][2:] # eg. 'q=0.9'
)
)
else:
l.append((component, '1'))
l.sort(
key = lambda i: i[1],
reverse = True
)
content_types = []
for i in l:
content_types.append(i[0])
return content_types | Return a list of content types listed in the HTTP Accept header
ordered by quality.
:param header: A string describing the contents of the HTTP Accept header. |
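For instance (the quality values are illustrative):

>>> parse_http_accept_header('text/html, application/json;q=0.9, */*;q=0.1')
['text/html', 'application/json', '*/*']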
def parse_multipart_data(request):
return MultiPartParser(
META=request.META,
input_data=StringIO(request.body),
upload_handlers=request.upload_handlers,
encoding=request.encoding
).parse() | Parse a request with multipart data.
:param request: A HttpRequest instance. |
def override_supported_formats(formats):
def decorator(function):
@wraps(function)
def wrapper(self, *args, **kwargs):
self.supported_formats = formats
return function(self, *args, **kwargs)
return wrapper
return decorator | Override the views class' supported formats for the decorated function.
Arguments:
formats -- A list of strings describing formats, e.g. ``['html', 'json']``. |
def route(regex, method, name):
def decorator(function):
function.route = routes.route(
regex = regex,
view = function.__name__,
method = method,
name = name
)
@wraps(function)
def wrapper(self, *args, **kwargs):
return function(self, *args, **kwargs)
return wrapper
return decorator | Route the decorated view.
:param regex: A string describing a regular expression to which the request path will be matched.
:param method: A string describing the HTTP method that this view accepts.
:param name: A string describing the name of the URL pattern.
``regex`` may also be a lambda that accepts the parent resource's ``prefix`` argument and returns
a string describing a regular expression to which the request path will be matched.
``name`` may also be a lambda that accepts the parent resource's ``views`` argument and returns
a string describing the name of the URL pattern. |
def before(method_name):
def decorator(function):
@wraps(function)
def wrapper(self, *args, **kwargs):
returns = getattr(self, method_name)(*args, **kwargs)
if returns is None:
return function(self, *args, **kwargs)
else:
if isinstance(returns, HttpResponse):
return returns
else:
return function(self, *returns)
return wrapper
return decorator | Run the given method prior to the decorated view.
If you return anything besides ``None`` from the given method,
its return values will replace the arguments of the decorated
view.
If you return an instance of ``HttpResponse`` from the given method,
Respite will return it immediately without delegating the request to the
decorated view.
Example usage::
class ArticleViews(Views):
@before('_load')
def show(self, request, article):
return self._render(
request = request,
template = 'show',
context = {
'article': article
}
)
def _load(self, request, id):
try:
return request, Article.objects.get(id=id)
except Article.DoesNotExist:
return self._error(request, 404, message='The article could not be found.')
:param method: A string describing a class method. |
def index(self, request):
objects = self.model.objects.all()
return self._render(
request = request,
template = 'index',
context = {
cc2us(pluralize(self.model.__name__)): objects,
},
status = 200
) | Render a list of objects. |
def new(self, request):
form = (self.form or generate_form(self.model))()
return self._render(
request = request,
template = 'new',
context = {
'form': form
},
status = 200
) | Render a form to create a new object. |
def create(self, request):
form = (self.form or generate_form(self.model))(request.POST)
if form.is_valid():
object = form.save()
return self._render(
request = request,
template = 'show',
context = {
cc2us(self.model.__name__): object
},
status = 201
)
else:
return self._render(
request = request,
template = 'new',
context = {
'form': form
},
status = 400
) | Create a new object. |
def edit(self, request, id):
try:
object = self.model.objects.get(id=id)
except self.model.DoesNotExist:
return self._render(
request = request,
template = '404',
context = {
'error': 'The %s could not be found.' % self.model.__name__.lower()
},
status = 404,
prefix_template_path = False
)
form = (self.form or generate_form(self.model))(instance=object)
# Add "_method" field to override request method to PUT
form.fields['_method'] = CharField(required=True, initial='PUT', widget=HiddenInput)
return self._render(
request = request,
template = 'edit',
context = {
cc2us(self.model.__name__): object,
'form': form
},
status = 200
) | Render a form to edit an object. |
def update(self, request, id):
try:
object = self.model.objects.get(id=id)
except self.model.DoesNotExist:
return self._render(
request = request,
template = '404',
context = {
'error': 'The %s could not be found.' % self.model.__name__.lower()
},
status = 404,
prefix_template_path = False
)
fields = []
for field in request.PATCH:
try:
self.model._meta.get_field_by_name(field)
except FieldDoesNotExist:
continue
else:
fields.append(field)
Form = generate_form(
model = self.model,
form = self.form,
fields = fields
)
form = Form(request.PATCH, instance=object)
if form.is_valid():
object = form.save()
return self.show(request, id)
else:
return self._render(
request = request,
template = 'edit',
context = {
'form': form
},
status = 400
) | Update an object. |
def replace(self, request, id):
try:
object = self.model.objects.get(id=id)
except self.model.DoesNotExist:
return self._render(
request = request,
template = '404',
context = {
'error': 'The %s could not be found.' % self.model.__name__.lower()
},
status = 404,
prefix_template_path = False
)
form = (self.form or generate_form(self.model))(request.PUT, instance=object)
if form.is_valid():
object = form.save()
return self.show(request, id)
else:
return self._render(
request = request,
template = 'edit',
context = {
'form': form
},
status = 400
) | Replace an object. |
def destroy(self, request, id):
try:
object = self.model.objects.get(id=id)
object.delete()
except self.model.DoesNotExist:
return self._render(
request = request,
template = '404',
context = {
'error': 'The %s could not be found.' % self.model.__name__.lower()
},
status = 404,
prefix_template_path = False
)
return self._render(
request = request,
template = 'destroy',
status = 200
) | Delete an object. |
def get_search_fields(cls):
sfdict = {}
for klass in tuple(cls.__bases__) + (cls, ):
if hasattr(klass, 'search_fields'):
sfdict.update(klass.search_fields)
return sfdict | Return a dict of the search fields defined on this class and its bases. |
def find(identifier):
for format in FORMATS:
if identifier in [format.name, format.acronym, format.extension]:
return format
raise UnknownFormat('No format found with name, acronym or extension "%s"' % identifier) | Find and return a format by name, acronym or extension.
:param identifier: A string describing the format. |
def find_by_name(name):
for format in FORMATS:
if name == format.name:
return format
raise UnknownFormat('No format found with name "%s"' % name) | Find and return a format by name.
:param name: A string describing the name of the format. |
def find_by_extension(extension):
for format in FORMATS:
if extension in format.extensions:
return format
raise UnknownFormat('No format found with extension "%s"' % extension) | Find and return a format by extension.
:param extension: A string describing the extension of the format. |
def find_by_content_type(content_type):
for format in FORMATS:
if content_type in format.content_types:
return format
raise UnknownFormat('No format found with content type "%s"' % content_type) | Find and return a format by content type.
:param content_type: A string describing the internet media type of the format. |
def options(self, request, map, *args, **kwargs):
options = {}
for method, function in map.items():
options[method] = function.__doc__
return self._render(
request = request,
template = 'options',
context = {
'options': options
},
status = 200,
headers = {
'Allow': ', '.join(options.keys())
}
) | List communication options. |
def _error(self, request, status, headers={}, prefix_template_path=False, **kwargs):
return self._render(
request = request,
template = str(status),
status = status,
context = {
'error': kwargs
},
headers = headers,
prefix_template_path = prefix_template_path
) | Convenience method to render an error response. The template is inferred from the status code.
:param request: A django.http.HttpRequest instance.
:param status: An integer describing the HTTP status code to respond with.
:param headers: A dictionary describing HTTP headers.
:param prefix_template_path: A boolean describing whether to prefix the template with the view's template path.
:param kwargs: Any additional keyword arguments to inject. These are wrapped under ``error`` for convenience.
For implementation details, see ``render`` |
def find(format):
try:
serializer = SERIALIZERS[format]
except KeyError:
raise UnknownSerializer('No serializer found for %s' % format.acronym)
return serializer | Find and return a serializer for the given format.
Arguments:
format -- A Format instance. |
def get_form_kwargs(self):
update_data ={}
sfdict = self.filter_class.get_search_fields()
for fieldname in sfdict:
try:
has_multiple = sfdict[fieldname].get('multiple', False)
except:
has_multiple = False
if has_multiple:
value = self.request.GET.getlist(fieldname, [])
else:
value = self.request.GET.get(fieldname, None)
update_data[fieldname] = value
if self.order_field:
update_data[self.order_field] = self.request.GET.get(self.order_field, None)
initial = self.get_initial()
initial.update(update_data)
kwargs = {'initial': initial }
if self.groups_for_userlist != None:
pot_users = User.objects.exclude(id=self.request.user.id)
if len(self.groups_for_userlist):
pot_users = pot_users.filter(groups__name__in = self.groups_for_userlist)
pot_users = pot_users.distinct().order_by('username')
user_choices = tuple([(user.id, str(user)) for user in pot_users])
kwargs['user_choices'] = user_choices
return kwargs | Returns the keyword arguments for instantiating the search form. |
def us2mc(string):
return re.sub(r'_([a-z])', lambda m: (m.group(1).upper()), string) | Transform an underscore_case string to a mixedCase string |
def generate_form(model, form=None, fields=False, exclude=False):
_model, _fields, _exclude = model, fields, exclude
class Form(form or forms.ModelForm):
class Meta:
model = _model
if _fields is not False:
fields = _fields
if _exclude is not False:
exclude = _exclude
return Form | Generate a form from a model.
:param model: A Django model.
:param form: A Django form.
:param fields: A list of fields to include in this form.
:param exclude: A list of fields to exclude in this form. |
def route(regex, view, method, name):
return _Route(regex, view, method, name) | Route the given view.
:param regex: A string describing a regular expression to which the request path will be matched.
:param view: A string describing the name of the view to delegate the request to.
:param method: A string describing the HTTP method that this view accepts.
:param name: A string describing the name of the URL pattern.
``regex`` may also be a lambda that accepts the parent resource's ``prefix`` argument and returns
a string describing a regular expression to which the request path will be matched.
``name`` may also be a lambda that accepts the parent resource's ``views`` argument and returns
a string describing the name of the URL pattern. |
def sample_double_norm(mean, std_upper, std_lower, size):
from scipy.special import erfinv
# There's probably a better way to do this. We first draw percentiles
# uniformly between 0 and 1. We want the peak of the distribution to occur
# at `mean`. However, if we assign 50% of the samples to the lower half
# and 50% to the upper half, the side with the smaller variance will be
# overrepresented because of the 1/sigma normalization of the Gaussian
# PDF. Therefore we need to divide points between the two halves with a
# fraction `cutoff` (defined below) going to the lower half. Having
# partitioned them this way, we can then use the standard Gaussian
# quantile function to go from percentiles to sample values -- except that
# we must remap from [0, cutoff] to [0, 0.5] and from [cutoff, 1] to [0.5,
# 1].
samples = np.empty(size)
percentiles = np.random.uniform(0., 1., size)
cutoff = std_lower / (std_lower + std_upper)
w = (percentiles < cutoff)
percentiles[w] *= 0.5 / cutoff
samples[w] = mean + np.sqrt(2) * std_lower * erfinv(2 * percentiles[w] - 1)
w = ~w
percentiles[w] = 1 - (1 - percentiles[w]) * 0.5 / (1 - cutoff)
samples[w] = mean + np.sqrt(2) * std_upper * erfinv(2 * percentiles[w] - 1)
return samples | Note that this function requires Scipy. |
def sample_gamma(alpha, beta, size):
if alpha <= 0:
raise ValueError('alpha must be positive; got %e' % alpha)
if beta <= 0:
raise ValueError('beta must be positive; got %e' % beta)
return np.random.gamma(alpha, scale=1./beta, size=size) | This is mostly about recording the conversion between Numpy/Scipy
conventions and Wikipedia conventions. Some equations:
mean = alpha / beta
variance = alpha / beta**2
mode = (alpha - 1) / beta [if alpha > 1; otherwise undefined]
skewness = 2 / sqrt(alpha) |
def find_gamma_params(mode, std):
if mode < 0:
raise ValueError('input mode must be positive for gamma; got %e' % mode)
var = std**2
beta = (mode + np.sqrt(mode**2 + 4 * var)) / (2 * var)
j = 2 * var / mode**2
alpha = (j + 1 + np.sqrt(2 * j + 1)) / j
if alpha <= 1:
raise ValueError('couldn\'t compute self-consistent gamma parameters: '
'mode=%e std=%e alpha=%e beta=%e' % (mode, std, alpha, beta))
return alpha, beta | Given a modal value and a standard deviation, compute corresponding
parameters for the gamma distribution.
Intended to be used to replace normal distributions when the value must be
positive and the uncertainty is comparable to the best value. Conversion
equations determined from the relations given in the sample_gamma()
docs. |
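A small sketch combining the two helpers above; the mode and standard deviation are arbitrary illustrative numbers:

# Replace a normal(mode=2.0, std=1.5) description with a gamma
# distribution that cannot go negative.
alpha, beta = find_gamma_params(2.0, 1.5)
samples = sample_gamma(alpha, beta, 100000)
# The sample mode sits near 2.0 and the spread near 1.5; the mean
# (alpha / beta) is somewhat larger than the mode.
print(samples.mean(), samples.std())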
def _lval_add_towards_polarity(x, polarity):
if x < 0:
if polarity < 0:
return Lval('toinf', x)
return Lval('pastzero', x)
elif polarity > 0:
return Lval('toinf', x)
return Lval('pastzero', x) | Compute the appropriate Lval "kind" for the limit of value `x` towards
`polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and
the infinity direction of polarity. |
def unwrap(msmt):
if np.isscalar(msmt):
return float(msmt)
if isinstance(msmt, (Uval, Lval)):
return msmt
if isinstance(msmt, Textual):
return msmt.unwrap()
raise ValueError('don\'t know how to treat %r as a measurement' % msmt) | Convert the value into the most basic representation that we can do
math on: float if possible, then Uval, then Lval. |
def repval(msmt, limitsok=False):
if np.isscalar(msmt):
return float(msmt)
if isinstance(msmt, Uval):
return msmt.repvals(uval_default_repval_method)[0]
if isinstance(msmt, Lval):
if not limitsok and msmt.kind in ('tozero', 'toinf', 'pastzero'):
raise LimitError()
return msmt.value
if isinstance(msmt, Textual):
return msmt.repval(limitsok=limitsok)
raise ValueError('don\'t know how to treat %r as a measurement' % msmt) | Get a best-effort representative value as a float. This is DANGEROUS
because it discards limit information, which is rarely wise. m_liminfo()
or m_unwrap() are recommended instead. |
def limtype(msmt):
if np.isscalar(msmt):
return 0
if isinstance(msmt, Uval):
return 0
if isinstance(msmt, Lval):
if msmt.kind == 'undef':
raise ValueError('no simple limit type for Lval %r' % msmt)
# Quasi-hack here: limits of ('tozero', [positive number]) are
# reported as upper limits. In a plot full of fluxes this would be
# what makes sense, but note that this would be misleading if the
# quantity in question was something that could go negative.
p = msmt._polarity()
if p == -2 or p == 1:
return -1
if p == 2 or p == -1:
return 1
return 0
if isinstance(msmt, Textual):
return msmt.limtype()
raise ValueError('don\'t know how to treat %r as a measurement' % msmt) | Return -1 if this value is some kind of upper limit, 1 if this value
is some kind of lower limit, 0 otherwise. |
def errinfo(msmt):
if isinstance(msmt, Textual):
msmt = msmt.unwrap()
if np.isscalar(msmt):
return 0, msmt, msmt, msmt
if isinstance(msmt, Uval):
rep, plus1, minus1 = msmt.repvals(uval_default_repval_method)
return 0, rep, plus1, minus1
if isinstance(msmt, Lval):
return limtype(msmt), msmt.value, msmt.value, msmt.value
raise ValueError('don\'t know how to treat %r as a measurement' % msmt) | Return (limtype, repval, errval1, errval2). Like m_liminfo, but also
provides error bar information for values that have it. |
def fmtinfo(value):
if value is None:
raise ValueError('cannot format None!')
if isinstance(value, text_type):
return '', value, False
if isinstance(value, bool):
# Note: isinstance(True, int) = True, so this must come before the next case.
if value:
return 'b', 'y', False
return 'b', '', False
if isinstance(value, six.integer_types):
return 'i', text_type(value), False
if isinstance(value, float):
return 'f', text_type(value), True
if hasattr(value, '__pk_fmtinfo__'):
return value.__pk_fmtinfo__()
raise ValueError('don\'t know how to format %r as a measurement' % value) | Returns (typetag, text, is_imprecise). Unlike other functions that operate
on measurements, this also operates on bools, ints, and strings. |
def from_pcount(nevents):
if nevents < 0:
raise ValueError('Poisson parameter `nevents` must be nonnegative')
return Uval(np.random.gamma(nevents + 1, size=uval_nsamples)) | We assume a Poisson process. nevents is the number of events in
some interval. The distribution of values is the distribution of the
Poisson rate parameter given this observed number of events, where the
"rate" is in units of events per interval of the same duration. The
max-likelihood value is nevents, but the mean value is nevents + 1.
The gamma distribution is obtained by assuming an improper, uniform
prior for the rate between 0 and infinity. |
def repvals(self, method):
if method == 'pct':
return pk_scoreatpercentile(self.d, [50., 84.134, 15.866])
if method == 'gauss':
m, s = self.d.mean(), self.d.std()
return np.asarray([m, m + s, m - s])
raise ValueError('unknown representative-value method "%s"' % method) | Compute representative statistical values for this Uval. `method`
may be either 'pct' or 'gauss'.
Returns (best, plus_one_sigma, minus_one_sigma), where `best` is the
"best" value in some sense, and the others correspond to values at
the ~84 and 16 percentile limits, respectively. Because of the
sampled nature of the Uval system, there is no single method to
compute these numbers.
The "pct" method returns the 50th, 15.866th, and 84.134th percentile
values.
The "gauss" method computes the mean μ and standard deviation σ of the
samples and returns [μ, μ+σ, μ-σ]. |
def repval(self, limitsok=False):
if not limitsok and self.dkind in ('lower', 'upper'):
raise LimitError()
if self.dkind == 'unif':
lower, upper = map(float, self.data)
v = 0.5 * (lower + upper)
elif self.dkind in _noextra_dkinds:
v = float(self.data)
elif self.dkind in _yesextra_dkinds:
v = float(self.data[0])
else:
raise RuntimeError('can\'t happen')
if self.tkind == 'log10':
return 10**v
return v | Get a best-effort representative value as a float. This can be
DANGEROUS because it discards limit information, which is rarely wise. |
def in_casapy (helper, vis=None, figfile=None):
if vis is None:
raise ValueError ('vis')
helper.casans.plotants (vis=vis, figfile=figfile) | This function is run inside the weirdo casapy IPython environment! A
strange set of modules is available, and the
`pwkit.environments.casa.scripting` system sets up a very particular
environment to allow encapsulated scripting. |
def datasets(dataset, node, ll=None, ur=None, start_date=None, end_date=None, api_key=None):
payload = {
"node": node,
"apiKey": api_key
}
if dataset:
payload["datasetName"] = dataset
if ll and ur:
payload["lowerLeft"] = {
"latitude": ll["latitude"],
"longitude": ll["longitude"]
}
payload["upperRight"] = {
"latitude": ur["latitude"],
"longitude": ur["longitude"]
}
if start_date:
payload["startDate"] = start_date
if end_date:
payload["endDate"] = end_date
return json.dumps(payload) | This method is used to find datasets available for searching.
By passing no parameters except node, all available datasets
are returned. Additional parameters such as temporal range
and spatial bounding box can be used to find datasets that
provide more specific data. The dataset name parameter can
be used to limit the results based on matching the supplied
value against the dataset name with assumed wildcards at the
beginning and end. All parameters are optional except for
the 'node' parameter.
:param dataset:
Dataset Identifier
:param ll:
Lower left corner of an AOI bounding box - in decimal form
Longitude/Latitude dictionary
e.g. { "longitude": 0.0, "latitude": 0.0 }
:param ur:
Upper right corner of an AOI bounding box - in decimal form
Longitude/Latitude dictionary
e.g. { "longitude": 0.0, "latitude": 0.0 }
:param start_date:
Used for searching scene acquisition - will accept anything
that the PHP strtotime function can understand
:param end_date:
Used for searching scene acquisition - will accept anything
that the PHP strtotime function can understand
:param node:
The requested Catalog
:param api_key:
API key is not required. |
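An illustrative call; the dataset name, bounding box, and API key are placeholders:

payload = datasets(
    "LANDSAT_8_C1", "EE",
    ll={"longitude": -124.4, "latitude": 32.5},
    ur={"longitude": -114.1, "latitude": 42.0},
    start_date="2017-01-01", end_date="2017-12-31",
    api_key="YOUR_API_KEY")
# 'payload' is a JSON string ready to post to the datasets endpoint.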
def download(dataset, node, entityids, products, api_key=None):
payload = {
"datasetName": dataset,
"node": node,
"apiKey": api_key,
"entityIds": entityids,
"products": products
}
return json.dumps(payload) | The use of this request will be to obtain valid data download URLs.
:param dataset:
:param entityIds:
list
:param products:
list
:param node:
:param api_key:
API key is required. |
def download_options(dataset, node, entityids, api_key=None):
payload = {
"apiKey": api_key,
"datasetName": dataset,
"node": node,
"entityIds": entityids
}
return json.dumps(payload) | The use of the download options request is to discover the different download
options for each scene. Some download options may exist but still be unavailable
due to disk usage and many other factors. If a download is unavailable
it may need to be ordered.
:param dataset:
:param node:
:param entityIds:
:param api_key:
API key is not required. |
def login(username, password, catalogId='EE'):
payload = {
"username": username,
"password": password,
"authType": "",
"catalogId": catalogId
}
return json.dumps(payload) | This method requires SSL be used due to the sensitive nature of
users passwords. Upon a successful login, an API key will be
returned. This key will be active for one hour and should be
destroyed upon final use of the service by calling the logout
method. Users must have "Machine to Machine" access based on
a user-based role in the users profile.
:param username:
:param password: |
def metadata(dataset, node, entityids, api_key=None):
payload = {
"apiKey": api_key,
"datasetName": dataset,
"node": node,
"entityIds": entityids
}
return json.dumps(payload) | The use of the metadata request is intended for those who have
acquired scene IDs from a different source. It will return the
same metadata that is available via the search request.
:param dataset:
:param node:
:param sceneid:
:param api_key: |
def approx_colormap (samples, transform='none', fitfactor=1.):
import scipy.interpolate as SI
values = samples[0]
if transform == 'none':
pass
elif transform == 'reverse':
samples = samples[:,::-1]
elif transform == 'sqrt':
values = np.sqrt (values)
else:
raise ValueError ('unknown transformation: ' + str (transform))
nsamp = samples.shape[1]
rspline = SI.splrep (values, samples[R+1], s=fitfactor/nsamp)
gspline = SI.splrep (values, samples[G+1], s=fitfactor/nsamp)
bspline = SI.splrep (values, samples[B+1], s=fitfactor/nsamp)
def colormap (values):
values = np.asarray (values)
mapped = np.empty (values.shape + (3,))
flatvalues = values.flatten ()
flatmapped = mapped.reshape (flatvalues.shape + (3,))
flatmapped[:,R] = SI.splev (flatvalues, rspline)
flatmapped[:,G] = SI.splev (flatvalues, gspline)
flatmapped[:,B] = SI.splev (flatvalues, bspline)
return mapped
return colormap | Given a colormap sampled at various values, compute splines that
interpolate in R, G, and B (separately) for fast evaluation of the
colormap for arbitrary float values. We have primitive support for some
transformations, though these are generally best done upstream of the
color mapping code.
samples - Shape (4, n). samples[0,:] are the normalized values at
which the map is sampled, hopefully ranging uniformly
between 0 and 1. samples[1:4,:] are the RGB values of
the colormap. (They don't need to actually be RGB, but
there need to be three of them.)
transform - One of 'none', 'reverse', or 'sqrt'.
fitfactor - Sets the tightness of the spline interpolation.
Returns: a function `map` following `map(n) -> rgb`, where if `n` has
shape S the result has shape (S + (3,)), following a spline
interpolation from the sampled values. |
def srgb_to_linsrgb (srgb):
gamma = ((srgb + 0.055) / 1.055)**2.4
scale = srgb / 12.92
return np.where (srgb > 0.04045, gamma, scale) | Convert sRGB values to physically linear ones. The transformation is
uniform in RGB, so *srgb* can be of any shape.
*srgb* values should range between 0 and 1, inclusively. |
def linsrgb_to_srgb (linsrgb):
# From Wikipedia, but easy analogue to the above.
gamma = 1.055 * linsrgb**(1./2.4) - 0.055
scale = linsrgb * 12.92
return np.where (linsrgb > 0.0031308, gamma, scale) | Convert physically linear RGB values into sRGB ones. The transform is
uniform in the components, so *linsrgb* can be of any shape.
*linsrgb* values should range between 0 and 1, inclusively. |
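A quick round-trip check of the two conversions above:

import numpy as np

srgb = np.array([0.2, 0.5, 0.9])
linear = srgb_to_linsrgb(srgb)
restored = linsrgb_to_srgb(linear)
# The round trip recovers the original values to numerical precision.
assert np.allclose(restored, srgb)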
def xyz_to_cielab (xyz, refwhite):
norm = xyz / refwhite
pow = norm**0.333333333333333
scale = 7.787037 * norm + 16./116
mapped = np.where (norm > 0.008856, pow, scale)
cielab = np.empty_like (xyz)
cielab[...,L] = 116 * mapped[...,Y] - 16
cielab[...,A] = 500 * (mapped[...,X] - mapped[...,Y])
cielab[...,B] = 200 * (mapped[...,Y] - mapped[...,Z])
return cielab | Convert CIE XYZ color values to CIE L*a*b*.
*xyz* should be of shape (*, 3). *refwhite* is the reference white value, of
shape (3, ).
Return value will have same shape as *xyz*, but be in CIE L*a*b*
coordinates. |
def cielab_to_xyz (cielab, refwhite):
def func (t):
pow = t**3
scale = 0.128419 * t - 0.0177129
return np.where (t > 0.206897, pow, scale)
xyz = np.empty_like (cielab)
lscale = 1./116 * (cielab[...,L] + 16)
xyz[...,X] = func (lscale + 0.002 * cielab[...,A])
xyz[...,Y] = func (lscale)
xyz[...,Z] = func (lscale - 0.005 * cielab[...,B])
xyz *= refwhite
return xyz | Convert CIE L*a*b* color values to CIE XYZ.
*cielab* should be of shape (*, 3). *refwhite* is the reference white
value in the L*a*b* color space, of shape (3, ).
Return value has same shape as *cielab* |
def cielab_to_msh (cielab):
msh = np.empty_like (cielab)
msh[...,M] = np.sqrt ((cielab**2).sum (axis=-1))
msh[...,S] = np.arccos (cielab[...,L] / msh[...,M])
msh[...,H] = np.arctan2 (cielab[...,B], cielab[...,A])
return msh | Convert CIE L*a*b* to Moreland's Msh colorspace.
*cielab* should be of shape (*, 3).
Return value will have same shape. |
def msh_to_cielab (msh):
cielab = np.empty_like (msh)
cielab[...,L] = msh[...,M] * np.cos (msh[...,S])
cielab[...,A] = msh[...,M] * np.sin (msh[...,S]) * np.cos (msh[...,H])
cielab[...,B] = msh[...,M] * np.sin (msh[...,S]) * np.sin (msh[...,H])
return cielab | Convert Moreland's Msh colorspace to CIE L*a*b*.
*msh* should be of shape (*, 3).
Return value will have same shape. |
def moreland_adjusthue (msh, m_unsat):
if msh[M] >= m_unsat:
return msh[H] # "Best we can do"
hspin = (msh[S] * np.sqrt (m_unsat**2 - msh[M]**2) /
(msh[M] * np.sin (msh[S])))
if msh[H] > -np.pi / 3: # "Spin away from purple"
return msh[H] + hspin
return msh[H] - hspin | Moreland's AdjustHue procedure to adjust the hue value of an Msh color
based on ... some criterion.
*msh* should be of of shape (3, ). *m_unsat* is a scalar.
Return value is the adjusted h (hue) value. |
def get_datasets_in_nodes():
data_dir = os.path.join(scriptdir, "..", "usgs", "data")
cwic = map(lambda d: d["datasetName"], api.datasets(None, CWIC_LSI_EXPLORER_CATALOG_NODE)['data'])
ee = map(lambda d: d["datasetName"], api.datasets(None, EARTH_EXPLORER_CATALOG_NODE)['data'])
hdds = map(lambda d: d["datasetName"], api.datasets(None, HDDS_EXPLORER_CATALOG_NODE)['data'])
lpcs = map(lambda d: d["datasetName"], api.datasets(None, LPCS_EXPLORER_CATALOG_NODE)['data'])
# Create mapping from dataset to node
datasets = {}
datasets.update( { ds : "CWIC" for ds in cwic } )
datasets.update( { ds : "EE" for ds in ee } )
datasets.update( { ds : "HDDS" for ds in hdds } )
datasets.update( { ds : "LPCS" for ds in lpcs } )
datasets_path = os.path.join(data_dir, "datasets.json")
with open(datasets_path, "w") as f:
f.write(json.dumps(datasets))
# Find the datasets with ambiguous nodes
cwic_ee = [ds for ds in cwic if ds in ee]
cwic_hdds = [ds for ds in cwic if ds in hdds]
cwic_lpcs = [ds for ds in cwic if ds in lpcs]
ee_hdds = [ds for ds in ee if ds in hdds]
ee_lpcs = [ds for ds in ee if ds in lpcs]
hdds_lpcs = [ds for ds in hdds if ds in lpcs] | Get the node associated with each dataset. Some datasets
will have an ambiguous node since they exist in more than
one node. |